From bdaa306aafe44d7caa47bd54084248868b0b4e27 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 001/335] Add an on ramp option --- .../exp_configs/non_rl/i210_subnetwork.py | 74 ++++--- flow/controllers/routing_controllers.py | 21 ++ flow/envs/base.py | 14 +- flow/networks/i210_subnetwork.py | 196 ++++++++++-------- 4 files changed, 184 insertions(+), 121 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index dd85c56cf..c3db70f04 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -4,6 +4,7 @@ import numpy as np from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,18 +16,35 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), -) +ON_RAMP = True + +if ON_RAMP: + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + routing_controller=(I210Router, {}) + ) + +else: + # create the base vehicle type that will be used for inflows + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + 
}), + ) inflow = InFlows() # main highway @@ -37,18 +55,19 @@ departLane="random", departSpeed=23) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=321, + departLane="random", + departSpeed=20) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=421, + departLane="random", + departSpeed=20) NET_TEMPLATE = os.path.join( config.PROJECT_PATH, @@ -71,20 +90,21 @@ sim=SumoParams( sim_step=0.5, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=7200, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={"use_on_ramp": ON_RAMP} ), # vehicles to be placed in the network at the start of a rollout (see diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index e6ccdde78..c880b5bbf 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -124,3 +124,24 @@ def choose_route(self, env): new_route = super().choose_route(env) return new_route + +class I210Router(ContinuousRouter): + """Assists in choosing routes in select cases for the Bay Bridge network. + Extension to the Continuous Router. + Usage + ----- + See base class for usage example. 
+ """ + + def choose_route(self, env): + """See parent class.""" + edge = env.k.vehicle.get_edge(self.veh_id) + lane = env.k.vehicle.get_lane(self.veh_id) + + # vehicles on these edges in lanes 4 and 5 are not going to be able to make it out in time + if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]: + new_route = env.available_routes["119257908#1-AddedOffRampEdge"][0][0] + else: + new_route = super().choose_route(env) + + return new_route diff --git a/flow/envs/base.py b/flow/envs/base.py index 1abb8a3c9..60eab6ebe 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -396,8 +396,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. \n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py index d8e05efb5..febb39b00 100644 --- a/flow/networks/i210_subnetwork.py +++ b/flow/networks/i210_subnetwork.py @@ -45,97 +45,109 @@ def specify_routes(self, net_params): Routes for vehicles moving through the bay bridge from Oakland to San Francisco. 
""" - rts = { - # Main highway - "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", 
"27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], - } + if net_params.additional_params["use_on_ramp"]: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 17 / 8378) + ], + "119257908#0": [ + (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1": [ + (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1", 
"119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#2": [ + (["119257908#2", "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 9 / 321), + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 9 / 321), + ], + "27414342#0": [ + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 20 / 421), + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 0.5), + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + } + + else: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), + ], + } return rts From 2eac0da8ecb3dbdbc45fd6efcca2718c9207fc12 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 18:25:53 -0700 Subject: [PATCH 002/335] The acceleration noise is now scaled by the sqrt of the 
sim step as suggested by Benni --- flow/controllers/base_controller.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 41780826b..4004b1c4d 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -107,7 +107,7 @@ def get_action(self, env): # add noise to the accelerations, if requested if self.accel_noise > 0: - accel += np.random.normal(0, self.accel_noise) + accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) # run the failsafes, if requested if self.fail_safe == 'instantaneous': From b8d12126b09bbf2552b27c5ed35887ec078dad97 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 11:32:12 -0700 Subject: [PATCH 003/335] Increased inflows to 10800 to match density in Bennis ring --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index c3db70f04..4f19b89b5 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -16,7 +16,7 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -ON_RAMP = True +ON_RAMP = False if ON_RAMP: vehicles = VehicleParams() @@ -51,7 +51,7 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=8378, + vehs_per_hour=10800, departLane="random", departSpeed=23) # on ramp From 661564baeaec5f1be107a65b4ba3a4f6ea727c8c Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 004/335] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml 
b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From 5869c581ca884af61902cea9e6acfe52a7b15e80 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:41:31 -0700 Subject: [PATCH 005/335] Convert inflows to pick out the best lane to travel in instead of a random lane --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 4f19b89b5..335461270 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -52,8 +52,8 @@ veh_type="human", edge="119257914", vehs_per_hour=10800, - departLane="random", - departSpeed=23) + departLane="best", + departSpeed=23.0) # on ramp if ON_RAMP: inflow.add( From 903bb729ccd6f4ad174ceac639a6665ded59d131 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 24 Mar 2020 22:49:17 -0700 Subject: [PATCH 006/335] Add 1 lane highway network for Benni --- examples/exp_configs/non_rl/highway.py | 40 +++++++------------ .../exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/networks/highway.py | 2 +- 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index e7505f2d7..1905e2f7f 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,25 +5,19 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv 
+from flow.envs import TestEnv vehicles = VehicleParams() vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) -vehicles.add( - veh_id="human2", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -31,13 +25,7 @@ inflow.add( veh_type="human", edge="highway_0", - probability=0.25, - departLane="free", - departSpeed=20) -inflow.add( - veh_type="human2", - edge="highway_0", - probability=0.25, + vehs_per_hour=10800 / 5.0, departLane="free", departSpeed=20) @@ -47,7 +35,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, + env_name=TestEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -58,12 +46,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - lateral_resolution=1.0, + sim_step=0.5 ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=1500, + horizon=4000, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 335461270..ceb625907 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -5,7 +5,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import I210Router -from flow.core.params import SumoParams +from flow.core.params import SumoParams, SumoCarFollowingParams from 
flow.core.params import EnvParams from flow.core.params import NetParams from flow.core.params import SumoLaneChangeParams diff --git a/flow/networks/highway.py b/flow/networks/highway.py index c63292067..595b0f286 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 4, + "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From 50b017af4b8053874955e5d03e6a5d565d22d4fc Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 26 Mar 2020 10:11:17 -0700 Subject: [PATCH 007/335] dagger initial implementation --- flow/dagger/Untitled.ipynb | 486 +++++++++++++++++++++++++++ flow/dagger/env_params_test.py | 47 +++ flow/dagger/imitating_agent.py | 25 ++ flow/dagger/imitating_controller.py | 78 +++++ flow/dagger/imitating_controller2.py | 94 ++++++ flow/dagger/replay_buffer.py | 60 ++++ flow/dagger/run.py | 73 ++++ flow/dagger/trainer.py | 113 +++++++ flow/dagger/useless.py | 147 ++++++++ flow/dagger/utils.py | 114 +++++++ 10 files changed, 1237 insertions(+) create mode 100644 flow/dagger/Untitled.ipynb create mode 100644 flow/dagger/env_params_test.py create mode 100644 flow/dagger/imitating_agent.py create mode 100644 flow/dagger/imitating_controller.py create mode 100644 flow/dagger/imitating_controller2.py create mode 100644 flow/dagger/replay_buffer.py create mode 100644 flow/dagger/run.py create mode 100644 flow/dagger/trainer.py create mode 100644 flow/dagger/useless.py create mode 100644 flow/dagger/utils.py diff --git a/flow/dagger/Untitled.ipynb b/flow/dagger/Untitled.ipynb new file mode 100644 index 000000000..a6153ffc6 --- /dev/null +++ b/flow/dagger/Untitled.ipynb @@ -0,0 +1,486 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "import tensorflow as tf\n", + 
"import numpy as np\n", + "import gym" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n", + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", + " return f(*args, **kwds)\n" + ] + } + ], + "source": [ + "from env_params import flow_params as flow_params" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", + " PendingDeprecationWarning\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", + " PendingDeprecationWarning\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed 
instead.\n", + " PendingDeprecationWarning\n" + ] + } + ], + "source": [ + "from flow.utils.registry import make_create_env\n", + "from flow.controllers import IDMController, ContinuousRouter\n", + "from flow.core.experiment import Experiment\n", + "from flow.core.params import SumoParams, EnvParams, \\\n", + " InitialConfig, NetParams\n", + "from flow.core.params import VehicleParams\n", + "from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\n", + "import numpy as np\n", + "from flow.core.experiment import Experiment\n", + "from flow.core.params import InFlows\n", + "from flow.core.params import SumoLaneChangeParams\n", + "from flow.core.params import SumoCarFollowingParams\n", + "from flow.core.params import VehicleParams\n", + "from flow.controllers import ContinuousRouter\n", + "from flow.benchmarks.bottleneck0 import flow_params\n", + "from flow.benchmarks.bottleneck0 import SCALING" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "# sim_params = flow_params['sim']\n", + "# env_params = flow_params['env']\n", + "# net_params = flow_params['net']\n", + "\n", + "# # we want no autonomous vehicles in the simulation\n", + "# vehicles = VehicleParams()\n", + "# vehicles.add(veh_id='human',\n", + "# car_following_params=SumoCarFollowingParams(\n", + "# speed_mode=9,\n", + "# ),\n", + "# routing_controller=(ContinuousRouter, {}),\n", + "# lane_change_params=SumoLaneChangeParams(\n", + "# lane_change_mode=0,\n", + "# ),\n", + "# num_vehicles=1 * SCALING)\n", + "\n", + "# # only include human vehicles in inflows\n", + "# flow_rate = 2300 * SCALING\n", + "# inflow = InFlows()\n", + "# inflow.add(veh_type='human', edge='1',\n", + "# vehs_per_hour=flow_rate,\n", + "# departLane='random', departSpeed=10)\n", + "# net_params.inflows = inflow\n", + "\n", + "# # modify the rendering to match what is requested\n", + "# # sim_params.render = render\n", + "\n", + "# # set the evaluation flag to True\n", 
+ "# env_params.evaluate = True\n", + "\n", + "# flow_params['env'].horizon = env_params.horizon" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 28341 is still running\n", + " ResourceWarning, source=self)\n" + ] + }, + { + "data": { + "text/plain": [ + "array([0. , 0.1 , 0.05 , 0. , 0. ,\n", + " 0. , 0.05 , 0.05 , 0. , 0.05 ,\n", + " 0. , 0.05 , 0.05 , 0. , 0.1 ,\n", + " 0. , 0.05 , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0.05 , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0.05 ,\n", + " 0. , 0.05 , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0.3234498 , 0.35290716, 0. , 0. ,\n", + " 0. , 0.39880784, 0.41698796, 0. , 0.4171411 ,\n", + " 0. , 0.49073983, 0.40911561, 0. , 0.43184929,\n", + " 0. , 0.41929399, 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0.27513936, 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0.44301522,\n", + " 0. , 0.44301522, 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. 
])" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "state = env.reset()\n", + "state" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'bottleneck_0'" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "flow_params[\"exp_tag\"]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {}\n", + "for veh_id in env.k.vehicle.get_ids():\n", + " print(veh_id)\n", + " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", + "print(flow_params.env['horizon'])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "state, reward, done, _ = env.step(env.action_space.sample())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from env_params_akash import name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "name" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from env_params_akash import flow_params_akash" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params_akash)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "state = env.reset()\n", + "state" + ] + }, + { + "cell_type": "code", + 
"execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(1000):\n", + " env.step(env.action_space.sample())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {}\n", + "for veh_id in env.k.vehicle.get_ids():\n", + " print(veh_id)\n", + " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.step(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "for i in range(200):\n", + " rl_actions = {}\n", + " for veh_id in env.k.vehicle.get_ids():\n", + " # print(veh_id)\n", + " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", + " print(env.get_state())\n", + " env.step(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.controllers.car_following_models import IDMController\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "vehicle_id = env.k.vehicle.get_ids()[0]\n", + "vehicle_id" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "car_following_params = SumoCarFollowingParams()\n", + "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "idm_controller.get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ob, rew, done, _ = env.step(rl_actions)" + ] + }, + 
{ + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from env_params import flow_params\n", + "create_env, _ = make_create_env(flow_params)\n", + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.k.vehicle.get_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/dagger/env_params_test.py b/flow/dagger/env_params_test.py new file mode 100644 index 000000000..ffd870e86 --- /dev/null +++ b/flow/dagger/env_params_test.py @@ -0,0 +1,47 @@ +from flow.networks.ring import RingNetwork +name = "ring_example" + +from flow.core.params import VehicleParams +vehicles = VehicleParams() + +from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import ContinuousRouter +from imitating_controller import ImitatingController +vehicles.add("human", + acceleration_controller=(IDMController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=22) + +from flow.networks.ring import ADDITIONAL_NET_PARAMS +from flow.core.params import NetParams +net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) + +from flow.core.params import InitialConfig +initial_config = InitialConfig(spacing="uniform", perturbation=1) + +from flow.core.params import TrafficLightParams +traffic_lights = TrafficLightParams() + +from flow.envs.ring.accel import AccelEnv +from flow.core.params import SumoParams 
+sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') + +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.core.params import EnvParams +env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + +flow_params_test = dict( + exp_tag='ring_example', + env_name=AccelEnv, + network=RingNetwork, + simulator='traci', + sim=sim_params, + env=env_params, + net=net_params, + veh=vehicles, + initial=initial_config, + tls=traffic_lights, +) + +# number of time steps +flow_params_test['env'].horizon = 3000 diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py new file mode 100644 index 000000000..1abe33995 --- /dev/null +++ b/flow/dagger/imitating_agent.py @@ -0,0 +1,25 @@ +import numpy as np +import tensorflow as tf +import time +from imitating_controller import * +from replay_buffer + +class Imitating_Agent(object): + def __init__(self, sess, env, params): + self.env = env + self.sess = sess + self.params = params + + self.policy = Imitator_Policy(sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate']) + + self.replay_buffer = ReplayBuffer(self.params['replay_buffer_size']) + + + def train(self, obs, acts): + self.policy.update(obs, acts) + + def add_to_replay_buffer(self, rollout_list): + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py new file mode 100644 index 000000000..c3e03415f --- /dev/null +++ b/flow/dagger/imitating_controller.py @@ -0,0 +1,78 @@ +import numpy as np +import tensorflow +from tensorflow import keras +import tensorflow as tf +from utils import * +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer +from tensorflow.keras.models import Sequential +from tensorflow.keras.layers import Dense 
+from tensorflow.keras.activations import * + + + +class ImitatingController(BaseController): + + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + + BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.model = Sequential() + self.build_network() + + + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + def build_network(self): + self.model.add(Dense(self.size, input_dim=self.obs_dim, activation='tanh')) + for _ in range(self.num_layers): + self.model.add(Dense(self.size, activation='relu')) + # No activation + self.model.add(Dense(self.action_dim)) + self.model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) + + + def train(self, observation_batch, action_batch): + assert(self.training, "Policy must be trainable") + #print("Training: observation_batch is ", observation_batch) + #print("action_batch is ", action_batch) + print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) + print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + + # print("ACTION BATCH: ", action_batch.shape) + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + # print("TEST BATCH: ", observation_batch) + history = self.model.fit(observation_batch, action_batch) + # print("LOSS: ", ret) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + # print("OBS: ", observation) + 
ret_val = self.model.predict(observation) + # print("ACCEL: ", ret_val) + # print("RET_VAL SHAPE", ret_val.shape) + return ret_val + + def get_accel(self, env): + # TODO make this get_accel(self, env) + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py new file mode 100644 index 000000000..64b6798ca --- /dev/null +++ b/flow/dagger/imitating_controller2.py @@ -0,0 +1,94 @@ +import numpy as np +import tensorflow as tf +from utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + + +class ImitatingController(BaseController): + + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + + BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + + + + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() + + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + + def build_network(self): + 
self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() + + def define_placeholders(self): + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + # print('DEBUG ', self.obs_dim) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_mlp(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("ACTION PREDICTIONS TYPE ", type(self.action_predictions)) + + def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): + assert(self.training, "Policy must be trainable") + # print("ACTION BATCH: ", action_batch.shape) + print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) + print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + # print("TEST BATCH: ", observation_batch) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + # print("LOSS: ", ret) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + # print("OBS: ", observation) + ret_val = 
self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] + # print("ACCEL: ", ret_val) + # print("RET_VAL SHAPE", ret_val.shape) + return ret_val + + def get_accel(self, env): + # TODO make this get_accel(self, env) + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/replay_buffer.py b/flow/dagger/replay_buffer.py new file mode 100644 index 000000000..c7bbb56d0 --- /dev/null +++ b/flow/dagger/replay_buffer.py @@ -0,0 +1,60 @@ +import time +import numpy as np +import tensorflow as tf +import gym +import os +from utils import * + + +class ReplayBuffer(object): + def __init__(self, max_size=100000): + + self.max_size = max_size + + # store each rollout + self.rollouts = [] + + # store component arrays from each rollout + self.observations = None + self.actions = None + self.expert_actions = None + self.rewards = None + self.next_observations = None + self.terminals = None + + + def add_rollouts(self, rollouts_list): + """ + Add a list of rollouts to the replay buffer + """ + + for rollout in rollouts_list: + self.rollouts.append(rollout) + + observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + if self.observations is None: + self.observations = observations[-self.max_size:] + self.actions = actions[-self.max_size:] + self.expert_actions = expert_actions[-self.max_size:] + self.rewards = rewards[-self.max_size:] + self.next_observations = next_observations[-self.max_size:] + self.terminals = terminals[-self.max_size:] + else: + self.observations = np.concatenate([self.observations, observations])[-self.max_size:] + 
print("SHAPES: ", self.actions.shape, actions.shape) + self.actions = np.concatenate([self.actions, actions])[-self.max_size:] + self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] + self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] + self.next_observations = np.concatenate([self.next_observations, next_observations])[-self.max_size:] + self.terminals = np.concatenate([self.terminals, terminals])[-self.max_size:] + + def sample_batch(self, batch_size): + """ + Sample a batch of data (with size batch_size) from replay buffer. + Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals + """ + assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None + + size = len(self.observations) + rand_inds = np.random.randint(0, size, batch_size) + return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] diff --git a/flow/dagger/run.py b/flow/dagger/run.py new file mode 100644 index 000000000..40e200505 --- /dev/null +++ b/flow/dagger/run.py @@ -0,0 +1,73 @@ +import os +import time +import numpy as np +import tensorflow as tf +from trainer import Trainer +from flow.controllers.car_following_models import IDMController + + +class Runner(object): + + def __init__(self, params): + + + # agent_params = { + # 'n_layers': params['n_layers'], + # 'size': params['size'], + # 'learning_rate': params['learning_rate'], + # 'max_replay_buffer_size': params['max_replay_buffer_size'], + # } + # + # self.params = params + # self.params['agent_class'] = BCAgent + # self.params['agent_params'] = agent_params + + # initialize trainer + self.params = params + self.trainer = Trainer(params) + + + + def run_training_loop(self): + + 
self.trainer.run_training_loop(n_iter=self.params['n_iter']) + + +def main(): + import argparse + parser = argparse.ArgumentParser() + # parser.add_argument('--expert_policy_file', '-epf', type=str, required=True) # relative to where you're running this script from + # parser.add_argument('--expert_data', '-ed', type=str, required=True) #relative to where you're running this script from + # parser.add_argument('--env_name', '-env', type=str, help='choices: Ant-v2, Humanoid-v2, Walker-v2, HalfCheetah-v2, Hopper-v2', required=True) + # parser.add_argument('--exp_name', '-exp', type=str, default='pick an experiment name', required=True) + # parser.add_argument('--do_dagger', action='store_true') + parser.add_argument('--ep_len', type=int) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--n_iter', '-n', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=5000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + parser.add_argument('--num_layers', type=int, default=2) # depth, of policy to be learned + parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned + parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + + args = parser.parse_args() + + # convert args to dictionary + params = vars(args) + + assert args.n_iter>1, ('DAGGER needs more than 1 iteration (n_iter>1) of training, to iteratively query the expert and train (after 1st warmstarting from behavior cloning).') + + + # run training + train = Runner(params) + 
train.run_training_loop() + +if __name__ == "__main__": + main() diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py new file mode 100644 index 000000000..f48b9058d --- /dev/null +++ b/flow/dagger/trainer.py @@ -0,0 +1,113 @@ +import time +from collections import OrderedDict +import pickle +import numpy as np +import tensorflow as tf +import gym +import os +from flow.utils.registry import make_create_env +from env_params_test import flow_params_test +from imitating_controller2 import ImitatingController +from flow.controllers.car_following_models import IDMController +from flow.core.params import SumoCarFollowingParams +from utils import * + +class Trainer(object): + + def __init__(self, params): + self.params = params + self.sess = create_tf_session() + + # TODO: replace this with appropriate Flow env + # print('ERROR CHECK ', flow_params_test['exp_tag']) + create_env, _ = make_create_env(flow_params_test) + self.env = create_env() + self.env.reset() + + self.vehicle_id = self.env.k.vehicle.get_ids()[0] + + obs_dim = self.env.observation_space.shape[0] + + # TODO: make sure this is correct + action_dim = (1,)[0] + self.params['action_dim'] = action_dim + self.params['obs_dim'] = obs_dim + + car_following_params = SumoCarFollowingParams() + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params) + self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + + tf.global_variables_initializer().run(session=self.sess) + + + def run_training_loop(self, n_iter): + """ + :param n_iter: number of (dagger) iterations + :param collect_policy: + :param eval_policy: + :param initial_expertdata: + :param relabel_with_expert: whether to perform dagger + :param start_relabel_with_expert: iteration at which to 
start relabel with expert + :param expert_policy: + """ + + # init vars at beginning of training + self.total_envsteps = 0 + self.start_time = time.time() + + for itr in range(n_iter): + print("\n\n********** Iteration %i ************"%itr) + + # collect trajectories, to be used for training + if itr == 0: + training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) + else: + training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) + + paths, envsteps_this_batch = training_returns + self.total_envsteps += envsteps_this_batch + + # add collected data to replay buffer + self.controller.add_to_replay_buffer(paths) + + # train agent (using sampled data from replay buffer) + loss = self.train_controller() + + def collect_training_trajectories(self, itr, batch_size): + """ + :param itr: + :param load_initial_expertdata: path to expert data pkl file + :param collect_policy: the current policy using which we collect data + :param batch_size: the number of transitions we collect + :return: + paths: a list trajectories + envsteps_this_batch: the sum over the numbers of environment steps in paths + train_video_paths: paths which also contain videos for visualization purposes + """ + + if itr == 0: + collect_controller = self.expert_controller + else: + collect_controller = self.controller + + print("\nCollecting data to be used for training...") + paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + + return paths, envsteps_this_batch + + def train_controller(self): + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): + # TODO: fix this + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + self.controller.train(ob_batch, expert_ac_batch) 
+ + + # def do_relabel_with_expert(self, paths): + # print("Relabelling collected observations with labels from an expert policy...") + # + # for i in range(len(paths)): + # acs = self.expert_policy.get_action(paths[i]["observation"]) + # paths[i]["action"] = acs + # + # return paths diff --git a/flow/dagger/useless.py b/flow/dagger/useless.py new file mode 100644 index 000000000..86f3ee9ad --- /dev/null +++ b/flow/dagger/useless.py @@ -0,0 +1,147 @@ +# """Benchmark for bottleneck0. +# Bottleneck in which the actions are specifying a desired velocity in a segment +# of space. The autonomous penetration rate in this example is 10%. +# - **Action Dimension**: (?, ) +# - **Observation Dimension**: (?, ) +# - **Horizon**: 1000 steps +# """ +# from flow.envs import BottleneckDesiredVelocityEnv +# from flow.networks import BottleneckNetwork +# from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ +# InFlows, SumoCarFollowingParams, SumoLaneChangeParams +# from flow.core.params import TrafficLightParams +# from flow.core.params import VehicleParams +# from flow.controllers import RLController, ContinuousRouter +# +# # time horizon of a single rollout +# HORIZON = 1500 +# +# SCALING = 1 +# NUM_LANES = 4 * SCALING # number of lanes in the widest highway +# DISABLE_TB = True +# DISABLE_RAMP_METER = True +# AV_FRAC = 0.10 +# +# vehicles = VehicleParams() +# vehicles.add( +# veh_id="human", +# routing_controller=(ContinuousRouter, {}), +# car_following_params=SumoCarFollowingParams( +# speed_mode=9, +# ), +# lane_change_params=SumoLaneChangeParams( +# lane_change_mode=0, +# ), +# num_vehicles=1 * SCALING) +# vehicles.add( +# veh_id="rl", +# acceleration_controller=(RLController, {}), +# routing_controller=(ContinuousRouter, {}), +# car_following_params=SumoCarFollowingParams( +# speed_mode=9, +# ), +# lane_change_params=SumoLaneChangeParams( +# lane_change_mode=0, +# ), +# num_vehicles=1 * SCALING) +# +# controlled_segments = [("1", 1, False), ("2", 
2, True), ("3", 2, True), +# ("4", 2, True), ("5", 1, False)] +# num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] +# +# additional_env_params = { +# "target_velocity": 40, +# "disable_tb": True, +# "disable_ramp_metering": True, +# "controlled_segments": controlled_segments, +# "symmetric": False, +# "observed_segments": num_observed_segments, +# "reset_inflow": False, +# "lane_change_duration": 5, +# "max_accel": 3, +# "max_decel": 3, +# "inflow_range": [1200, 2500] +# } +# +# # flow rate +# flow_rate = 2000 * SCALING +# +# # percentage of flow coming out of each lane +# inflow = InFlows() +# inflow.add( +# veh_type="human", +# edge="1", +# vehs_per_hour=flow_rate * (1 - AV_FRAC), +# departLane="random", +# departSpeed=10) +# inflow.add( +# veh_type="rl", +# edge="1", +# vehs_per_hour=flow_rate * AV_FRAC, +# departLane="random", +# departSpeed=10) +# +# traffic_lights = TrafficLightParams() +# if not DISABLE_TB: +# traffic_lights.add(node_id="2") +# if not DISABLE_RAMP_METER: +# traffic_lights.add(node_id="3") +# +# additional_net_params = {"scaling": SCALING, "speed_limit": 23} +# net_params = NetParams( +# inflows=inflow, +# additional_params=additional_net_params) +# +# flow_params = dict( +# # name of the experiment +# exp_tag="bottleneck_0", +# +# # name of the flow environment the experiment is running on +# env_name=BottleneckDesiredVelocityEnv, +# +# # name of the network class the experiment is running on +# network=BottleneckNetwork, +# +# # simulator that is used by the experiment +# simulator='traci', +# +# # sumo-related parameters (see flow.core.params.SumoParams) +# sim=SumoParams( +# sim_step=0.5, +# render=False, +# print_warnings=False, +# restart_instance=True, +# ), +# +# # environment related parameters (see flow.core.params.EnvParams) +# env=EnvParams( +# warmup_steps=40, +# sims_per_step=1, +# horizon=HORIZON, +# additional_params=additional_env_params, +# ), +# +# # network-related parameters (see 
flow.core.params.NetParams and the +# # network's documentation or ADDITIONAL_NET_PARAMS component) +# net=NetParams( +# inflows=inflow, +# additional_params=additional_net_params, +# ), +# +# # vehicles to be placed in the network at the start of a rollout (see +# # flow.core.params.VehicleParams) +# veh=vehicles, +# +# # parameters specifying the positioning of vehicles upon initialization/ +# # reset (see flow.core.params.InitialConfig) +# initial=InitialConfig( +# spacing="uniform", +# min_gap=5, +# lanes_distribution=float("inf"), +# edges_distribution=["2", "3", "4", "5"], +# ), +# +# # traffic lights to be introduced to specific nodes (see +# # flow.core.params.TrafficLightParams) +# tls=traffic_lights, +# ) diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py new file mode 100644 index 000000000..9074cb205 --- /dev/null +++ b/flow/dagger/utils.py @@ -0,0 +1,114 @@ +import tensorflow as tf +import os +import numpy as np +import math + +# class agnostic helper functions + +def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): + observation = env.reset() + + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] + traj_length = 0 + + while True: + observations.append(observation) + action = controller.get_action(env) + assert action is not None, "action is None" + assert (not math.isnan(action)), "action is a nan" + + actions.append(action) + + expert_action = expert_controller.get_action(env) + assert expert_action is not None, "expert actio is None" + assert (not math.isnan(expert_action)), "expert action is a nan" + expert_actions.append(expert_action) + + rl_actions = {} + for veh_id in env.k.vehicle.get_ids(): + if veh_id == vehicle_id: + rl_actions[veh_id] = action + else: + rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) + + observation, reward, done, _ = env.step(rl_actions) + traj_length += 1 + next_observations.append(observation) 
+ rewards.append(reward) + terminate_rollout = traj_length == max_trajectory_length or done + terminals.append(terminate_rollout) + + if terminate_rollout: + break + + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + + +def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): + total_envsteps = 0 + trajectories = [] + + while total_envsteps < min_batch_timesteps: + trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + trajectories.append(trajectory) + + traj_env_steps = len(trajectory["rewards"]) + total_envsteps += traj_env_steps + + return trajectories, total_envsteps + +def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): + return {"observations" : np.array(observations, dtype=np.float32), + "actions" : np.array(actions, dtype=np.float32), + "expert_actions": np.array(expert_actions, dtype=np.float32), + "rewards" : np.array(rewards, dtype=np.float32), + "next_observations": np.array(next_observations, dtype=np.float32), + "terminals": np.array(terminals, dtype=np.float32)} + +def unpack_rollouts(rollouts_list): + """ + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + rollouts: list of rollout dictionaries + rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" + return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + """ + observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) + actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) + expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) + rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) + next_observations = 
np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) + terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) + + return observations, actions, expert_actions, rewards, next_observations, terminals + + +# Below are tensorflow related functions +def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): + """ + Builds a MLP + + arguments: + input_placeholder: placeholder variable for the state (batch_size, input_size) + scope: variable scope of the network + + n_layers: number of hidden layers + size: dimension of each hidden layer + activation: activation of each hidden layer + + output_size: size of the output layer + output_activation: activation of the output layer + + returns: + output_placeholder: the result of a forward pass through the hidden layers + the output layer + """ + output_placeholder = input_placeholder + with tf.variable_scope(scope): + for _ in range(n_layers): + output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) + output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) + return output_placeholder + +def create_tf_session(): + config = tf.ConfigProto(device_count={'GPU': 0}) + sess = tf.Session(config=config) + return sess From 91144cae9ddb0651603ab32b8652ca11cf7f9579 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 26 Mar 2020 12:46:39 -0700 Subject: [PATCH 008/335] Add current dev version of multiagent I210 --- environment.yml | 4 +- .../rl/multiagent/multiagent_i210.py | 38 +++-- examples/train.py | 146 +++++++++++++----- requirements.txt | 4 +- 4 files changed, 132 insertions(+), 60 deletions(-) diff --git a/environment.yml b/environment.yml index f57c8d33d..480ea7eba 100644 --- a/environment.yml +++ b/environment.yml @@ -21,9 +21,11 @@ dependencies: - matplotlib==3.0.0 - dill - lz4 - - ray==0.7.3 + - ray==0.8.0 - setproctitle - psutil - opencv-python - 
boto3==1.4.8 - redis~=2.10.6 + - tabulate + - pytz diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 94f709ff4..872568cab 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -8,8 +8,9 @@ from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy from ray.tune.registry import register_env +from flow.controllers import RLController +from flow.controllers.car_following_models import IDMController import flow.config as config -from flow.controllers.rlcontroller import RLController from flow.core.params import EnvParams from flow.core.params import NetParams from flow.core.params import InitialConfig @@ -23,14 +24,8 @@ # SET UP PARAMETERS FOR THE SIMULATION -# number of training iterations -N_TRAINING_ITERATIONS = 200 -# number of rollouts per training iteration -N_ROLLOUTS = 2 # number of steps per rollout -HORIZON = 500 -# number of parallel workers -N_CPUS = 1 +HORIZON = 4000 # percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10 @@ -38,10 +33,12 @@ # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ - 'max_accel': 1, - 'max_decel': 1, + 'max_accel': 2.6, + 'max_decel': 4.5, # configure the observation space. Look at the I210MultiEnv class for more info. 
'lead_obs': True, + # whether to add in a reward for the speed of nearby vehicles + "local_reward": True }) # CREATE VEHICLE TYPES AND INFLOWS @@ -50,9 +47,8 @@ vehicles.add( "human", num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ) + lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.6}), ) vehicles.add( "av", @@ -68,11 +64,11 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=8378 * pen_rate, + vehs_per_hour=int(10800 * (1 - pen_rate)), # probability=1.0, departLane="random", departSpeed=20) -# on ramp +# # on ramp # inflow.add( # veh_type="human", # edge="27414345", @@ -91,7 +87,7 @@ inflow.add( veh_type="av", edge="119257914", - vehs_per_hour=int(8378 * pen_rate), + vehs_per_hour=int(10800 * pen_rate), # probability=1.0, departLane="random", departSpeed=20) @@ -128,16 +124,18 @@ # simulation-related parameters sim=SumoParams( - sim_step=0.8, + sim_step=0.5, render=False, - color_by_speed=True, - restart_instance=True + color_by_speed=False, + restart_instance=True, + use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, sims_per_step=1, + warmup_steps=0, additional_params=additional_env_params, ), @@ -171,7 +169,7 @@ obs_space = test_env.observation_space act_space = test_env.action_space -POLICY_GRAPHS = {'av': (PPOTFPolicy, obs_space, act_space, {})} +POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} POLICIES_TO_TRAIN = ['av'] diff --git a/examples/train.py b/examples/train.py index a1288e2f0..1f2cd6300 100644 --- a/examples/train.py +++ b/examples/train.py @@ -7,12 +7,16 @@ python train.py EXP_CONFIG """ import argparse +from datetime import datetime import json import os import sys from time import strftime from copy import deepcopy +import numpy as np +import pytz + from stable_baselines.common.vec_env import DummyVecEnv, 
SubprocVecEnv from stable_baselines import PPO2 @@ -26,14 +30,15 @@ from ray.rllib.agents.registry import get_agent_class from flow.core.util import ensure_dir +from flow.core.rewards import energy_consumption from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params from flow.utils.registry import make_create_env + def parse_args(args): """Parse training options user can specify in command line. - Returns ------- argparse.Namespace @@ -54,16 +59,35 @@ def parse_args(args): parser.add_argument( '--rl_trainer', type=str, default="rllib", help='the RL trainer to use. either rllib or Stable-Baselines') - + parser.add_argument( + '--algorithm', type=str, default="PPO", + help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' + ) + parser.add_argument('--exp_title', type=str, default='test', + help='Informative experiment title to help distinguish results') parser.add_argument( '--num_cpus', type=int, default=1, help='How many CPUs to use') parser.add_argument( '--num_steps', type=int, default=5000, - help='How many total steps to perform learning over') + help='How many total steps to perform learning over. 
Relevant for stable-baselines') + parser.add_argument( + '--grid_search', action='store_true', default=False, + help='Whether to grid search over hyperparams') + parser.add_argument( + '--num_iterations', type=int, default=200, + help='How many iterations are in a training run.') + parser.add_argument( + '--num_rollouts', type=int, default=1, + help='How many rollouts are in a training batch') parser.add_argument( '--rollout_size', type=int, default=1000, help='How many steps are in a training batch.') + parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--local_mode', action='store_true', default=False, + help='If true only 1 CPU will be used') + parser.add_argument('--render', action='store_true', default=False, + help='If true, we render the display') parser.add_argument( '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') @@ -110,11 +134,12 @@ def run_model_stablebaseline(flow_params, def setup_exps_rllib(flow_params, n_cpus, n_rollouts, + flags, policy_graphs=None, policy_mapping_fn=None, - policies_to_train=None): + policies_to_train=None, + ): """Return the relevant components of an RLlib experiment. 
- Parameters ---------- flow_params : dict @@ -123,13 +148,14 @@ def setup_exps_rllib(flow_params, number of CPUs to run the experiment over n_rollouts : int number of rollouts per training iteration + flags: + custom arguments policy_graphs : dict, optional TODO policy_mapping_fn : function, optional TODO policies_to_train : list of str, optional TODO - Returns ------- str @@ -141,20 +167,59 @@ def setup_exps_rllib(flow_params, """ horizon = flow_params['env'].horizon - alg_run = "PPO" - - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) - - config["num_workers"] = n_cpus - config["train_batch_size"] = horizon * n_rollouts - config["gamma"] = 0.999 # discount rate - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) - config["use_gae"] = True - config["lambda"] = 0.97 - config["kl_target"] = 0.02 - config["num_sgd_iter"] = 10 - config["horizon"] = horizon + alg_run = flags.algorithm.upper() + + if alg_run == "PPO": + agent_cls = get_agent_class(alg_run) + config = deepcopy(agent_cls._default_config) + + config["num_workers"] = n_cpus + config["horizon"] = horizon + config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["train_batch_size"] = horizon * n_rollouts + config["gamma"] = 0.999 # discount rate + config["use_gae"] = True + config["lambda"] = 0.97 + config["kl_target"] = 0.02 + config["num_sgd_iter"] = 10 + elif alg_run == "TD3": + agent_cls = get_agent_class(alg_run) + config = deepcopy(agent_cls._default_config) + + config["num_workers"] = n_cpus + config["horizon"] = horizon + config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems + if flags.grid_search: + config["prioritized_replay"] = tune.grid_search(['True', 'False']) + config["actor_lr"] = tune.grid_search([1e-3, 1e-4]) + config["critic_lr"] = tune.grid_search([1e-3, 1e-4]) + config["n_step"] = tune.grid_search([1, 10]) + else: + sys.exit("We only support PPO and TD3 right now.") + + # define some standard and useful 
callbacks + def on_episode_start(info): + episode = info["episode"] + episode.user_data["avg_speed"] = [] + episode.user_data["avg_energy"] = [] + + def on_episode_step(info): + episode = info["episode"] + env = info["env"].get_unwrapped()[0] + speed = np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]) + if not np.isnan(speed): + episode.user_data["avg_speed"].append(speed) + episode.user_data["avg_energy"].append(energy_consumption(env)) + + def on_episode_end(info): + episode = info["episode"] + avg_speed = np.mean(episode.user_data["avg_speed"]) + episode.custom_metrics["avg_speed"] = avg_speed + episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"]) + + config["callbacks"] = {"on_episode_start": tune.function(on_episode_start), + "on_episode_step": tune.function(on_episode_step), + "on_episode_end": tune.function(on_episode_end)} # save the flow params for replay flow_json = json.dumps( @@ -167,8 +232,7 @@ def setup_exps_rllib(flow_params, print("policy_graphs", policy_graphs) config['multiagent'].update({'policies': policy_graphs}) if policy_mapping_fn is not None: - config['multiagent'].update( - {'policy_mapping_fn': tune.function(policy_mapping_fn)}) + config['multiagent'].update({'policy_mapping_fn': tune.function(policy_mapping_fn)}) if policies_to_train is not None: config['multiagent'].update({'policies_to_train': policies_to_train}) @@ -182,34 +246,40 @@ def setup_exps_rllib(flow_params, def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" flow_params = submodule.flow_params - n_cpus = submodule.N_CPUS - n_rollouts = submodule.N_ROLLOUTS + flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) policies_to_train = getattr(submodule, "policies_to_train", None) alg_run, gym_name, config = setup_exps_rllib( - flow_params, n_cpus, 
n_rollouts, + flow_params, flags.num_cpus, flags.num_rollouts, flags, policy_graphs, policy_mapping_fn, policies_to_train) - ray.init(num_cpus=n_cpus + 1, object_store_memory=200 * 1024 * 1024) - exp_config = { - "run": alg_run, - "env": gym_name, - "config": { - **config - }, + config['num_workers'] = flags.num_cpus + config['env'] = gym_name + + if flags.local_mode: + ray.init(local_mode=True) + else: + ray.init() + exp_dict = { + "run_or_experiment": alg_run, + "name": gym_name, + "config": config, "checkpoint_freq": 20, "checkpoint_at_end": True, - "max_failures": 999, + "max_failures": 0, "stop": { - "training_iteration": flags.num_steps, + "training_iteration": flags.num_iterations, }, } - - if flags.checkpoint_path is not None: - exp_config['restore'] = flags.checkpoint_path - run_experiments({flow_params["exp_tag"]: exp_config}) + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + s3_string = "s3://i210.experiments/i210/" \ + + date + '/' + flags.exp_title + if flags.use_s3: + exp_dict['upload_dir'] = s3_string + tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) def train_h_baselines(flow_params, args, multiagent): diff --git a/requirements.txt b/requirements.txt index 546cb4e26..4569dfca5 100644 --- a/requirements.txt +++ b/requirements.txt @@ -14,7 +14,7 @@ pyglet==1.3.2 matplotlib==3.1.0 imutils==0.5.1 numpydoc -ray==0.7.3 +ray==0.8.0 opencv-python dill lz4 @@ -25,3 +25,5 @@ boto3==1.4.8 redis~=2.10.6 pandas==0.24.2 plotly==2.4.0 +tabulate +pytz \ No newline at end of file From 1fc027d95ac18248ca0df1e7e5c0dbc622c6ca19 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 26 Mar 2020 12:48:08 -0700 Subject: [PATCH 009/335] Changed environment to be single agent RL --- flow/dagger/.idea/dagger.iml | 12 ++++ flow/dagger/Untitled.ipynb | 103 +++++++++++++++++++----------- flow/dagger/env_params_test.py | 110 +++++++++++++++++++++----------- flow/dagger/env_params_test2.py | 47 
++++++++++++++ flow/dagger/trainer.py | 7 +- flow/dagger/utils.py | 30 ++++++--- 6 files changed, 221 insertions(+), 88 deletions(-) create mode 100644 flow/dagger/.idea/dagger.iml create mode 100644 flow/dagger/env_params_test2.py diff --git a/flow/dagger/.idea/dagger.iml b/flow/dagger/.idea/dagger.iml new file mode 100644 index 000000000..0bc0e0321 --- /dev/null +++ b/flow/dagger/.idea/dagger.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/flow/dagger/Untitled.ipynb b/flow/dagger/Untitled.ipynb index a6153ffc6..0f1ac9809 100644 --- a/flow/dagger/Untitled.ipynb +++ b/flow/dagger/Untitled.ipynb @@ -43,29 +43,14 @@ "metadata": {}, "outputs": [ { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n", - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/importlib/_bootstrap.py:219: RuntimeWarning: numpy.ufunc size changed, may indicate binary incompatibility. 
Expected 192 from C header, got 216 from PyObject\n", - " return f(*args, **kwds)\n" + "ename": "ModuleNotFoundError", + "evalue": "No module named 'env_params'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0menv_params\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'env_params'" ] } ], @@ -283,7 +268,7 @@ "metadata": {}, "outputs": [], "source": [ - "from env_params_akash import name" + "from env_params_test import name" ] }, { @@ -297,25 +282,25 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ - "from env_params_akash import flow_params_akash" + "from env_params_test import flow_params" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "create_env, _ = make_create_env(flow_params_akash)" + "create_env, _ = make_create_env(flow_params)" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 7, "metadata": {}, "outputs": [], "source": [ @@ -324,9 +309,31 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "-----------------------\n", + "ring length: 265\n", + "v_max: 5.37714246265477\n", + "-----------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "array([ 0.31246011, -0.00413767, 0.04496073])" + ] + }, + "execution_count": 8, + 
"metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "state = env.reset()\n", "state" @@ -334,24 +341,42 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 12, "metadata": {}, "outputs": [], "source": [ - "for i in range(1000):\n", - " env.step(env.action_space.sample())" + "vehicle_id = 'rl_0'" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 13, "metadata": {}, "outputs": [], "source": [ - "rl_actions = {}\n", - "for veh_id in env.k.vehicle.get_ids():\n", - " print(veh_id)\n", - " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)" + "from flow.controllers.car_following_models import IDMController\n", + "car_following_params = SumoCarFollowingParams()\n", + "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(array([ 0.31200989, -0.00526746, 0.04493147]), 0.6436939709782903, False, {})" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.step(idm_controller.get_action(env))" ] }, { diff --git a/flow/dagger/env_params_test.py b/flow/dagger/env_params_test.py index ffd870e86..20ced1ce9 100644 --- a/flow/dagger/env_params_test.py +++ b/flow/dagger/env_params_test.py @@ -1,47 +1,85 @@ -from flow.networks.ring import RingNetwork -name = "ring_example" +"""Ring road example. +Trains a single autonomous vehicle to stabilize the flow of 21 human-driven +vehicles in a variable length ring road. 
+""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams +from flow.core.params import VehicleParams, SumoCarFollowingParams +from flow.controllers import RLController, IDMController, ContinuousRouter +from flow.envs import WaveAttenuationPOEnv +from flow.networks import RingNetwork -from flow.core.params import VehicleParams +# time horizon of a single rollout +HORIZON = 3000 +# number of rollouts per training iteration +N_ROLLOUTS = 20 +# number of parallel workers +N_CPUS = 2 + +# We place one autonomous vehicle and 22 human-driven vehicles in the network vehicles = VehicleParams() +vehicles.add( + veh_id="human", + acceleration_controller=(IDMController, { + "noise": 0.2 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0 + ), + routing_controller=(ContinuousRouter, {}), + num_vehicles=21) +vehicles.add( + veh_id="rl", + acceleration_controller=(RLController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=1) -from flow.controllers.car_following_models import IDMController -from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller import ImitatingController -vehicles.add("human", - acceleration_controller=(IDMController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=22) +flow_params = dict( + # name of the experiment + exp_tag="stabilizing_the_ring", -from flow.networks.ring import ADDITIONAL_NET_PARAMS -from flow.core.params import NetParams -net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) + # name of the flow environment the experiment is running on + env_name=WaveAttenuationPOEnv, -from flow.core.params import InitialConfig -initial_config = InitialConfig(spacing="uniform", perturbation=1) + # name of the network class the experiment is running on + network=RingNetwork, -from flow.core.params import TrafficLightParams -traffic_lights = TrafficLightParams() + # simulator that is used by the experiment + simulator='traci', -from 
flow.envs.ring.accel import AccelEnv -from flow.core.params import SumoParams -sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.1, + render=False, + restart_instance=False + ), -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS -from flow.core.params import EnvParams -env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=750, + clip_actions=False, + additional_params={ + "max_accel": 1, + "max_decel": 1, + "ring_length": [220, 270], + }, + ), -flow_params_test = dict( - exp_tag='ring_example', - env_name=AccelEnv, - network=RingNetwork, - simulator='traci', - sim=sim_params, - env=env_params, - net=net_params, + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + additional_params={ + "length": 260, + "lanes": 1, + "speed_limit": 30, + "resolution": 40, + }, ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) veh=vehicles, - initial=initial_config, - tls=traffic_lights, -) -# number of time steps -flow_params_test['env'].horizon = 3000 + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py new file mode 100644 index 000000000..7140af720 --- /dev/null +++ b/flow/dagger/env_params_test2.py @@ -0,0 +1,47 @@ +from flow.networks.ring import RingNetwork +name = "ring_example" + +from flow.core.params import VehicleParams +vehicles = VehicleParams() + +from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import ContinuousRouter +from 
imitating_controller2 import ImitatingController +vehicles.add("human", + acceleration_controller=(IDMController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=22) + +from flow.networks.ring import ADDITIONAL_NET_PARAMS +from flow.core.params import NetParams +net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) + +from flow.core.params import InitialConfig +initial_config = InitialConfig(spacing="uniform", perturbation=1) + +from flow.core.params import TrafficLightParams +traffic_lights = TrafficLightParams() + +from flow.envs.ring.accel import AccelEnv +from flow.core.params import SumoParams +sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') + +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.core.params import EnvParams +env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + +flow_params_test = dict( + exp_tag='ring_example', + env_name=AccelEnv, + network=RingNetwork, + simulator='traci', + sim=sim_params, + env=env_params, + net=net_params, + veh=vehicles, + initial=initial_config, + tls=traffic_lights, +) + +# number of time steps +flow_params_test['env'].horizon = 3000 diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index f48b9058d..9dd7ae326 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -6,7 +6,7 @@ import gym import os from flow.utils.registry import make_create_env -from env_params_test import flow_params_test +from env_params_test import flow_params from imitating_controller2 import ImitatingController from flow.controllers.car_following_models import IDMController from flow.core.params import SumoCarFollowingParams @@ -20,11 +20,12 @@ def __init__(self, params): # TODO: replace this with appropriate Flow env # print('ERROR CHECK ', flow_params_test['exp_tag']) - create_env, _ = make_create_env(flow_params_test) + create_env, _ = make_create_env(flow_params) self.env = create_env() self.env.reset() - self.vehicle_id = 
self.env.k.vehicle.get_ids()[0] + assert 'rl_0' in self.env.k.vehicle.get_ids() + self.vehicle_id = 'rl_0' obs_dim = self.env.observation_space.shape[0] diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py index 9074cb205..00ae864aa 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -6,32 +6,42 @@ # class agnostic helper functions def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): + print("CONTROLLER: ", controller) observation = env.reset() + print("VEHICLE ID: ", vehicle_id) + print("VEHICLE IDS: ", env.k.vehicle.get_ids()) + assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] traj_length = 0 while True: observations.append(observation) action = controller.get_action(env) - assert action is not None, "action is None" - assert (not math.isnan(action)), "action is a nan" + #assert action is not None, "action is None" + #assert (not math.isnan(action)), "action is a nan" + assert not (len(env.k.vehicle.get_edge(vehicle_id)) == 0), "Case One" + assert not (env.k.vehicle.get_edge(vehicle_id)[0] == ":"), "Case Two" actions.append(action) expert_action = expert_controller.get_action(env) - assert expert_action is not None, "expert actio is None" + assert env is not None, "environment is None" + assert expert_action is not None, "expert action is None" assert (not math.isnan(expert_action)), "expert action is a nan" expert_actions.append(expert_action) - rl_actions = {} - for veh_id in env.k.vehicle.get_ids(): - if veh_id == vehicle_id: - rl_actions[veh_id] = action - else: - rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) + # rl_actions = {} + # for veh_id in env.k.vehicle.get_ids(): + # if veh_id == vehicle_id: + # rl_actions[veh_id] = action + # else: + # rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) + + # observation, reward, 
done, _ = env.step(rl_actions) + observation, reward, done, _ = env.step(action) - observation, reward, done, _ = env.step(rl_actions) traj_length += 1 next_observations.append(observation) rewards.append(reward) From d01aeb5548ab583c4aae03ad28e4c5607dd4fcbe Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 26 Mar 2020 13:26:44 -0700 Subject: [PATCH 010/335] cleaned up code --- flow/dagger/imitating_agent.py | 3 ++- flow/dagger/imitating_controller.py | 12 +++--------- flow/dagger/imitating_controller2.py | 9 ++------- flow/dagger/replay_buffer.py | 1 - flow/dagger/run.py | 19 ------------------- flow/dagger/trainer.py | 3 +-- flow/dagger/utils.py | 9 +++++++-- 7 files changed, 15 insertions(+), 41 deletions(-) diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py index 1abe33995..4e9e3f443 100644 --- a/flow/dagger/imitating_agent.py +++ b/flow/dagger/imitating_agent.py @@ -1,10 +1,11 @@ import numpy as np import tensorflow as tf import time -from imitating_controller import * +from imitating_controller2 import * from replay_buffer class Imitating_Agent(object): + # ignore this class! 
def __init__(self, sess, env, params): self.env = env self.sess = sess diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index c3e03415f..f85040856 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -12,6 +12,7 @@ class ImitatingController(BaseController): + # Implementation in Keras just for testing def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): @@ -44,29 +45,22 @@ def build_network(self): def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") - #print("Training: observation_batch is ", observation_batch) - #print("action_batch is ", action_batch) + print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) - # print("ACTION BATCH: ", action_batch.shape) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - # print("TEST BATCH: ", observation_batch) history = self.model.fit(observation_batch, action_batch) - # print("LOSS: ", ret) def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] - # print("OBS: ", observation) ret_val = self.model.predict(observation) - # print("ACCEL: ", ret_val) - # print("RET_VAL SHAPE", ret_val.shape) + return ret_val def get_accel(self, env): - # TODO make this get_accel(self, env) # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py index 64b6798ca..ce3b26b0e 100644 --- 
a/flow/dagger/imitating_controller2.py +++ b/flow/dagger/imitating_controller2.py @@ -8,6 +8,7 @@ class ImitatingController(BaseController): + # Implementation in Tensorflow def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): @@ -43,7 +44,6 @@ def build_network(self): def define_placeholders(self): self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - # print('DEBUG ', self.obs_dim) self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) if self.training: @@ -67,22 +67,17 @@ def train(self, observation_batch, action_batch): print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - # print("TEST BATCH: ", observation_batch) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - # print("LOSS: ", ret) def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] - # print("OBS: ", observation) ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - # print("ACCEL: ", ret_val) - # print("RET_VAL SHAPE", ret_val.shape) + return ret_val def get_accel(self, env): - # TODO make this get_accel(self, env) # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) diff --git a/flow/dagger/replay_buffer.py b/flow/dagger/replay_buffer.py index 
c7bbb56d0..a6717fc90 100644 --- a/flow/dagger/replay_buffer.py +++ b/flow/dagger/replay_buffer.py @@ -41,7 +41,6 @@ def add_rollouts(self, rollouts_list): self.terminals = terminals[-self.max_size:] else: self.observations = np.concatenate([self.observations, observations])[-self.max_size:] - print("SHAPES: ", self.actions.shape, actions.shape) self.actions = np.concatenate([self.actions, actions])[-self.max_size:] self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] diff --git a/flow/dagger/run.py b/flow/dagger/run.py index 40e200505..162059d5f 100644 --- a/flow/dagger/run.py +++ b/flow/dagger/run.py @@ -10,24 +10,10 @@ class Runner(object): def __init__(self, params): - - # agent_params = { - # 'n_layers': params['n_layers'], - # 'size': params['size'], - # 'learning_rate': params['learning_rate'], - # 'max_replay_buffer_size': params['max_replay_buffer_size'], - # } - # - # self.params = params - # self.params['agent_class'] = BCAgent - # self.params['agent_params'] = agent_params - # initialize trainer self.params = params self.trainer = Trainer(params) - - def run_training_loop(self): self.trainer.run_training_loop(n_iter=self.params['n_iter']) @@ -36,11 +22,6 @@ def run_training_loop(self): def main(): import argparse parser = argparse.ArgumentParser() - # parser.add_argument('--expert_policy_file', '-epf', type=str, required=True) # relative to where you're running this script from - # parser.add_argument('--expert_data', '-ed', type=str, required=True) #relative to where you're running this script from - # parser.add_argument('--env_name', '-env', type=str, help='choices: Ant-v2, Humanoid-v2, Walker-v2, HalfCheetah-v2, Hopper-v2', required=True) - # parser.add_argument('--exp_name', '-exp', type=str, default='pick an experiment name', required=True) - # parser.add_argument('--do_dagger', action='store_true') parser.add_argument('--ep_len', 
type=int) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index 9dd7ae326..3c72b0e63 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -18,12 +18,11 @@ def __init__(self, params): self.params = params self.sess = create_tf_session() - # TODO: replace this with appropriate Flow env - # print('ERROR CHECK ', flow_params_test['exp_tag']) create_env, _ = make_create_env(flow_params) self.env = create_env() self.env.reset() + # might need to replace this hardcode assert 'rl_0' in self.env.k.vehicle.get_ids() self.vehicle_id = 'rl_0' diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py index 00ae864aa..c5af25c61 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -6,7 +6,9 @@ # class agnostic helper functions def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): - print("CONTROLLER: ", controller) + + print("COLLECTING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) observation = env.reset() print("VEHICLE ID: ", vehicle_id) @@ -22,6 +24,7 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec #assert action is not None, "action is None" #assert (not math.isnan(action)), "action is a nan" assert not (len(env.k.vehicle.get_edge(vehicle_id)) == 0), "Case One" + # point of error: assert not (env.k.vehicle.get_edge(vehicle_id)[0] == ":"), "Case Two" actions.append(action) @@ -40,6 +43,7 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec # rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) # observation, reward, done, _ = env.step(rl_actions) + observation, reward, done, _ = env.step(action) traj_length += 1 @@ -93,9 +97,10 @@ def unpack_rollouts(rollouts_list): # Below are tensorflow related functions + def 
build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ - Builds a MLP + Builds a feedforward neural net arguments: input_placeholder: placeholder variable for the state (batch_size, input_size) From 920dd7346175e4f76b4e46b6bb2bacb34b0041ae Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sat, 28 Mar 2020 18:13:41 -0700 Subject: [PATCH 011/335] check for None or Nan action before adding to buffer --- flow/dagger/env_params_test2.py | 2 +- flow/dagger/imitating_agent.py | 4 ++-- flow/dagger/imitating_controller.py | 2 +- flow/dagger/imitating_controller2.py | 6 +++--- flow/dagger/replay_buffer.py | 2 ++ flow/dagger/run.py | 2 +- flow/dagger/trainer.py | 3 ++- flow/dagger/utils.py | 31 +++++++++------------------- 8 files changed, 22 insertions(+), 30 deletions(-) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py index 7140af720..ffd870e86 100644 --- a/flow/dagger/env_params_test2.py +++ b/flow/dagger/env_params_test2.py @@ -6,7 +6,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller2 import ImitatingController +from imitating_controller import ImitatingController vehicles.add("human", acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py index 4e9e3f443..8aabadace 100644 --- a/flow/dagger/imitating_agent.py +++ b/flow/dagger/imitating_agent.py @@ -1,11 +1,11 @@ import numpy as np import tensorflow as tf import time -from imitating_controller2 import * +from imitating_controller import * from replay_buffer class Imitating_Agent(object): - # ignore this class! + # ignore this class! 
def __init__(self, sess, env, params): self.env = env self.sess = sess diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index f85040856..eadeb0ab8 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -47,7 +47,7 @@ def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + assert (not np.any(np.isnan(action_batch))), "TRAIN ERROR ACTION NAN" action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) history = self.model.fit(observation_batch, action_batch) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py index ce3b26b0e..4b877877e 100644 --- a/flow/dagger/imitating_controller2.py +++ b/flow/dagger/imitating_controller2.py @@ -8,7 +8,7 @@ class ImitatingController(BaseController): - # Implementation in Tensorflow + # Implementation in Tensorflow def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): @@ -64,8 +64,8 @@ def define_train_op(self): def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") # print("ACTION BATCH: ", action_batch.shape) - print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) + # print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) + # print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) diff --git a/flow/dagger/replay_buffer.py 
b/flow/dagger/replay_buffer.py index a6717fc90..1213b985e 100644 --- a/flow/dagger/replay_buffer.py +++ b/flow/dagger/replay_buffer.py @@ -32,6 +32,8 @@ def add_rollouts(self, rollouts_list): self.rollouts.append(rollout) observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + assert (not np.any(np.isnan(expert_actions))), "REPLAY BUFFER ERROR" + if self.observations is None: self.observations = observations[-self.max_size:] self.actions = actions[-self.max_size:] diff --git a/flow/dagger/run.py b/flow/dagger/run.py index 162059d5f..67bac9dda 100644 --- a/flow/dagger/run.py +++ b/flow/dagger/run.py @@ -22,7 +22,7 @@ def run_training_loop(self): def main(): import argparse parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int) + parser.add_argument('--ep_len', type=int, default=3000) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) parser.add_argument('--n_iter', '-n', type=int, default=5) diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index 3c72b0e63..b532c04c6 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -7,7 +7,7 @@ import os from flow.utils.registry import make_create_env from env_params_test import flow_params -from imitating_controller2 import ImitatingController +from imitating_controller import ImitatingController from flow.controllers.car_following_models import IDMController from flow.core.params import SumoCarFollowingParams from utils import * @@ -100,6 +100,7 @@ def train_controller(self): for train_step in range(self.params['num_agent_train_steps_per_iter']): # TODO: fix this ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + print(expert_ac_batch) self.controller.train(ob_batch, expert_ac_batch) diff --git a/flow/dagger/utils.py 
b/flow/dagger/utils.py index c5af25c61..43f1f5fc2 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -19,31 +19,20 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec traj_length = 0 while True: - observations.append(observation) action = controller.get_action(env) - #assert action is not None, "action is None" - #assert (not math.isnan(action)), "action is a nan" - assert not (len(env.k.vehicle.get_edge(vehicle_id)) == 0), "Case One" - # point of error: - assert not (env.k.vehicle.get_edge(vehicle_id)[0] == ":"), "Case Two" + expert_action = expert_controller.get_action(env) + if (expert_action is None or math.isnan(expert_action)): + print("HIT CASE") + observation, reward, done, _ = env.step(action) + traj_length += 1 + terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: + break + continue + observations.append(observation) actions.append(action) - - expert_action = expert_controller.get_action(env) - assert env is not None, "environment is None" - assert expert_action is not None, "expert action is None" - assert (not math.isnan(expert_action)), "expert action is a nan" expert_actions.append(expert_action) - - # rl_actions = {} - # for veh_id in env.k.vehicle.get_ids(): - # if veh_id == vehicle_id: - # rl_actions[veh_id] = action - # else: - # rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env) - - # observation, reward, done, _ = env.step(rl_actions) - observation, reward, done, _ = env.step(action) traj_length += 1 From 630a100f6ad37ed8b26600002a4e49108cf4e25e Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sat, 28 Mar 2020 18:39:36 -0700 Subject: [PATCH 012/335] Fixed dimension bug --- flow/dagger/env_params_test2.py | 2 +- flow/dagger/imitating_agent.py | 2 +- flow/dagger/imitating_controller.py | 6 ++---- flow/dagger/imitating_controller2.py | 4 ---- flow/dagger/trainer.py | 3 +-- flow/dagger/utils.py | 7 ++++--- 6 files changed, 9 insertions(+), 15 
deletions(-) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py index ffd870e86..7140af720 100644 --- a/flow/dagger/env_params_test2.py +++ b/flow/dagger/env_params_test2.py @@ -6,7 +6,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller import ImitatingController +from imitating_controller2 import ImitatingController vehicles.add("human", acceleration_controller=(IDMController, {}), routing_controller=(ContinuousRouter, {}), diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py index 8aabadace..f5b09dee3 100644 --- a/flow/dagger/imitating_agent.py +++ b/flow/dagger/imitating_agent.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf import time -from imitating_controller import * +from imitating_controller2 import * from replay_buffer class Imitating_Agent(object): diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index eadeb0ab8..2537d70b8 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -40,14 +40,12 @@ def build_network(self): self.model.add(Dense(self.size, activation='relu')) # No activation self.model.add(Dense(self.action_dim)) - self.model.compile(loss='mean_squared_error', optimizer='adam', metrics=['accuracy']) + self.model.compile(loss='mean_squared_error', optimizer='adam') def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") - - print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - assert (not np.any(np.isnan(action_batch))), "TRAIN ERROR ACTION NAN" + assert (not np.any(np.isnan(action_batch))), "NANs in training labels" action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) history = self.model.fit(observation_batch, action_batch) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py index 
4b877877e..65c7c9d1d 100644 --- a/flow/dagger/imitating_controller2.py +++ b/flow/dagger/imitating_controller2.py @@ -52,7 +52,6 @@ def define_placeholders(self): def define_forward_pass(self): pred_action = build_mlp(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) self.action_predictions = pred_action - print("ACTION PREDICTIONS TYPE ", type(self.action_predictions)) def define_train_op(self): true_actions = self.action_labels_placeholder @@ -63,9 +62,6 @@ def define_train_op(self): def train(self, observation_batch, action_batch): assert(self.training, "Policy must be trainable") - # print("ACTION BATCH: ", action_batch.shape) - # print("OBS NAN CHECK: ", np.any(np.isnan(observation_batch))) - # print("ACT NAN CHECK: ", np.any(np.isnan(action_batch))) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index b532c04c6..3c72b0e63 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -7,7 +7,7 @@ import os from flow.utils.registry import make_create_env from env_params_test import flow_params -from imitating_controller import ImitatingController +from imitating_controller2 import ImitatingController from flow.controllers.car_following_models import IDMController from flow.core.params import SumoCarFollowingParams from utils import * @@ -100,7 +100,6 @@ def train_controller(self): for train_step in range(self.params['num_agent_train_steps_per_iter']): # TODO: fix this ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - print(expert_ac_batch) self.controller.train(ob_batch, expert_ac_batch) diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py index 
43f1f5fc2..177fc620f 100644 --- a/flow/dagger/utils.py +++ b/flow/dagger/utils.py @@ -11,8 +11,6 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec print("EXPERT CONTROLLER: ", expert_controller) observation = env.reset() - print("VEHICLE ID: ", vehicle_id) - print("VEHICLE IDS: ", env.k.vehicle.get_ids()) assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] @@ -20,9 +18,12 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec while True: action = controller.get_action(env) + + if type(action) == np.ndarray: + action = action.flatten()[0] + expert_action = expert_controller.get_action(env) if (expert_action is None or math.isnan(expert_action)): - print("HIT CASE") observation, reward, done, _ = env.step(action) traj_length += 1 terminate_rollout = traj_length == max_trajectory_length or done From eec0a02b430238c3a7ce4f05de0f04fb362d6e2b Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Sun, 29 Mar 2020 16:20:28 -0700 Subject: [PATCH 013/335] flake and pydocstyle --- examples/exp_configs/rl/multiagent/multiagent_i210.py | 1 - examples/train.py | 6 +++--- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 872568cab..1779adf69 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -5,7 +5,6 @@ """ import os -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy from ray.tune.registry import register_env from flow.controllers import RLController diff --git a/examples/train.py b/examples/train.py index 1f2cd6300..ec99d4ee1 100644 --- a/examples/train.py +++ b/examples/train.py @@ -22,7 +22,6 @@ import ray from ray import tune -from ray.tune import run_experiments from ray.tune.registry 
import register_env try: from ray.rllib.agents.agent import get_agent_class @@ -36,9 +35,9 @@ from flow.utils.registry import make_create_env - def parse_args(args): """Parse training options user can specify in command line. + Returns ------- argparse.Namespace @@ -140,6 +139,7 @@ def setup_exps_rllib(flow_params, policies_to_train=None, ): """Return the relevant components of an RLlib experiment. + Parameters ---------- flow_params : dict @@ -188,7 +188,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems + config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems if flags.grid_search: config["prioritized_replay"] = tune.grid_search(['True', 'False']) config["actor_lr"] = tune.grid_search([1e-3, 1e-4]) From 27f325b9fd9031f027b32732d195b808167ee980 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Sun, 29 Mar 2020 16:21:59 -0700 Subject: [PATCH 014/335] missed a flake8 --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index ec99d4ee1..4e6f97bbe 100644 --- a/examples/train.py +++ b/examples/train.py @@ -139,7 +139,7 @@ def setup_exps_rllib(flow_params, policies_to_train=None, ): """Return the relevant components of an RLlib experiment. 
- + Parameters ---------- flow_params : dict From ba2ff131f64db344b8d928ff290266653680213f Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Mon, 30 Mar 2020 11:56:15 -0700 Subject: [PATCH 015/335] Add an option for a local reward that just computes speed of the AV and its follower (#891) --- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/envs/multiagent/i210.py | 72 +++++++++++-------- 2 files changed, 44 insertions(+), 30 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index dd85c56cf..d993ae93a 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -101,7 +101,7 @@ edge_id = "119257908#1-AddedOnRampEdge" custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( - env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(edge_id)))), + env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), # we multiply by 5 to account for the vehicle length and by 1000 to convert diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 409aeb14f..4082eb415 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -16,6 +16,8 @@ "max_decel": 1, # whether we use an obs space that contains adjacent lane info or just the lead obs "lead_obs": True, + # whether the reward should come from local vehicles instead of global rewards + "local_reward": True } @@ -137,35 +139,47 @@ def compute_reward(self, rl_actions, **kwargs): return {} rewards = {} - for rl_id in self.k.vehicle.get_rl_ids(): - if self.env_params.evaluate: - # reward is speed of vehicle if we are in evaluation mode - reward = self.k.vehicle.get_speed(rl_id) - elif kwargs['fail']: - # reward is 0 if a collision occurred - reward = 0 - else: - # reward high system-level velocities - cost1 = average_velocity(self, fail=kwargs['fail']) - - # penalize 
small time headways - cost2 = 0 - t_min = 1 # smallest acceptable time headway - - lead_id = self.k.vehicle.get_leader(rl_id) - if lead_id not in ["", None] \ - and self.k.vehicle.get_speed(rl_id) > 0: - t_headway = max( - self.k.vehicle.get_headway(rl_id) / - self.k.vehicle.get_speed(rl_id), 0) - cost2 += min((t_headway - t_min) / t_min, 0) - - # weights for cost1, cost2, and cost3, respectively - eta1, eta2 = 1.00, 0.10 - - reward = max(eta1 * cost1 + eta2 * cost2, 0) - - rewards[rl_id] = reward + if self.env_params.additional_params["local_reward"]: + for rl_id in self.k.vehicle.get_rl_ids(): + rewards[rl_id] = 0 + speeds = [] + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) + speeds.extend([speed for speed in follow_speed if speed >= 0]) + if self.k.vehicle.get_speed(rl_id) >= 0: + speeds.append(self.k.vehicle.get_speed(rl_id)) + if len(speeds) > 0: + # rescale so the q function can estimate it quickly + rewards[rl_id] = np.mean(speeds) / 500.0 + else: + for rl_id in self.k.vehicle.get_rl_ids(): + if self.env_params.evaluate: + # reward is speed of vehicle if we are in evaluation mode + reward = self.k.vehicle.get_speed(rl_id) + elif kwargs['fail']: + # reward is 0 if a collision occurred + reward = 0 + else: + # reward high system-level velocities + cost1 = average_velocity(self, fail=kwargs['fail']) + + # penalize small time headways + cost2 = 0 + t_min = 1 # smallest acceptable time headway + + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id not in ["", None] \ + and self.k.vehicle.get_speed(rl_id) > 0: + t_headway = max( + self.k.vehicle.get_headway(rl_id) / + self.k.vehicle.get_speed(rl_id), 0) + cost2 += min((t_headway - t_min) / t_min, 0) + + # weights for cost1, cost2, and cost3, respectively + eta1, eta2 = 1.00, 0.10 + + reward = max(eta1 * cost1 + eta2 * cost2, 0) + + rewards[rl_id] = reward return rewards def additional_command(self): From 861c31e21d042471d9ea3be54d9ab0145d4321ff Mon Sep 17 00:00:00 2001 From: 
Eugene Vinitsky Date: Tue, 31 Mar 2020 11:49:58 -0700 Subject: [PATCH 016/335] Noise fix (#894) * Add an option for a local reward that just computes speed of the AV and its follower * Set the noise scaling to match Bennis suggestions --- examples/exp_configs/rl/multiagent/multiagent_i210.py | 2 +- flow/controllers/base_controller.py | 2 +- flow/envs/multiagent/i210.py | 3 ++- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 1779adf69..7710b4a4d 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -47,7 +47,7 @@ "human", num_vehicles=0, lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.6}), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), ) vehicles.add( "av", diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 41780826b..4004b1c4d 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -107,7 +107,7 @@ def get_action(self, env): # add noise to the accelerations, if requested if self.accel_noise > 0: - accel += np.random.normal(0, self.accel_noise) + accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) # run the failsafes, if requested if self.fail_safe == 'instantaneous': diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 4082eb415..6368e7a2d 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -144,7 +144,8 @@ def compute_reward(self, rl_actions, **kwargs): rewards[rl_id] = 0 speeds = [] follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - speeds.extend([speed for speed in follow_speed if speed >= 0]) + if follow_speed >= 0: + speeds.append(follow_speed) if 
self.k.vehicle.get_speed(rl_id) >= 0: speeds.append(self.k.vehicle.get_speed(rl_id)) if len(speeds) > 0: From d04f1440c7f3e953ac8cdddcc591b75f2800a13b Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 31 Mar 2020 13:43:29 -0700 Subject: [PATCH 017/335] Python upgrade (#895) - Upgrade python - Clean up AMI --- environment.yml | 23 +++++++++++------------ examples/train.py | 18 +++++++++++++----- requirements.txt | 7 ++++--- scripts/ray_autoscale.yaml | 4 +++- 4 files changed, 31 insertions(+), 21 deletions(-) diff --git a/environment.yml b/environment.yml index 480ea7eba..162bed533 100644 --- a/environment.yml +++ b/environment.yml @@ -1,18 +1,17 @@ name: flow dependencies: - - python==3.6.8 - - scipy==1.1.0 - - lxml==4.4.1 - - six==1.11.0 - - path.py - - python-dateutil==2.7.3 - - pip>=18.0 - - tensorflow==1.9.0 - - cloudpickle==1.2.1 - - setuptools==41.0.0 - - plotly==2.4.0 + - python==3.7.3 - pip: + - scipy==1.1.0 + - lxml==4.4.1 + - six==1.11.0 + - path.py + - python-dateutil==2.7.3 + - pip>=18.0 + - tensorflow==1.14.0 + - setuptools==41.0.0 + - plotly==2.4.0 - gym==0.14.0 - pyprind==2.11.2 - nose2==0.8.0 @@ -25,7 +24,7 @@ dependencies: - setproctitle - psutil - opencv-python - - boto3==1.4.8 + - boto3==1.10.45 - redis~=2.10.6 - tabulate - pytz diff --git a/examples/train.py b/examples/train.py index 4e6f97bbe..8150bc883 100644 --- a/examples/train.py +++ b/examples/train.py @@ -17,8 +17,11 @@ import numpy as np import pytz -from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv -from stable_baselines import PPO2 +try: + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 +except ImportError: + print("Stable-baselines not installed") import ray from ray import tune @@ -54,6 +57,10 @@ def parse_args(args): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') + parser.add_argument( + 'exp_title', type=str, + 
help='Title to give the run.') + # optional input parameters parser.add_argument( '--rl_trainer', type=str, default="rllib", @@ -62,8 +69,6 @@ def parse_args(args): '--algorithm', type=str, default="PPO", help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' ) - parser.add_argument('--exp_title', type=str, default='test', - help='Informative experiment title to help distinguish results') parser.add_argument( '--num_cpus', type=int, default=1, help='How many CPUs to use') @@ -76,6 +81,9 @@ def parse_args(args): parser.add_argument( '--num_iterations', type=int, default=200, help='How many iterations are in a training run.') + parser.add_argument( + '--checkpoint_freq', type=int, default=20, + help='How often to checkpoint.') parser.add_argument( '--num_rollouts', type=int, default=1, help='How many rollouts are in a training batch') @@ -266,7 +274,7 @@ def train_rllib(submodule, flags): "run_or_experiment": alg_run, "name": gym_name, "config": config, - "checkpoint_freq": 20, + "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, "max_failures": 0, "stop": { diff --git a/requirements.txt b/requirements.txt index 4569dfca5..191ecc740 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,6 @@ path.py joblib==0.10.3 python-dateutil==2.7.3 cached_property -cloudpickle==1.2.0 pyglet==1.3.2 matplotlib==3.1.0 imutils==0.5.1 @@ -21,9 +20,11 @@ lz4 setproctitle psutil opencv-python -boto3==1.4.8 +boto3==1.10.45 redis~=2.10.6 pandas==0.24.2 plotly==2.4.0 tabulate -pytz \ No newline at end of file +tensorflow==1.14.0 +awscli==1.16.309 +pytz diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5bf2a9c4a..ea84bbee0 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -67,9 +67,11 @@ worker_nodes: # Additional options in the boto docs. 
setup_commands: - - cd flow && git fetch && git checkout origin/master + - cd flow && git fetch && git checkout origin/i210_dev head_setup_commands: + - pip install ray==0.8.0 + - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions - pip install awscli==1.16.309 - pip install pytz From 1100d8d2223014ea3919bbf553f7498d9c69bd60 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Sun, 5 Apr 2020 22:41:18 -0700 Subject: [PATCH 018/335] I210 Replay Script (#886) Replay script for the i210 env. --- .../rl/multiagent/multiagent_i210.py | 45 ++- flow/utils/rllib.py | 7 + flow/visualize/i210_replay.py | 378 ++++++++++++++++++ flow/visualize/plot_custom_callables.py | 74 ++++ flow/visualize/transfer/util.py | 138 +++++++ 5 files changed, 627 insertions(+), 15 deletions(-) create mode 100644 flow/visualize/i210_replay.py create mode 100644 flow/visualize/plot_custom_callables.py create mode 100644 flow/visualize/transfer/util.py diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 7710b4a4d..327282e28 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -4,6 +4,7 @@ highway with ramps network. 
""" import os +import numpy as np from ray.tune.registry import register_env @@ -17,6 +18,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import energy_consumption from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS from flow.utils.registry import make_create_env @@ -26,6 +28,10 @@ # number of steps per rollout HORIZON = 4000 +VEH_PER_HOUR_BASE_119257914 = 10800 +VEH_PER_HOUR_BASE_27414345 = 321 +VEH_PER_HOUR_BASE_27414342 = 421 + # percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10 @@ -63,46 +69,46 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=int(10800 * (1 - pen_rate)), + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), # probability=1.0, - departLane="random", + depart_lane="random", departSpeed=20) # # on ramp # inflow.add( # veh_type="human", # edge="27414345", # vehs_per_hour=321 * pen_rate, -# departLane="random", -# departSpeed=20) +# depart_lane="random", +# depart_speed=20) # inflow.add( # veh_type="human", # edge="27414342#0", # vehs_per_hour=421 * pen_rate, -# departLane="random", -# departSpeed=20) +# depart_lane="random", +# depart_speed=20) # Now add the AVs # main highway inflow.add( veh_type="av", edge="119257914", - vehs_per_hour=int(10800 * pen_rate), + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), # probability=1.0, - departLane="random", - departSpeed=20) + depart_lane="random", + depart_speed=20) # # on ramp # inflow.add( # veh_type="av", # edge="27414345", -# vehs_per_hour=int(321 * pen_rate), -# departLane="random", -# departSpeed=20) +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), +# depart_lane="random", +# depart_speed=20) # inflow.add( # veh_type="av", # edge="27414342#0", -# vehs_per_hour=int(421 * pen_rate), -# 
departLane="random", -# departSpeed=20) +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), +# depart_lane="random", +# depart_speed=20) NET_TEMPLATE = os.path.join( config.PROJECT_PATH, @@ -176,3 +182,12 @@ def policy_mapping_fn(_): """Map a policy in RLlib.""" return 'av' + + +custom_callables = { + "avg_speed": lambda env: np.mean([speed for speed in + env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), + "avg_outflow": lambda env: np.nan_to_num( + env.k.vehicle.get_outflow_rate(120)), + "avg_energy": lambda env: -1*energy_consumption(env, 0.1) +} diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index 80193c22b..7d777d769 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -146,6 +146,13 @@ def get_flow_params(config): if flow_params["net"]["inflows"]: net.inflows.__dict__ = flow_params["net"]["inflows"].copy() + if len(net.template) > 0: + dirname = os.getcwd() + filename = os.path.join(dirname, '../../examples') + split = net.template.split('examples')[1][1:] + path = os.path.abspath(os.path.join(filename, split)) + net.template = path + env = EnvParams() env.__dict__ = flow_params["env"].copy() diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py new file mode 100644 index 000000000..502d646aa --- /dev/null +++ b/flow/visualize/i210_replay.py @@ -0,0 +1,378 @@ +"""Transfer and replay for i210 environment.""" +import argparse +from collections import defaultdict +from copy import deepcopy +import numpy as np +import json +import os +import time + +import ray +try: + from ray.rllib.agents.agent import get_agent_class +except ImportError: + from ray.rllib.agents.registry import get_agent_class +from ray.tune.registry import register_env + +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from flow.utils.rllib import get_flow_params +from flow.utils.rllib import get_rllib_config +from flow.utils.rllib import get_rllib_pkl +from flow.utils.rllib import 
FlowParamsEncoder + + +from flow.visualize.transfer.util import inflows_range + +from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS +from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables + +EXAMPLE_USAGE = """ +example usage: + python i210_replay.py -r /ray_results/experiment_dir/result_dir -c 1 + python i210_replay.py --controller idm + python i210_replay.py --controller idm --run_transfer + +Here the arguments are: +1 - the path to the simulation results +2 - the number of the checkpoint +""" + + +def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None): + """Replay or run transfer test (defined by transfer_fn) by modif. + + Arguments: + --------- + args {[Namespace]} -- [args from argparser] + flow_params {[flow_params object, pulled from ]} -- [description] + transfer_fn {[type]} -- [description] + + Keyword Arguments: + ----------------- + rllib_config {[type]} -- [description] (default: {None}) + result_dir {[type]} -- [description] (default: {None}) + """ + assert bool(args.controller) ^ bool(rllib_config), \ + "Need to specify either controller or rllib_config, but not both" + + if args.run_transfer: + flow_params = transfer_test.flow_params_modifier_fn(flow_params) + + if args.controller: + test_params = {} + if args.controller == 'idm': + from flow.controllers.car_following_models import IDMController + controller = IDMController + test_params.update({'v0': 1, 'T': 1, 'a': 0.2, 'b': 0.2}) # An example of really obvious changes + elif args.controller == 'default_human': + controller = flow_params['veh'].type_parameters['human']['acceleration_controller'][0] + test_params.update(flow_params['veh'].type_parameters['human']['acceleration_controller'][1]) + elif args.controller == 'follower_stopper': + from flow.controllers.velocity_controllers import FollowerStopper + controller = FollowerStopper + test_params.update({'v_des': 15}) + 
elif args.controller == 'sumo': + from flow.controllers.car_following_models import SimCarFollowingController + controller = SimCarFollowingController + + flow_params['veh'].type_parameters['av']['acceleration_controller'] = (controller, test_params) + + for veh_param in flow_params['veh'].initial: + if veh_param['veh_id'] == 'av': + veh_param['acceleration_controller'] = (controller, test_params) + + sim_params = flow_params['sim'] + sim_params.num_clients = 1 + + sim_params.restart_instance = True + dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_path = '{0}/test_time_rollout/'.format(dir_path) + sim_params.emission_path = emission_path if args.gen_emission else None + + # pick your rendering mode + if args.render_mode == 'sumo_web3d': + sim_params.num_clients = 2 + sim_params.render = False + elif args.render_mode == 'drgb': + sim_params.render = 'drgb' + sim_params.pxpm = 4 + elif args.render_mode == 'sumo_gui': + sim_params.render = False # will be set to True below + elif args.render_mode == 'no_render': + sim_params.render = False + if args.save_render: + if args.render_mode != 'sumo_gui': + sim_params.render = 'drgb' + sim_params.pxpm = 4 + sim_params.save_render = True + + # Start the environment with the gui turned on and a path for the + # emission file + env_params = flow_params['env'] + env_params.restart_instance = False + if args.evaluate: + env_params.evaluate = True + + # lower the horizon if testing + if args.horizon: + env_params.horizon = args.horizon + + # Create and register a gym+rllib env + create_env, env_name = make_create_env(params=flow_params, version=0) + env = create_env(env_name) + + if args.render_mode == 'sumo_gui': + env.sim_params.render = True # set to True after initializing agent and env + + # if restart_instance, don't restart here because env.reset will restart later + if not sim_params.restart_instance: + env.restart_simulation(sim_params=sim_params, render=sim_params.render) + + if rllib_config: + # check 
if we have a multiagent environment but in a + # backwards compatible way + if rllib_config.get('multiagent', {}).get('policies', None): + multiagent = True + pkl = get_rllib_pkl(result_dir) + rllib_config['multiagent'] = pkl['multiagent'] + else: + multiagent = False + raise NotImplementedError + + # Run on only one cpu for rendering purposes + rllib_config['num_workers'] = 0 + + # lower the horizon if testing + if args.horizon: + rllib_config['horizon'] = args.horizon + + assert 'run' in rllib_config['env_config'], "Was this trained with the latest version of Flow?" + # Determine agent and checkpoint + config_run = rllib_config['env_config']['run'] + + rllib_flow_params = get_flow_params(rllib_config) + agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) + register_env(agent_env_name, agent_create_env) + agent_cls = get_agent_class(config_run) + + # create the agent that will be used to compute the actions + agent = agent_cls(env=agent_env_name, config=rllib_config) + checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num + checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num + agent.restore(checkpoint) + + if multiagent: + # map the agent id to its policy + policy_map_fn = rllib_config['multiagent']['policy_mapping_fn'] + + if rllib_config['model']['use_lstm']: + use_lstm = True + if multiagent: + # map the agent id to its policy + size = rllib_config['model']['lstm_cell_size'] + lstm_state = defaultdict(lambda: [np.zeros(size, np.float32), + np.zeros(size, np.float32)]) + else: + lstm_state = [ + np.zeros(rllib_config['model']['lstm_cell_size'], np.float32), + np.zeros(rllib_config['model']['lstm_cell_size'], np.float32) + ] + else: + use_lstm = False + + # used to store + info_dict = { + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in custom_callables.keys() + }) + + for i in range(args.num_rollouts): + vel = [] + custom_vals = {key: [] for key in custom_callables.keys()} + 
state = env.reset() + for _ in range(env_params.horizon): + + if rllib_config: + if multiagent: + action = {} + for agent_id in state.keys(): + if use_lstm: + action[agent_id], lstm_state[agent_id], _ = \ + agent.compute_action( + state[agent_id], state=lstm_state[agent_id], + policy_id=policy_map_fn(agent_id)) + else: + action[agent_id] = agent.compute_action( + state[agent_id], policy_id=policy_map_fn(agent_id)) + else: + if use_lstm: + raise NotImplementedError + else: + action = agent.compute_action(state) + else: + action = None + + state, reward, done, _ = env.step(action) + + # Compute the velocity speeds and cumulative returns. + veh_ids = env.k.vehicle.get_ids() + vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) + + # Compute the results for the custom callables. + for (key, lambda_func) in custom_callables.items(): + custom_vals[key].append(lambda_func(env)) + + if type(done) is dict and done['__all__']: + break + elif type(done) is not dict and done: + break + + # Store the information from the run in info_dict. + outflow = env.k.vehicle.get_outflow_rate(int(500)) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print('======== Summary of results ========') + if args.run_transfer: + print("Transfer test: {}".format(transfer_test.transfer_str)) + print("====================================") + + # Print the averages/std for all variables in the info_dict. 
+ for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + # terminate the environment + env.unwrapped.terminate() + + if output_dir: + if args.run_transfer: + exp_name = "{}-replay".format(transfer_test.transfer_str) + else: + exp_name = "i210_replay" + replay_out = os.path.join(output_dir, '{}-info.npy'.format(exp_name)) + np.save(replay_out, info_dict) + # if prompted, convert the emission file into a csv file + if args.gen_emission: + emission_filename = '{0}-emission.xml'.format(env.network.name) + time.sleep(0.1) + + emission_path = \ + '{0}/test_time_rollout/{1}'.format(dir_path, emission_filename) + + output_path = os.path.join(output_dir, '{}-emission.csv'.format(exp_name)) + # convert the emission file into a csv file + emission_to_csv(emission_path, output_path=output_path) + + # print the location of the emission csv file + print("\nGenerated emission file at " + output_path) + + # delete the .xml version of the emission file + os.remove(emission_path) + + # Create the flow_params object + with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: + json.dump(flow_params, outfile, + cls=FlowParamsEncoder, sort_keys=True, indent=4) + + return info_dict + + +def create_parser(): + """Create the parser to capture CLI arguments.""" + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='[Flow] Evaluates a reinforcement learning agent ' + 'given a checkpoint.', + epilog=EXAMPLE_USAGE) + + parser.add_argument( + '--rllib_result_dir', '-r', required=False, type=str, help='Directory containing results') + parser.add_argument('--checkpoint_num', '-c', required=False, type=str, help='Checkpoint number.') + + parser.add_argument( + '--num_rollouts', + type=int, + default=1, + help='The number of rollouts to visualize.') + parser.add_argument( + '--gen_emission', + action='store_true', + help='Specifies whether to generate an 
emission file from the ' + 'simulation') + parser.add_argument( + '--evaluate', + action='store_true', + help='Specifies whether to use the \'evaluate\' reward ' + 'for the environment.') + parser.add_argument( + '--render_mode', '-rm', + type=str, + default=None, + help='Pick the render mode. Options include sumo_web3d, ' + 'rgbd and sumo_gui') + parser.add_argument( + '--save_render', + action='store_true', + help='Saves a rendered video to a file. NOTE: Overrides render_mode ' + 'with pyglet rendering.') + parser.add_argument( + '--horizon', + type=int, + help='Specifies the horizon.') + parser.add_argument( + '--local', + action='store_true', + help='Adjusts run settings to be compatible with limited ' + 'memory capacity' + ) + parser.add_argument( + '--controller', + type=str, + help='Which custom controller to use. Defaults to IDM' + ) + parser.add_argument( + '--run_transfer', + action='store_true', + help='Runs transfer tests if true' + ) + parser.add_argument( + '--output_dir', + type=str, + help='Directory to save results.', + default=None + ) + return parser + + +if __name__ == '__main__': + parser = create_parser() + args = parser.parse_args() + + rllib_config = None + rllib_result_dir = None + if args.rllib_result_dir is not None: + rllib_result_dir = args.rllib_result_dir if args.rllib_result_dir[-1] != '/' \ + else args.rllib_result_dir[:-1] + + rllib_config = get_rllib_config(rllib_result_dir) + + flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) + + if args.local: + ray.init(num_cpus=1, object_store_memory=200 * 1024 * 1024) + else: + ray.init(num_cpus=1) + + if args.run_transfer: + for transfer_test in inflows_range(penetration_rates=[0.05, 0.1, 0.2], flow_rate_coefs=[0.8, 1.0, 1.2]): + replay(args, flow_params, output_dir=args.output_dir, transfer_test=transfer_test, + rllib_config=rllib_config, result_dir=rllib_result_dir) + else: + replay(args, flow_params, output_dir=args.output_dir, rllib_config=rllib_config, result_dir=rllib_result_dir) 
diff --git a/flow/visualize/plot_custom_callables.py b/flow/visualize/plot_custom_callables.py new file mode 100644 index 000000000..8df0e7f62 --- /dev/null +++ b/flow/visualize/plot_custom_callables.py @@ -0,0 +1,74 @@ +"""Generate charts from with .npy files containing custom callables through replay.""" + +import argparse +from datetime import datetime +import errno +import numpy as np +import matplotlib.pyplot as plt +import os +import pytz +import sys + + +def parse_flags(args): + """Parse training options user can specify in command line. + + Returns + ------- + argparse.Namespace + the output parser object + """ + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description="Parse argument used when running a Flow simulation.", + epilog="python train.py EXP_CONFIG") + parser.add_argument("target_folder", type=str, + help='Folder containing results') + parser.add_argument("--output_folder", type=str, required=False, default=None, + help='Folder to save charts to.') + parser.add_argument("--show_images", action='store_true', + help='Whether to display charts.') + return parser.parse_args(args) + + +if __name__ == "__main__": + flags = parse_flags(sys.argv[1:]) + + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + + if flags.output_folder: + if not os.path.exists(flags.output_folder): + try: + os.makedirs(flags.output_folder) + except OSError as exc: + if exc.errno != errno.EEXIST: + raise + + info_dicts = [] + custom_callable_names = set() + exp_names = [] + for (dirpath, dir_names, file_names) in os.walk(flags.target_folder): + for file_name in file_names: + if file_name[-4:] == ".npy": + exp_name = os.path.basename(os.path.dirname(dirpath)) + info_dict = np.load(os.path.join(dirpath, file_name), allow_pickle=True).item() + + info_dicts.append(info_dict) + exp_names.append(exp_name) + custom_callable_names.update(info_dict.keys()) + + for name in 
custom_callable_names: + y_vals = [np.mean(info_dict[name]) for info_dict in info_dicts] + y_stds = [np.std(info_dict[name]) for info_dict in info_dicts] + x_pos = np.arange(len(exp_names)) + + plt.bar(x_pos, y_vals, align='center', alpha=0.5) + plt.xticks(x_pos, [exp_name for exp_name in exp_names], rotation=60) + plt.ylabel('Experiment') + plt.title('I210 Replay Result: {}'.format(name)) + plt.tight_layout() + if flags.output_folder: + plt.savefig(os.path.join(flags.output_folder, '{}-plot.png'.format(name))) + + plt.show() diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py new file mode 100644 index 000000000..50b503956 --- /dev/null +++ b/flow/visualize/transfer/util.py @@ -0,0 +1,138 @@ +"""Definitions of transfer classes.""" +from copy import deepcopy + +from flow.core.params import InFlows +from examples.exp_configs.rl.multiagent.multiagent_i210 import VEH_PER_HOUR_BASE_119257914, \ + VEH_PER_HOUR_BASE_27414345, VEH_PER_HOUR_BASE_27414342 + + +def make_inflows(penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20, on_ramp=False): + """Generate inflows object from parameters. Uses default inflows from multiagent_i210. 
+ + Keyword Arguments: + ----------------- + penetration_rate {float} -- [AV Penetration Rate] (default: {0.1}) + flow_rate_coef {float} -- [Scale flow rate by] (default: {1.0}) + departSpeed {int} -- [Initial speed of all flows] (default: {20}) + + Returns + ------- + [Inflows] -- [Inflows parameter object] + + """ + inflow = InFlows() + # main highway + assert penetration_rate < 1.0, "your penetration rate is over 100%" + assert penetration_rate > 0.0, "your penetration rate should be above zero" + + inflow_119257914 = dict(veh_type="human", + edge="119257914", + vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef, + # probability=1.0, + departLane="random", + departSpeed=departSpeed) + inflow_119257914_av = dict(veh_type="av", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef), + # probability=1.0, + departLane="random", + departSpeed=departSpeed) + if on_ramp: + inflow_27414345 = dict(veh_type="human", + edge="27414345", + vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * penetration_rate * flow_rate_coef, + departLane="random", + departSpeed=departSpeed) + + inflow_27414342 = dict(veh_type="human", + edge="27414342#0", + vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * penetration_rate * flow_rate_coef, + departLane="random", + departSpeed=departSpeed) + + all_inflow_defs = (inflow_119257914, inflow_27414345, inflow_27414342, inflow_119257914_av) + else: + all_inflow_defs = (inflow_119257914, inflow_119257914_av) + + for inflow_def in all_inflow_defs: + inflow.add(**inflow_def) + + return inflow + + +class BaseTransfer: + """Base Transfer class.""" + + def __init__(self): + self.transfer_str = "Base" + pass + + def flow_params_modifier_fn(self, flow_params, clone_params=True): + """Return modified flow_params. 
+ + Arguments: + --------- + flow_params {[flow_params_dictionary]} -- [flow_params] + """ + if clone_params: + flow_params = deepcopy(flow_params) + + return flow_params + + def env_modifier_fn(self, env): + """Modify the env before rollouts are run. + + Arguments: + --------- + env {[I210MultiEnv]} -- [Env to modify] + """ + pass + + +class InflowTransfer(BaseTransfer): + """Modifies the inflow of i210 env.""" + + def __init__(self, penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20): + super(InflowTransfer, self).__init__() + self.penetration_rate = penetration_rate + self.flow_rate_coef = flow_rate_coef + self.departSpeed = departSpeed + + self.transfer_str = "{:0.2f}_pen_{:0.2f}_flow_rate_coef_{:0.2f}_depspeed".format( + penetration_rate, flow_rate_coef, departSpeed) + + def flow_params_modifier_fn(self, flow_params, clone_params=True): + """See Parent.""" + if clone_params: + flow_params = deepcopy(flow_params) + + flow_params['net'].inflows = make_inflows(self.penetration_rate, self.flow_rate_coef, self.departSpeed) + + return flow_params + + +def inflows_range(penetration_rates=0.1, flow_rate_coefs=1.0, departSpeeds=20.0): + """Generate inflow objects given penetration_rates, flow_rates, and depart speeds. 
+ + Keyword Arguments: + ----------------- + penetration_rates {float | list of floats} -- [single, or multiple penetration rates] (default: {0.1}) + flow_rate_coefs {float | list of floats} -- [single, or multiple flow rate coefficient] (default: {1.0}) + departSpeeds {float | list of floats} -- [single, or multiple depart speeds] (default: {20.0}) + + Yields + ------ + [InflowTransfer] -- [Transfer object] + """ + if not hasattr(penetration_rates, '__iter__'): + penetration_rates = [penetration_rates] + if not hasattr(flow_rate_coefs, '__iter__'): + flow_rate_coefs = [flow_rate_coefs] + if not hasattr(departSpeeds, '__iter__'): + departSpeeds = [departSpeeds] + + for departSpeed in departSpeeds: + for penetration_rate in penetration_rates: + for flow_rate_coef in flow_rate_coefs: + yield InflowTransfer(penetration_rate=penetration_rate, flow_rate_coef=flow_rate_coef, + departSpeed=departSpeed) From 4ce53319e4768cae17f866f3cfc9686db9080ae7 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 019/335] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++ examples/simulate.py | 10 +- flow/controllers/base_controller.py | 12 ++ flow/core/experiment.py | 27 ++++- flow/core/kernel/vehicle/base.py | 16 +++ flow/core/kernel/vehicle/traci.py | 15 +++ 8 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time + + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired 
output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + self.client = boto3.client("athena") + self.existing_partitions = self.get_existing_partitions() + + def get_existing_partitions(self): + """prints the existing partitions in the S3 bucket""" + + response = self.client.start_query_execution( + QueryString='SHOW PARTITIONS trajectory_table', + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("get current partitions timed out") + response = self.client.get_query_results( + 
QueryExecutionId=response['QueryExecutionId'], + MaxResults=1000 + ) + return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + + def check_status(self, execution_id): + """ Return the status of the execution with given id + + Parameters + ---------- + execution_id : string + id of the execution that is checked for + Returns + ------- + status: str + QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED + """ + + response = self.client.get_query_execution( + QueryExecutionId=execution_id + ) + return response['QueryExecution']['Status']['State'] + + def wait_for_execution(self, execution_id): + """ wait for the execution to finish or time-out + + Parameters + ---------- + execution_id : str + id of the execution this is watiing for + Returns + ------- + time_out: bool + True if time-out, False if success + Raises + ------ + RuntimeError: if execution failed or get canceled + """ + start = time() + while time() - start < self.MAX_WAIT: + state = self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ start the execution of a query, does not wait for it to finish + + Parameters + ---------- + query_name : str + name of 
the query in QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if tries to run a query not existed in QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query not existed: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" + "a S3 location") +parser.add_argument("--run", type=str, nargs="+") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") 
+parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q) diff --git a/examples/simulate.py b/examples/simulate.py index 60767b6b7..6ad0048eb 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -45,6 +45,12 @@ def parse_args(args): action='store_true', help='Specifies whether to generate an emission file from the ' 'simulation.') + parser.add_argument( + '--to_aws', + type=str, nargs='?', default=None, const="default", + help='Specifies the name of the partition to store the output' + 'file on S3. Putting not None value for this argument' + 'automatically set gen_emission to True.') return parser.parse_known_args(args)[0] @@ -52,6 +58,8 @@ def parse_args(args): if __name__ == "__main__": flags = parse_args(sys.argv[1:]) + flags.gen_emission = flags.gen_emission or flags.to_aws + # Get the flow_params object. module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config]) flow_params = getattr(module, flags.exp_config).flow_params @@ -74,4 +82,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4004b1c4d..6e6734764 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,6 +88,9 @@ def get_action(self, env): float the modified form of the acceleration """ + # clear the current stored accel_without_noise of this vehicle None + env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed if len(env.k.vehicle.get_edge(self.veh_id)) == 0: @@ -105,6 +108,15 @@ def get_action(self, env): if accel is None: return None + # store the acceleration without noise to each vehicle + # run fail safe if requested + accel_without_noice = accel + if self.fail_safe == 'instantaneous': + accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + elif self.fail_safe == 'safe_velocity': + accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 69a78cb0e..a81f3b130 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_table, upload_to_s3 import datetime import logging import time @@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, 
num_runs, rl_actions=None, convert_to_csv=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters @@ -98,6 +99,10 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False): convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. 
for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index d9fc773cd..72951a5d6 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -663,3 +663,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 657b89a94..051797b10 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -109,6 +109,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1094,3 +1095,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" 
self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): + self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From adcc61787729ec7a60af1bb5e294f1df2eeca825 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 020/335] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 224 +----------------------------- flow/core/kernel/vehicle/base.py | 4 + flow/core/kernel/vehicle/traci.py | 3 + 7 files changed, 107 insertions(+), 231 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def 
generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + + # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - 
expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + +testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE 
trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a81f3b130..63c92e798 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,223 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_table, upload_to_s3 -import datetime -import logging -import time -import os -import numpy as np - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. 
- - This class acts as a runner for a network and environment. In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. 
- env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. - - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. 
- - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. 
- veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) - - if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + output_file.split('/')[-1], output_file) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 72951a5d6..2a5cf4596 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -676,6 +676,10 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """ see traci class """ raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """ see traci class """ raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 051797b10..f5ea8e2eb 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1106,6 +1106,9 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): return self.__vehicles[veh_id]["accel_without_noise"] + def get_2D_position(self, veh_id, error=-1001): + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_road_grade(self, veh_id): # TODO return 0 From 722f4393515247907bd14260b803fd5fb6510533 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:32:00 -0700 Subject: [PATCH 021/335] Added testing for imitation learning with dagger, more code cleanup --- flow/controllers/dagger/.idea/dagger.iml | 12 ++ flow/controllers/dagger/bottleneck_env.py | 150 +++++++++++++++ .../dagger/imitating_controller.py | 104 ++++++++++ flow/controllers/dagger/replay_buffer.py | 64 
+++++++ flow/controllers/dagger/ring_env.py | 85 +++++++++ flow/controllers/dagger/run.py | 79 ++++++++ flow/controllers/dagger/trainer.py | 179 ++++++++++++++++++ flow/controllers/dagger/utils.py | 173 +++++++++++++++++ 8 files changed, 846 insertions(+) create mode 100644 flow/controllers/dagger/.idea/dagger.iml create mode 100644 flow/controllers/dagger/bottleneck_env.py create mode 100644 flow/controllers/dagger/imitating_controller.py create mode 100644 flow/controllers/dagger/replay_buffer.py create mode 100644 flow/controllers/dagger/ring_env.py create mode 100644 flow/controllers/dagger/run.py create mode 100644 flow/controllers/dagger/trainer.py create mode 100644 flow/controllers/dagger/utils.py diff --git a/flow/controllers/dagger/.idea/dagger.iml b/flow/controllers/dagger/.idea/dagger.iml new file mode 100644 index 000000000..0bc0e0321 --- /dev/null +++ b/flow/controllers/dagger/.idea/dagger.iml @@ -0,0 +1,12 @@ + + + + + + + + + + \ No newline at end of file diff --git a/flow/controllers/dagger/bottleneck_env.py b/flow/controllers/dagger/bottleneck_env.py new file mode 100644 index 000000000..820244a87 --- /dev/null +++ b/flow/controllers/dagger/bottleneck_env.py @@ -0,0 +1,150 @@ +"""Bottleneck example. 
+Bottleneck in which the actions are specifying a desired velocity +in a segment of space +""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ + InFlows, SumoCarFollowingParams, SumoLaneChangeParams +from flow.core.params import TrafficLightParams +from flow.core.params import VehicleParams +from flow.controllers import RLController, ContinuousRouter, \ + SimLaneChangeController +from flow.envs import BottleneckDesiredVelocityEnv +from flow.networks import BottleneckNetwork + +# time horizon of a single rollout +HORIZON = 1000 +# number of parallel workers +N_CPUS = 2 +# number of rollouts per training iteration +N_ROLLOUTS = N_CPUS * 4 + +SCALING = 1 +NUM_LANES = 4 * SCALING # number of lanes in the widest highway +DISABLE_TB = True +DISABLE_RAMP_METER = True +AV_FRAC = 0.10 + +vehicles = VehicleParams() +vehicles.add( + veh_id="human", + lane_change_controller=(SimLaneChangeController, {}), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + speed_mode="all_checks", + ), + lane_change_params=SumoLaneChangeParams( + lane_change_mode=0, + ), + num_vehicles=1 * SCALING) +vehicles.add( + veh_id="followerstopper", + acceleration_controller=(RLController, {}), + lane_change_controller=(SimLaneChangeController, {}), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + speed_mode=9, + ), + lane_change_params=SumoLaneChangeParams( + lane_change_mode=0, + ), + num_vehicles=1 * SCALING) + +controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), + ("4", 2, True), ("5", 1, False)] +num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] +additional_env_params = { + "target_velocity": 40, + "disable_tb": True, + "disable_ramp_metering": True, + "controlled_segments": controlled_segments, + "symmetric": False, + "observed_segments": num_observed_segments, + "reset_inflow": False, + "lane_change_duration": 5, + "max_accel": 3, + 
"max_decel": 3, + "inflow_range": [1000, 2000] +} + +# flow rate +flow_rate = 2300 * SCALING + +# percentage of flow coming out of each lane +inflow = InFlows() +inflow.add( + veh_type="human", + edge="1", + vehs_per_hour=flow_rate * (1 - AV_FRAC), + departLane="random", + departSpeed=10) +inflow.add( + veh_type="followerstopper", + edge="1", + vehs_per_hour=flow_rate * AV_FRAC, + departLane="random", + departSpeed=10) + +traffic_lights = TrafficLightParams() +if not DISABLE_TB: + traffic_lights.add(node_id="2") +if not DISABLE_RAMP_METER: + traffic_lights.add(node_id="3") + +additional_net_params = {"scaling": SCALING, "speed_limit": 23} +net_params = NetParams( + inflows=inflow, + additional_params=additional_net_params) + +flow_params = dict( + # name of the experiment + exp_tag="DesiredVelocity", + + # name of the flow environment the experiment is running on + env_name=BottleneckDesiredVelocityEnv, + + # name of the network class the experiment is running on + network=BottleneckNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + print_warnings=False, + restart_instance=True, + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + warmup_steps=40, + sims_per_step=1, + horizon=HORIZON, + additional_params=additional_env_params, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + additional_params=additional_net_params, + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + spacing="uniform", + min_gap=5, + lanes_distribution=float("inf"), 
+ edges_distribution=["2", "3", "4", "5"], + ), + + # traffic lights to be introduced to specific nodes (see + # flow.core.params.TrafficLightParams) + tls=traffic_lights, +) diff --git a/flow/controllers/dagger/imitating_controller.py b/flow/controllers/dagger/imitating_controller.py new file mode 100644 index 000000000..0adffb6cd --- /dev/null +++ b/flow/controllers/dagger/imitating_controller.py @@ -0,0 +1,104 @@ +import numpy as np +import tensorflow as tf +from utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + +class ImitatingController(BaseController): + """ + Controller which learns to imitate another given expert controller. + """ + # Implementation in Tensorflow + + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + + BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.inject_noise=inject_noise + self.noise_variance = noise_variance + + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() + + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + + def build_network(self): + """ + Defines neural network for choosing actions. 
+ """ + self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() + + + def define_placeholders(self): + """ + Defines input, output, and training placeholders for neural net + """ + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("TYPE: ", type(self.obs_placeholder)) + + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] + + return ret_val + + def get_accel(self, env): 
+ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + """ Sample a batch of data from replay buffer """ + + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + self.saver.save(self.sess, save_path) diff --git a/flow/controllers/dagger/replay_buffer.py b/flow/controllers/dagger/replay_buffer.py new file mode 100644 index 000000000..4e362bd41 --- /dev/null +++ b/flow/controllers/dagger/replay_buffer.py @@ -0,0 +1,64 @@ +import time +import numpy as np +import tensorflow as tf +import gym +import os +from utils import * + + +class ReplayBuffer(object): + """ Replay buffer class to store state, action, expert_action, reward, next_state, terminal tuples""" + + def __init__(self, max_size=100000): + + self.max_size = max_size + + # store each rollout + self.rollouts = [] + + # store component arrays from each rollout + self.observations = None + self.actions = None + self.expert_actions = None + self.rewards = None + self.next_observations = None + self.terminals = None + + + def add_rollouts(self, rollouts_list): + """ + Add a list of rollouts to the replay buffer + """ + + for rollout in rollouts_list: + self.rollouts.append(rollout) + + observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + + assert (not np.any(np.isnan(expert_actions))), "Invalid actions added to replay buffer" + + if self.observations is None: + self.observations = observations[-self.max_size:] + self.actions = actions[-self.max_size:] + self.expert_actions = expert_actions[-self.max_size:] + self.rewards = rewards[-self.max_size:] + self.next_observations = 
next_observations[-self.max_size:] + self.terminals = terminals[-self.max_size:] + else: + self.observations = np.concatenate([self.observations, observations])[-self.max_size:] + self.actions = np.concatenate([self.actions, actions])[-self.max_size:] + self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] + self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] + self.next_observations = np.concatenate([self.next_observations, next_observations])[-self.max_size:] + self.terminals = np.concatenate([self.terminals, terminals])[-self.max_size:] + + def sample_batch(self, batch_size): + """ + Sample a batch of data (with size batch_size) from replay buffer. + Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals + """ + assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None + + size = len(self.observations) + rand_inds = np.random.randint(0, size, batch_size) + return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] diff --git a/flow/controllers/dagger/ring_env.py b/flow/controllers/dagger/ring_env.py new file mode 100644 index 000000000..20ced1ce9 --- /dev/null +++ b/flow/controllers/dagger/ring_env.py @@ -0,0 +1,85 @@ +"""Ring road example. +Trains a single autonomous vehicle to stabilize the flow of 21 human-driven +vehicles in a variable length ring road. 
+""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams +from flow.core.params import VehicleParams, SumoCarFollowingParams +from flow.controllers import RLController, IDMController, ContinuousRouter +from flow.envs import WaveAttenuationPOEnv +from flow.networks import RingNetwork + +# time horizon of a single rollout +HORIZON = 3000 +# number of rollouts per training iteration +N_ROLLOUTS = 20 +# number of parallel workers +N_CPUS = 2 + +# We place one autonomous vehicle and 22 human-driven vehicles in the network +vehicles = VehicleParams() +vehicles.add( + veh_id="human", + acceleration_controller=(IDMController, { + "noise": 0.2 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0 + ), + routing_controller=(ContinuousRouter, {}), + num_vehicles=21) +vehicles.add( + veh_id="rl", + acceleration_controller=(RLController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=1) + +flow_params = dict( + # name of the experiment + exp_tag="stabilizing_the_ring", + + # name of the flow environment the experiment is running on + env_name=WaveAttenuationPOEnv, + + # name of the network class the experiment is running on + network=RingNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.1, + render=False, + restart_instance=False + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=750, + clip_actions=False, + additional_params={ + "max_accel": 1, + "max_decel": 1, + "ring_length": [220, 270], + }, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + additional_params={ + "length": 260, + "lanes": 1, + "speed_limit": 30, + "resolution": 40, + }, ), + + # vehicles to be placed in the network at the start of a rollout (see + # 
flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/dagger/run.py b/flow/controllers/dagger/run.py new file mode 100644 index 000000000..c647f37cd --- /dev/null +++ b/flow/controllers/dagger/run.py @@ -0,0 +1,79 @@ +import os +import time +import numpy as np +import tensorflow as tf +from trainer import Trainer +from flow.controllers.car_following_models import IDMController + + +class Runner(object): + """ Class to run imitation learning (training and evaluation) """ + + def __init__(self, params): + + # initialize trainer + self.params = params + self.trainer = Trainer(params) + + def run_training_loop(self): + + self.trainer.run_training_loop(n_iter=self.params['n_iter']) + + def evaluate(self): + self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + + def save_controller_network(self): + self.trainer.save_controller_network() + + +def main(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--ep_len', type=int, default=3000) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--n_iter', '-n', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=3000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned + parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned + parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning 
rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--save_path', type=str, default='') + parser.add_argument('--save_model', type=int, default=0) + parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--noise_variance',type=float, default=0.5) + parser.add_argument('--vehicle_id', type=str, default='rl_0') + + args = parser.parse_args() + + # convert args to dictionary + params = vars(args) + print("INJECT: ", params['inject_noise']) + assert args.n_iter>1, ('DAgger needs >1 iteration') + + + # run training + train = Runner(params) + train.run_training_loop() + + # evaluate + train.evaluate() + print("DONE") + + if params['save_model'] == 1: + train.save_controller_network() + + # tensorboard + if params['save_model'] == 1: + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + +if __name__ == "__main__": + main() diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/dagger/trainer.py new file mode 100644 index 000000000..03364f528 --- /dev/null +++ b/flow/controllers/dagger/trainer.py @@ -0,0 +1,179 @@ +import time +from collections import OrderedDict +import pickle +import numpy as np +import tensorflow as tf +import gym +import os +from flow.utils.registry import make_create_env +from bottleneck_env import flow_params +from imitating_controller import ImitatingController +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import SumoCarFollowingParams +from utils import * + +class Trainer(object): + """ + Class to initialize and run training for imitation learning (with DAgger) + """ + + def __init__(self, params): + self.params = params + self.sess = create_tf_session() + + create_env, _ = make_create_env(flow_params) + self.env = create_env() + self.env.reset() 
+ + print(self.env.k.vehicle.get_ids()) + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_id = self.params['vehicle_id'] + + obs_dim = self.env.observation_space.shape[0] + + action_dim = (1,)[0] + self.params['action_dim'] = action_dim + self.params['obs_dim'] = obs_dim + + car_following_params = SumoCarFollowingParams() + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) + + tf.global_variables_initializer().run(session=self.sess) + + + def run_training_loop(self, n_iter): + """ + Trains controller for n_iter iterations + + Args: + param n_iter: number of iterations to execute training + """ + + # init vars at beginning of training + self.total_envsteps = 0 + self.start_time = time.time() + + for itr in range(n_iter): + print("\n\n********** Iteration %i ************"%itr) + + # collect trajectories, to be used for training + if itr == 0: + # first iteration is standard behavioral cloning + training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) + else: + training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) + + paths, envsteps_this_batch = training_returns + self.total_envsteps += envsteps_this_batch + + # add collected data to replay buffer + self.controller.add_to_replay_buffer(paths) + + # train controller (using sampled data from replay buffer) + loss = self.train_controller() + + def collect_training_trajectories(self, itr, batch_size): + """ + 
Collect (state, action, reward, next_state, terminal) tuples for training + + Args: + itr: iteration of training during which functino is called + batch_size: number of tuples to collect + Returns: + paths: list of trajectories + envsteps_this_batch: the sum over the numbers of environment steps in paths + """ + + if itr == 0: + collect_controller = self.expert_controller + else: + collect_controller = self.controller + + print("\nCollecting data to be used for training...") + paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + + return paths, envsteps_this_batch + + def train_controller(self): + """ + Trains controller using data sampled from replay buffer + """ + + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + self.controller.train(ob_batch, expert_ac_batch) + + def evaluate_controller(self, num_trajs = 10): + """ + Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + + Args: + num_trajs: number of trajectories to evaluate performance on + """ + + print("\n\n********** Evaluation ************ \n") + + trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_imitator_reward = 0 + total_imitator_steps = 0 + average_imitator_reward_per_rollout = 0 + + action_errors = np.array([]) + average_action_expert = 0 + average_action_imitator = 0 + + # compare actions taken in each step of trajectories + for traj in trajectories: + imitator_actions = traj['actions'] + expert_actions = traj['expert_actions'] + + average_action_expert += np.sum(expert_actions) + average_action_imitator 
+= np.sum(imitator_actions) + + action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + action_errors = np.append(action_errors, action_error) + + average_imitator_reward += np.sum(traj['rewards']) + total_imitator_steps += len(traj['rewards']) + average_imitator_reward_per_rollout += np.sum(traj['rewards']) + + average_imitator_reward = average_imitator_reward / total_imitator_steps + average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) + + average_action_expert = average_action_expert / total_imitator_steps + average_action_imitator = average_action_imitator / total_imitator_steps + + + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_expert_reward = 0 + total_expert_steps = 0 + average_expert_reward_per_rollout = 0 + + # compare reward accumulated in trajectories collected via expert vs. via imitator + for traj in expert_trajectories: + average_expert_reward += np.sum(traj['rewards']) + total_expert_steps += len(traj['rewards']) + average_expert_reward_per_rollout += np.sum(traj['rewards']) + + average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) + average_expert_reward = average_expert_reward / total_expert_steps + + print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) + print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) + print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") + + print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + + print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + + 
def save_controller_network(self): + print("Saving tensorflow model to: ", self.params['save_path']) + self.controller.save_network(self.params['save_path']) diff --git a/flow/controllers/dagger/utils.py b/flow/controllers/dagger/utils.py new file mode 100644 index 000000000..a5bf7acfa --- /dev/null +++ b/flow/controllers/dagger/utils.py @@ -0,0 +1,173 @@ +import tensorflow as tf +import os +import numpy as np +import math + +""" Class agnostic helper functions """ + +def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): + """ + Samples a trajectory for a given vehicle using the actions prescribed by specified controller. + + Args: + env: environment + vehicle_id: id of the vehicle that is being controlled/tracked during trajectory + controller: subclass of BaseController, decides actions taken by vehicle + expert_controller: subclass of BaseController, "expert" for imitation learning + max_trajectory_length: maximum steps in a trajectory + + Returns: + Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples + """ + + print("COLLECTING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) + observation = env.reset() + + assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" 
+ + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] + traj_length = 0 + + while True: + action = controller.get_action(env) + + if type(action) == np.ndarray: + action = action.flatten()[0] + + expert_action = expert_controller.get_action(env) + if (expert_action is None or math.isnan(expert_action)): + observation, reward, done, _ = env.step(action) + traj_length += 1 + terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: + break + continue + + observations.append(observation) + actions.append(action) + expert_actions.append(expert_action) + observation, reward, done, _ = env.step(action) + + traj_length += 1 + next_observations.append(observation) + rewards.append(reward) + terminate_rollout = (traj_length == max_trajectory_length) or done + terminals.append(terminate_rollout) + + if terminate_rollout: + break + + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + + +def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): + """ + Samples trajectories to collect at least min_batch_timesteps steps in the environment + + Args: + env: environment + vehicle_id: id of vehicle being tracked/controlled + controller: subclass of BaseController, decides actions taken by vehicle + expert_controller: subclass of BaseController, "expert" for imitation learning + min_batch_timesteps: minimum number of environment steps to collect + max_trajectory_length: maximum steps in a trajectory + + Returns: + List of rollout dictionaries, total steps taken by environment + """ + total_envsteps = 0 + trajectories = [] + + while total_envsteps < min_batch_timesteps: + trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + trajectories.append(trajectory) + + traj_env_steps = len(trajectory["rewards"]) + total_envsteps += traj_env_steps + + return 
trajectories, total_envsteps + +def sample_n_trajectories(env, vehicle_id, controller, expert_controller, n, max_trajectory_length): + """ + Collects a fixed number of trajectories. + + Args: + env: environment + vehicle_id: id of vehicle being tracked/controlled + controller: subclass of BaseController, decides actions taken by vehicle + expert_controller: subclass of BaseController, "expert" for imitation learning + n: number of trajectories to collect + max_trajectory_length: maximum steps in a trajectory + + Returns: + List of rollout dictionaries + + """ + trajectories = [] + for _ in range(n): + trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + trajectories.append(trajectory) + + return trajectories + + +def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): + """ + Collects individual observation, action, expert_action, rewards, next observation, terminal arrays into a single rollout dictionary + """ + return {"observations" : np.array(observations, dtype=np.float32), + "actions" : np.array(actions, dtype=np.float32), + "expert_actions": np.array(expert_actions, dtype=np.float32), + "rewards" : np.array(rewards, dtype=np.float32), + "next_observations": np.array(next_observations, dtype=np.float32), + "terminals": np.array(terminals, dtype=np.float32)} + + +def unpack_rollouts(rollouts_list): + """ + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" + return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + """ + observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) + actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) + expert_actions = 
np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) + rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) + next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) + terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) + + return observations, actions, expert_actions, rewards, next_observations, terminals + + +# Below are tensorflow related functions + +def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): + """ + Builds a feedfoward neural network for action prediction + + arguments: + input_placeholder: placeholder variable for the state (batch_size, input_size) + scope: variable scope of the network + + n_layers: number of hidden layers + size: dimension of each hidden layer + activation: activation of each hidden layer + + output_size: size of the output layer + output_activation: activation of the output layer + + returns: + output_placeholder: the result of pass through Neural Network + """ + output_placeholder = input_placeholder + with tf.variable_scope(scope): + for _ in range(n_layers): + output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) + output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) + return output_placeholder + +def create_tf_session(): + config = tf.ConfigProto(device_count={'GPU': 0}) + sess = tf.Session(config=config) + return sess From e9d763487ff998ddba40ef7e222c080749d1c872 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:33:51 -0700 Subject: [PATCH 022/335] Code cleanup, added testing/eval for imitation learning --- flow/dagger/.idea/dagger.iml | 12 - flow/dagger/Untitled.ipynb | 511 --------------------------- flow/dagger/env_params_test.py | 85 ----- flow/dagger/env_params_test2.py | 47 --- flow/dagger/imitating_agent.py | 26 -- 
flow/dagger/imitating_controller.py | 76 ++-- flow/dagger/imitating_controller2.py | 85 ----- flow/dagger/replay_buffer.py | 61 ---- flow/dagger/run.py | 32 +- flow/dagger/trainer.py | 133 +++++-- flow/dagger/useless.py | 147 -------- flow/dagger/utils.py | 119 ------- 12 files changed, 183 insertions(+), 1151 deletions(-) delete mode 100644 flow/dagger/.idea/dagger.iml delete mode 100644 flow/dagger/Untitled.ipynb delete mode 100644 flow/dagger/env_params_test.py delete mode 100644 flow/dagger/env_params_test2.py delete mode 100644 flow/dagger/imitating_agent.py delete mode 100644 flow/dagger/imitating_controller2.py delete mode 100644 flow/dagger/replay_buffer.py delete mode 100644 flow/dagger/useless.py delete mode 100644 flow/dagger/utils.py diff --git a/flow/dagger/.idea/dagger.iml b/flow/dagger/.idea/dagger.iml deleted file mode 100644 index 0bc0e0321..000000000 --- a/flow/dagger/.idea/dagger.iml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - \ No newline at end of file diff --git a/flow/dagger/Untitled.ipynb b/flow/dagger/Untitled.ipynb deleted file mode 100644 index 0f1ac9809..000000000 --- a/flow/dagger/Untitled.ipynb +++ /dev/null @@ -1,511 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" - ] - } - ], - "source": [ - "import tensorflow as tf\n", - "import numpy as np\n", - "import gym" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [ - { - "ename": "ModuleNotFoundError", - "evalue": "No module named 'env_params'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m 
\u001b[0;32mfrom\u001b[0m \u001b[0menv_params\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'env_params'" - ] - } - ], - "source": [ - "from env_params import flow_params as flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", - " PendingDeprecationWarning\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departLane in InFlows is deprecated, use depart_lane instead.\n", - " PendingDeprecationWarning\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "from flow.utils.registry import make_create_env\n", - "from flow.controllers import IDMController, ContinuousRouter\n", - "from flow.core.experiment import Experiment\n", - "from flow.core.params import SumoParams, EnvParams, \\\n", - " InitialConfig, NetParams\n", - "from flow.core.params import VehicleParams\n", - "from flow.networks.ring import RingNetwork, ADDITIONAL_NET_PARAMS\n", - "import numpy as np\n", - "from flow.core.experiment import 
Experiment\n", - "from flow.core.params import InFlows\n", - "from flow.core.params import SumoLaneChangeParams\n", - "from flow.core.params import SumoCarFollowingParams\n", - "from flow.core.params import VehicleParams\n", - "from flow.controllers import ContinuousRouter\n", - "from flow.benchmarks.bottleneck0 import flow_params\n", - "from flow.benchmarks.bottleneck0 import SCALING" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "# sim_params = flow_params['sim']\n", - "# env_params = flow_params['env']\n", - "# net_params = flow_params['net']\n", - "\n", - "# # we want no autonomous vehicles in the simulation\n", - "# vehicles = VehicleParams()\n", - "# vehicles.add(veh_id='human',\n", - "# car_following_params=SumoCarFollowingParams(\n", - "# speed_mode=9,\n", - "# ),\n", - "# routing_controller=(ContinuousRouter, {}),\n", - "# lane_change_params=SumoLaneChangeParams(\n", - "# lane_change_mode=0,\n", - "# ),\n", - "# num_vehicles=1 * SCALING)\n", - "\n", - "# # only include human vehicles in inflows\n", - "# flow_rate = 2300 * SCALING\n", - "# inflow = InFlows()\n", - "# inflow.add(veh_type='human', edge='1',\n", - "# vehs_per_hour=flow_rate,\n", - "# departLane='random', departSpeed=10)\n", - "# net_params.inflows = inflow\n", - "\n", - "# # modify the rendering to match what is requested\n", - "# # sim_params.render = render\n", - "\n", - "# # set the evaluation flag to True\n", - "# env_params.evaluate = True\n", - "\n", - "# flow_params['env'].horizon = env_params.horizon" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": 
"stream", - "text": [ - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 28341 is still running\n", - " ResourceWarning, source=self)\n" - ] - }, - { - "data": { - "text/plain": [ - "array([0. , 0.1 , 0.05 , 0. , 0. ,\n", - " 0. , 0.05 , 0.05 , 0. , 0.05 ,\n", - " 0. , 0.05 , 0.05 , 0. , 0.1 ,\n", - " 0. , 0.05 , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0.05 , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0.05 ,\n", - " 0. , 0.05 , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0.3234498 , 0.35290716, 0. , 0. ,\n", - " 0. , 0.39880784, 0.41698796, 0. , 0.4171411 ,\n", - " 0. , 0.49073983, 0.40911561, 0. , 0.43184929,\n", - " 0. , 0.41929399, 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0.27513936, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0.44301522,\n", - " 0. , 0.44301522, 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. 
])" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "state = env.reset()\n", - "state" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'bottleneck_0'" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "flow_params[\"exp_tag\"]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = {}\n", - "for veh_id in env.k.vehicle.get_ids():\n", - " print(veh_id)\n", - " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", - "print(flow_params.env['horizon'])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "state, reward, done, _ = env.step(env.action_space.sample())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from env_params_test import name" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "name" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "from env_params_test import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "-----------------------\n", - "ring length: 
265\n", - "v_max: 5.37714246265477\n", - "-----------------------\n" - ] - }, - { - "data": { - "text/plain": [ - "array([ 0.31246011, -0.00413767, 0.04496073])" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "state = env.reset()\n", - "state" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "vehicle_id = 'rl_0'" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.car_following_models import IDMController\n", - "car_following_params = SumoCarFollowingParams()\n", - "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(array([ 0.31200989, -0.00526746, 0.04493147]), 0.6436939709782903, False, {})" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.step(idm_controller.get_action(env))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(200):\n", - " rl_actions = {}\n", - " for veh_id in env.k.vehicle.get_ids():\n", - " # print(veh_id)\n", - " rl_actions[veh_id] = env.k.vehicle.get_acc_controller(veh_id).get_action(env)\n", - " print(env.get_state())\n", - " env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.car_following_models import IDMController\n" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "vehicle_id = env.k.vehicle.get_ids()[0]\n", - "vehicle_id" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "car_following_params = SumoCarFollowingParams()\n", - "idm_controller = IDMController(vehicle_id, car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "idm_controller.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ob, rew, done, _ = env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from env_params import flow_params\n", - "create_env, _ = make_create_env(flow_params)\n", - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.k.vehicle.get_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/dagger/env_params_test.py b/flow/dagger/env_params_test.py deleted file mode 100644 index 20ced1ce9..000000000 --- a/flow/dagger/env_params_test.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Ring road example. -Trains a single autonomous vehicle to stabilize the flow of 21 human-driven -vehicles in a variable length ring road. 
-""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams -from flow.core.params import VehicleParams, SumoCarFollowingParams -from flow.controllers import RLController, IDMController, ContinuousRouter -from flow.envs import WaveAttenuationPOEnv -from flow.networks import RingNetwork - -# time horizon of a single rollout -HORIZON = 3000 -# number of rollouts per training iteration -N_ROLLOUTS = 20 -# number of parallel workers -N_CPUS = 2 - -# We place one autonomous vehicle and 22 human-driven vehicles in the network -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, { - "noise": 0.2 - }), - car_following_params=SumoCarFollowingParams( - min_gap=0 - ), - routing_controller=(ContinuousRouter, {}), - num_vehicles=21) -vehicles.add( - veh_id="rl", - acceleration_controller=(RLController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=1) - -flow_params = dict( - # name of the experiment - exp_tag="stabilizing_the_ring", - - # name of the flow environment the experiment is running on - env_name=WaveAttenuationPOEnv, - - # name of the network class the experiment is running on - network=RingNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.1, - render=False, - restart_instance=False - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=750, - clip_actions=False, - additional_params={ - "max_accel": 1, - "max_decel": 1, - "ring_length": [220, 270], - }, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - additional_params={ - "length": 260, - "lanes": 1, - "speed_limit": 30, - "resolution": 40, - }, ), - - # vehicles to be placed in the network at the start of a rollout (see - # 
flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) diff --git a/flow/dagger/env_params_test2.py b/flow/dagger/env_params_test2.py deleted file mode 100644 index 7140af720..000000000 --- a/flow/dagger/env_params_test2.py +++ /dev/null @@ -1,47 +0,0 @@ -from flow.networks.ring import RingNetwork -name = "ring_example" - -from flow.core.params import VehicleParams -vehicles = VehicleParams() - -from flow.controllers.car_following_models import IDMController -from flow.controllers.routing_controllers import ContinuousRouter -from imitating_controller2 import ImitatingController -vehicles.add("human", - acceleration_controller=(IDMController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=22) - -from flow.networks.ring import ADDITIONAL_NET_PARAMS -from flow.core.params import NetParams -net_params = NetParams(additional_params=ADDITIONAL_NET_PARAMS) - -from flow.core.params import InitialConfig -initial_config = InitialConfig(spacing="uniform", perturbation=1) - -from flow.core.params import TrafficLightParams -traffic_lights = TrafficLightParams() - -from flow.envs.ring.accel import AccelEnv -from flow.core.params import SumoParams -sim_params = SumoParams(sim_step=0.1, render=False, emission_path='data') - -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS -from flow.core.params import EnvParams -env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) - -flow_params_test = dict( - exp_tag='ring_example', - env_name=AccelEnv, - network=RingNetwork, - simulator='traci', - sim=sim_params, - env=env_params, - net=net_params, - veh=vehicles, - initial=initial_config, - tls=traffic_lights, -) - -# number of time steps -flow_params_test['env'].horizon = 3000 diff --git a/flow/dagger/imitating_agent.py b/flow/dagger/imitating_agent.py deleted file mode 100644 index f5b09dee3..000000000 --- 
a/flow/dagger/imitating_agent.py +++ /dev/null @@ -1,26 +0,0 @@ -import numpy as np -import tensorflow as tf -import time -from imitating_controller2 import * -from replay_buffer - -class Imitating_Agent(object): - # ignore this class! - def __init__(self, sess, env, params): - self.env = env - self.sess = sess - self.params = params - - self.policy = Imitator_Policy(sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate']) - - self.replay_buffer = ReplayBuffer(self.params['replay_buffer_size']) - - - def train(self, obs, acts): - self.policy.update(obs, acts) - - def add_to_replay_buffer(self, rollout_list): - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py index 2537d70b8..0adffb6cd 100644 --- a/flow/dagger/imitating_controller.py +++ b/flow/dagger/imitating_controller.py @@ -1,20 +1,18 @@ import numpy as np -import tensorflow -from tensorflow import keras import tensorflow as tf from utils import * +import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer -from tensorflow.keras.models import Sequential -from tensorflow.keras.layers import Dense -from tensorflow.keras.activations import * - class ImitatingController(BaseController): - # Implementation in Keras just for testing + """ + Controller which learns to imitate another given expert controller. 
+ """ + # Implementation in Tensorflow - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) self.sess = sess @@ -24,9 +22,11 @@ def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning self.size = size self.learning_rate = learning_rate self.training = training - self.model = Sequential() - self.build_network() + self.inject_noise=inject_noise + self.noise_variance = noise_variance + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() if self.training: @@ -34,27 +34,54 @@ def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning else: self.replay_buffer = None + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + def build_network(self): - self.model.add(Dense(self.size, input_dim=self.obs_dim, activation='tanh')) - for _ in range(self.num_layers): - self.model.add(Dense(self.size, activation='relu')) - # No activation - self.model.add(Dense(self.action_dim)) - self.model.compile(loss='mean_squared_error', optimizer='adam') + """ + Defines neural network for choosing actions. 
+ """ + self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() - def train(self, observation_batch, action_batch): - assert(self.training, "Policy must be trainable") - assert (not np.any(np.isnan(action_batch))), "NANs in training labels" + def define_placeholders(self): + """ + Defines input, output, and training placeholders for neural net + """ + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("TYPE: ", type(self.obs_placeholder)) + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - history = self.model.fit(observation_batch, action_batch) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of 
arrays if len(observation.shape)<=1: observation = observation[None] - ret_val = self.model.predict(observation) + ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] return ret_val @@ -64,7 +91,14 @@ def get_accel(self, env): return self.get_accel_from_observation(observation) def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + self.replay_buffer.add_rollouts(rollout_list) def sample_data(self, batch_size): + """ Sample a batch of data from replay buffer """ + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + self.saver.save(self.sess, save_path) diff --git a/flow/dagger/imitating_controller2.py b/flow/dagger/imitating_controller2.py deleted file mode 100644 index 65c7c9d1d..000000000 --- a/flow/dagger/imitating_controller2.py +++ /dev/null @@ -1,85 +0,0 @@ -import numpy as np -import tensorflow as tf -from utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - - - -class ImitatingController(BaseController): - # Implementation in Tensorflow - - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): - - BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - - - - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() - - - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name 
and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - - def build_network(self): - self.define_placeholders() - self.define_forward_pass() - if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): - self.define_train_op() - - def define_placeholders(self): - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) - - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - def define_forward_pass(self): - pred_action = build_mlp(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action - - def define_train_op(self): - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions - - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) - - def train(self, observation_batch, action_batch): - assert(self.training, "Policy must be trainable") - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - - def get_accel_from_observation(self, observation): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - - return ret_val - - def get_accel(self, env): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - observation = 
env.get_state() - return self.get_accel_from_observation(observation) - - def add_to_replay_buffer(self, rollout_list): - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - return self.replay_buffer.sample_batch(batch_size) diff --git a/flow/dagger/replay_buffer.py b/flow/dagger/replay_buffer.py deleted file mode 100644 index 1213b985e..000000000 --- a/flow/dagger/replay_buffer.py +++ /dev/null @@ -1,61 +0,0 @@ -import time -import numpy as np -import tensorflow as tf -import gym -import os -from utils import * - - -class ReplayBuffer(object): - def __init__(self, max_size=100000): - - self.max_size = max_size - - # store each rollout - self.rollouts = [] - - # store component arrays from each rollout - self.observations = None - self.actions = None - self.expert_actions = None - self.rewards = None - self.next_observations = None - self.terminals = None - - - def add_rollouts(self, rollouts_list): - """ - Add a list of rollouts to the replay buffer - """ - - for rollout in rollouts_list: - self.rollouts.append(rollout) - - observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) - assert (not np.any(np.isnan(expert_actions))), "REPLAY BUFFER ERROR" - - if self.observations is None: - self.observations = observations[-self.max_size:] - self.actions = actions[-self.max_size:] - self.expert_actions = expert_actions[-self.max_size:] - self.rewards = rewards[-self.max_size:] - self.next_observations = next_observations[-self.max_size:] - self.terminals = terminals[-self.max_size:] - else: - self.observations = np.concatenate([self.observations, observations])[-self.max_size:] - self.actions = np.concatenate([self.actions, actions])[-self.max_size:] - self.expert_actions = np.concatenate([self.expert_actions, expert_actions])[-self.max_size:] - self.rewards = np.concatenate([self.rewards, rewards])[-self.max_size:] - self.next_observations = np.concatenate([self.next_observations, 
next_observations])[-self.max_size:] - self.terminals = np.concatenate([self.terminals, terminals])[-self.max_size:] - - def sample_batch(self, batch_size): - """ - Sample a batch of data (with size batch_size) from replay buffer. - Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals - """ - assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None - - size = len(self.observations) - rand_inds = np.random.randint(0, size, batch_size) - return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] diff --git a/flow/dagger/run.py b/flow/dagger/run.py index 67bac9dda..e53030bf7 100644 --- a/flow/dagger/run.py +++ b/flow/dagger/run.py @@ -7,6 +7,7 @@ class Runner(object): + """ Class to run imitation learning (training and evaluation) """ def __init__(self, params): @@ -18,6 +19,12 @@ def run_training_loop(self): self.trainer.run_training_loop(n_iter=self.params['n_iter']) + def evaluate(self): + self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + + def save_controller_network(self): + self.trainer.save_controller_network() + def main(): import argparse @@ -28,27 +35,44 @@ def main(): parser.add_argument('--n_iter', '-n', type=int, default=5) parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=5000) + parser.add_argument('--init_batch_size', type=int, default=3000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step - parser.add_argument('--num_layers', type=int, default=2) # depth, of policy to be learned + 
parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--save_path', type=str, default='') + parser.add_argument('--save_model', type=int, default=0) + parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--noise_variance',type=float, default=0.5) + parser.add_argument('--vehicle_id',type=str, default='rl_0') args = parser.parse_args() # convert args to dictionary params = vars(args) - - assert args.n_iter>1, ('DAGGER needs more than 1 iteration (n_iter>1) of training, to iteratively query the expert and train (after 1st warmstarting from behavior cloning).') + print("INJECT: ", params['inject_noise']) + assert args.n_iter>1, ('DAgger needs >1 iteration') # run training train = Runner(params) train.run_training_loop() + # evaluate + train.evaluate() + print("DONE") + + if params['save_model'] == 1: + train.save_controller_network() + + # tensorboard + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + if __name__ == "__main__": main() diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py index 3c72b0e63..141021ef3 100644 --- a/flow/dagger/trainer.py +++ b/flow/dagger/trainer.py @@ -6,13 +6,17 @@ import gym import os from flow.utils.registry import make_create_env -from env_params_test import flow_params -from imitating_controller2 import ImitatingController +from bottleneck_env import flow_params +from imitating_controller import ImitatingController from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import 
SumoCarFollowingParams from utils import * class Trainer(object): + """ + Class to initialize and run training for imitation learning (with DAgger) + """ def __init__(self, params): self.params = params @@ -22,33 +26,31 @@ def __init__(self, params): self.env = create_env() self.env.reset() - # might need to replace this hardcode - assert 'rl_0' in self.env.k.vehicle.get_ids() - self.vehicle_id = 'rl_0' + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_id = self.params['vehicle_id'] obs_dim = self.env.observation_space.shape[0] - # TODO: make sure this is correct action_dim = (1,)[0] self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params) - self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + + # initialize expert controller + # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) tf.global_variables_initializer().run(session=self.sess) def run_training_loop(self, n_iter): """ - :param n_iter: number of (dagger) iterations - :param collect_policy: - :param eval_policy: - :param initial_expertdata: - :param relabel_with_expert: whether to 
perform dagger - :param start_relabel_with_expert: iteration at which to start relabel with expert - :param expert_policy: + Trains controller for n_iter iterations + + Args: + param n_iter: number of iterations to execute training """ # init vars at beginning of training @@ -60,6 +62,7 @@ def run_training_loop(self, n_iter): # collect trajectories, to be used for training if itr == 0: + # first iteration is standard behavioral cloning training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) else: training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) @@ -70,19 +73,19 @@ def run_training_loop(self, n_iter): # add collected data to replay buffer self.controller.add_to_replay_buffer(paths) - # train agent (using sampled data from replay buffer) + # train controller (using sampled data from replay buffer) loss = self.train_controller() def collect_training_trajectories(self, itr, batch_size): """ - :param itr: - :param load_initial_expertdata: path to expert data pkl file - :param collect_policy: the current policy using which we collect data - :param batch_size: the number of transitions we collect - :return: - paths: a list trajectories + Collect (state, action, reward, next_state, terminal) tuples for training + + Args: + itr: iteration of training during which functino is called + batch_size: number of tuples to collect + Returns: + paths: list of trajectories envsteps_this_batch: the sum over the numbers of environment steps in paths - train_video_paths: paths which also contain videos for visualization purposes """ if itr == 0: @@ -90,24 +93,88 @@ def collect_training_trajectories(self, itr, batch_size): else: collect_controller = self.controller - print("\nCollecting data to be used for training...") + print("\nCollecting data for training") paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) 
return paths, envsteps_this_batch def train_controller(self): + """ + Trains controller using data sampled from replay buffer + """ + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): - # TODO: fix this ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) self.controller.train(ob_batch, expert_ac_batch) + def evaluate_controller(self, num_trajs = 10): + """ + Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + + Args: + num_trajs: number of trajectories to evaluate performance on + """ + + print("\n\n********** Evaluation ************\n\n") + + trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_imitator_reward = 0 + total_imitator_steps = 0 + average_imitator_reward_per_rollout = 0 + + action_errors = np.array([]) + average_action_expert = 0 + average_action_imitator = 0 + + for traj in trajectories: + imitator_actions = traj['actions'] + expert_actions = traj['expert_actions'] + + average_action_expert += np.sum(expert_actions) + average_action_imitator += np.sum(imitator_actions) + + action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + action_errors = np.append(action_errors, action_error) + + average_imitator_reward += np.sum(traj['rewards']) + total_imitator_steps += len(traj['rewards']) + average_imitator_reward_per_rollout += np.sum(traj['rewards']) + + average_imitator_reward = average_imitator_reward / total_imitator_steps + average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) + + average_action_expert = average_action_expert / total_imitator_steps + average_action_imitator = average_action_imitator / total_imitator_steps + + + 
print('Evaluating controller on reward') + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_expert_reward = 0 + total_expert_steps = 0 + average_expert_reward_per_rollout = 0 + + for traj in expert_trajectories: + average_expert_reward += np.sum(traj['rewards']) + total_expert_steps += len(traj['rewards']) + average_expert_reward_per_rollout += np.sum(traj['rewards']) + + average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) + average_expert_reward = average_expert_reward / total_expert_steps + + print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) + print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) + print("AVERAGE REWARD PER STEP DIFFERENCE: \n", np.abs(average_expert_reward - average_imitator_reward)) + print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout)) + print("MEAN ACTION, IMITATOR: ", average_action_imitator) + print("MEAN ACTION, EXPERT: ", average_action_expert) + print("MEAN ACTION ERROR: \n", np.mean(action_errors)) - # def do_relabel_with_expert(self, paths): - # print("Relabelling collected observations with labels from an expert policy...") - # - # for i in range(len(paths)): - # acs = self.expert_policy.get_action(paths[i]["observation"]) - # paths[i]["action"] = acs - # - # return paths + def save_controller_network(self): + print("Saving tensorflow model to: ", self.params['save_path']) + self.controller.save_network(self.params['save_path']) diff --git a/flow/dagger/useless.py b/flow/dagger/useless.py deleted file mode 100644 index 86f3ee9ad..000000000 --- a/flow/dagger/useless.py +++ /dev/null @@ -1,147 +0,0 @@ -# 
"""Benchmark for bottleneck0. -# Bottleneck in which the actions are specifying a desired velocity in a segment -# of space. The autonomous penetration rate in this example is 10%. -# - **Action Dimension**: (?, ) -# - **Observation Dimension**: (?, ) -# - **Horizon**: 1000 steps -# """ -# from flow.envs import BottleneckDesiredVelocityEnv -# from flow.networks import BottleneckNetwork -# from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ -# InFlows, SumoCarFollowingParams, SumoLaneChangeParams -# from flow.core.params import TrafficLightParams -# from flow.core.params import VehicleParams -# from flow.controllers import RLController, ContinuousRouter -# -# # time horizon of a single rollout -# HORIZON = 1500 -# -# SCALING = 1 -# NUM_LANES = 4 * SCALING # number of lanes in the widest highway -# DISABLE_TB = True -# DISABLE_RAMP_METER = True -# AV_FRAC = 0.10 -# -# vehicles = VehicleParams() -# vehicles.add( -# veh_id="human", -# routing_controller=(ContinuousRouter, {}), -# car_following_params=SumoCarFollowingParams( -# speed_mode=9, -# ), -# lane_change_params=SumoLaneChangeParams( -# lane_change_mode=0, -# ), -# num_vehicles=1 * SCALING) -# vehicles.add( -# veh_id="rl", -# acceleration_controller=(RLController, {}), -# routing_controller=(ContinuousRouter, {}), -# car_following_params=SumoCarFollowingParams( -# speed_mode=9, -# ), -# lane_change_params=SumoLaneChangeParams( -# lane_change_mode=0, -# ), -# num_vehicles=1 * SCALING) -# -# controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), -# ("4", 2, True), ("5", 1, False)] -# num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] -# -# additional_env_params = { -# "target_velocity": 40, -# "disable_tb": True, -# "disable_ramp_metering": True, -# "controlled_segments": controlled_segments, -# "symmetric": False, -# "observed_segments": num_observed_segments, -# "reset_inflow": False, -# "lane_change_duration": 5, -# "max_accel": 3, -# 
"max_decel": 3, -# "inflow_range": [1200, 2500] -# } -# -# # flow rate -# flow_rate = 2000 * SCALING -# -# # percentage of flow coming out of each lane -# inflow = InFlows() -# inflow.add( -# veh_type="human", -# edge="1", -# vehs_per_hour=flow_rate * (1 - AV_FRAC), -# departLane="random", -# departSpeed=10) -# inflow.add( -# veh_type="rl", -# edge="1", -# vehs_per_hour=flow_rate * AV_FRAC, -# departLane="random", -# departSpeed=10) -# -# traffic_lights = TrafficLightParams() -# if not DISABLE_TB: -# traffic_lights.add(node_id="2") -# if not DISABLE_RAMP_METER: -# traffic_lights.add(node_id="3") -# -# additional_net_params = {"scaling": SCALING, "speed_limit": 23} -# net_params = NetParams( -# inflows=inflow, -# additional_params=additional_net_params) -# -# flow_params = dict( -# # name of the experiment -# exp_tag="bottleneck_0", -# -# # name of the flow environment the experiment is running on -# env_name=BottleneckDesiredVelocityEnv, -# -# # name of the network class the experiment is running on -# network=BottleneckNetwork, -# -# # simulator that is used by the experiment -# simulator='traci', -# -# # sumo-related parameters (see flow.core.params.SumoParams) -# sim=SumoParams( -# sim_step=0.5, -# render=False, -# print_warnings=False, -# restart_instance=True, -# ), -# -# # environment related parameters (see flow.core.params.EnvParams) -# env=EnvParams( -# warmup_steps=40, -# sims_per_step=1, -# horizon=HORIZON, -# additional_params=additional_env_params, -# ), -# -# # network-related parameters (see flow.core.params.NetParams and the -# # network's documentation or ADDITIONAL_NET_PARAMS component) -# net=NetParams( -# inflows=inflow, -# additional_params=additional_net_params, -# ), -# -# # vehicles to be placed in the network at the start of a rollout (see -# # flow.core.params.VehicleParams) -# veh=vehicles, -# -# # parameters specifying the positioning of vehicles upon initialization/ -# # reset (see flow.core.params.InitialConfig) -# 
initial=InitialConfig( -# spacing="uniform", -# min_gap=5, -# lanes_distribution=float("inf"), -# edges_distribution=["2", "3", "4", "5"], -# ), -# -# # traffic lights to be introduced to specific nodes (see -# # flow.core.params.TrafficLightParams) -# tls=traffic_lights, -# ) diff --git a/flow/dagger/utils.py b/flow/dagger/utils.py deleted file mode 100644 index 177fc620f..000000000 --- a/flow/dagger/utils.py +++ /dev/null @@ -1,119 +0,0 @@ -import tensorflow as tf -import os -import numpy as np -import math - -# class agnostic helper functions - -def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): - - print("COLLECTING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) - observation = env.reset() - - assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" - - observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] - traj_length = 0 - - while True: - action = controller.get_action(env) - - if type(action) == np.ndarray: - action = action.flatten()[0] - - expert_action = expert_controller.get_action(env) - if (expert_action is None or math.isnan(expert_action)): - observation, reward, done, _ = env.step(action) - traj_length += 1 - terminate_rollout = traj_length == max_trajectory_length or done - if terminate_rollout: - break - continue - - observations.append(observation) - actions.append(action) - expert_actions.append(expert_action) - observation, reward, done, _ = env.step(action) - - traj_length += 1 - next_observations.append(observation) - rewards.append(reward) - terminate_rollout = traj_length == max_trajectory_length or done - terminals.append(terminate_rollout) - - if terminate_rollout: - break - - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) - - -def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): - 
total_envsteps = 0 - trajectories = [] - - while total_envsteps < min_batch_timesteps: - trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) - trajectories.append(trajectory) - - traj_env_steps = len(trajectory["rewards"]) - total_envsteps += traj_env_steps - - return trajectories, total_envsteps - -def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): - return {"observations" : np.array(observations, dtype=np.float32), - "actions" : np.array(actions, dtype=np.float32), - "expert_actions": np.array(expert_actions, dtype=np.float32), - "rewards" : np.array(rewards, dtype=np.float32), - "next_observations": np.array(next_observations, dtype=np.float32), - "terminals": np.array(terminals, dtype=np.float32)} - -def unpack_rollouts(rollouts_list): - """ - Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays - rollouts: list of rollout dictionaries - rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" - return separate np arrays of observations, actions, rewards, next_observations, and is_terminals - """ - observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) - actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) - expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) - rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) - next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) - terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) - - return observations, actions, expert_actions, rewards, next_observations, terminals - - -# Below are tensorflow related functions - -def build_mlp(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): - 
""" - Builds a feedfoward neural net - - arguments: - input_placeholder: placeholder variable for the state (batch_size, input_size) - scope: variable scope of the network - - n_layers: number of hidden layers - size: dimension of each hidden layer - activation: activation of each hidden layer - - output_size: size of the output layer - output_activation: activation of the output layer - - returns: - output_placeholder: the result of a forward pass through the hidden layers + the output layer - """ - output_placeholder = input_placeholder - with tf.variable_scope(scope): - for _ in range(n_layers): - output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) - output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) - return output_placeholder - -def create_tf_session(): - config = tf.ConfigProto(device_count={'GPU': 0}) - sess = tf.Session(config=config) - return sess From 83a78876df6243582c06db0be6163e6a47b80938 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:35:43 -0700 Subject: [PATCH 023/335] Moved file to within controller class --- flow/dagger/imitating_controller.py | 104 ---------------- flow/dagger/run.py | 78 ------------ flow/dagger/trainer.py | 180 ---------------------------- 3 files changed, 362 deletions(-) delete mode 100644 flow/dagger/imitating_controller.py delete mode 100644 flow/dagger/run.py delete mode 100644 flow/dagger/trainer.py diff --git a/flow/dagger/imitating_controller.py b/flow/dagger/imitating_controller.py deleted file mode 100644 index 0adffb6cd..000000000 --- a/flow/dagger/imitating_controller.py +++ /dev/null @@ -1,104 +0,0 @@ -import numpy as np -import tensorflow as tf -from utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - - -class ImitatingController(BaseController): - """ - Controller which learns to imitate another given expert 
controller. - """ - # Implementation in Tensorflow - - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): - - BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - self.inject_noise=inject_noise - self.noise_variance = noise_variance - - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() - - - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - - def build_network(self): - """ - Defines neural network for choosing actions. 
- """ - self.define_placeholders() - self.define_forward_pass() - if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): - self.define_train_op() - - - def define_placeholders(self): - """ - Defines input, output, and training placeholders for neural net - """ - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) - - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - def define_forward_pass(self): - pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action - print("TYPE: ", type(self.obs_placeholder)) - - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) - - def define_train_op(self): - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions - - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) - - def train(self, observation_batch, action_batch): - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - - def get_accel_from_observation(self, observation): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - - return ret_val - - def get_accel(self, env): 
- # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - observation = env.get_state() - return self.get_accel_from_observation(observation) - - def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to replay buffer """ - - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - """ Sample a batch of data from replay buffer """ - - return self.replay_buffer.sample_batch(batch_size) - - def save_network(self, save_path): - self.saver.save(self.sess, save_path) diff --git a/flow/dagger/run.py b/flow/dagger/run.py deleted file mode 100644 index e53030bf7..000000000 --- a/flow/dagger/run.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -import time -import numpy as np -import tensorflow as tf -from trainer import Trainer -from flow.controllers.car_following_models import IDMController - - -class Runner(object): - """ Class to run imitation learning (training and evaluation) """ - - def __init__(self, params): - - # initialize trainer - self.params = params - self.trainer = Trainer(params) - - def run_training_loop(self): - - self.trainer.run_training_loop(n_iter=self.params['n_iter']) - - def evaluate(self): - self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) - - def save_controller_network(self): - self.trainer.save_controller_network() - - -def main(): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int, default=3000) - - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) - parser.add_argument('--n_iter', '-n', type=int, default=5) - - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) - - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled 
data points to be used per gradient/train step - - parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned - parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned - parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--save_path', type=str, default='') - parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=10) - parser.add_argument('--inject_noise', type=int, default=0) - parser.add_argument('--noise_variance',type=float, default=0.5) - parser.add_argument('--vehicle_id',type=str, default='rl_0') - - args = parser.parse_args() - - # convert args to dictionary - params = vars(args) - print("INJECT: ", params['inject_noise']) - assert args.n_iter>1, ('DAgger needs >1 iteration') - - - # run training - train = Runner(params) - train.run_training_loop() - - # evaluate - train.evaluate() - print("DONE") - - if params['save_model'] == 1: - train.save_controller_network() - - # tensorboard - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) - - -if __name__ == "__main__": - main() diff --git a/flow/dagger/trainer.py b/flow/dagger/trainer.py deleted file mode 100644 index 141021ef3..000000000 --- a/flow/dagger/trainer.py +++ /dev/null @@ -1,180 +0,0 @@ -import time -from collections import OrderedDict -import pickle -import numpy as np -import tensorflow as tf -import gym -import os -from flow.utils.registry import make_create_env -from bottleneck_env import flow_params -from imitating_controller import ImitatingController -from flow.controllers.car_following_models import IDMController -from flow.controllers.velocity_controllers import FollowerStopper -from flow.core.params import SumoCarFollowingParams -from utils import * - -class Trainer(object): - """ - 
Class to initialize and run training for imitation learning (with DAgger) - """ - - def __init__(self, params): - self.params = params - self.sess = create_tf_session() - - create_env, _ = make_create_env(flow_params) - self.env = create_env() - self.env.reset() - - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_id = self.params['vehicle_id'] - - obs_dim = self.env.observation_space.shape[0] - - action_dim = (1,)[0] - self.params['action_dim'] = action_dim - self.params['obs_dim'] = obs_dim - - car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) - - # initialize expert controller - # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) - self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) - - tf.global_variables_initializer().run(session=self.sess) - - - def run_training_loop(self, n_iter): - """ - Trains controller for n_iter iterations - - Args: - param n_iter: number of iterations to execute training - """ - - # init vars at beginning of training - self.total_envsteps = 0 - self.start_time = time.time() - - for itr in range(n_iter): - print("\n\n********** Iteration %i ************"%itr) - - # collect trajectories, to be used for training - if itr == 0: - # first iteration is standard behavioral cloning - training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) - else: - training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) - - paths, envsteps_this_batch = training_returns - self.total_envsteps += 
envsteps_this_batch - - # add collected data to replay buffer - self.controller.add_to_replay_buffer(paths) - - # train controller (using sampled data from replay buffer) - loss = self.train_controller() - - def collect_training_trajectories(self, itr, batch_size): - """ - Collect (state, action, reward, next_state, terminal) tuples for training - - Args: - itr: iteration of training during which functino is called - batch_size: number of tuples to collect - Returns: - paths: list of trajectories - envsteps_this_batch: the sum over the numbers of environment steps in paths - """ - - if itr == 0: - collect_controller = self.expert_controller - else: - collect_controller = self.controller - - print("\nCollecting data for training") - paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) - - return paths, envsteps_this_batch - - def train_controller(self): - """ - Trains controller using data sampled from replay buffer - """ - - print('Training controller using sampled data from replay buffer') - - for train_step in range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - self.controller.train(ob_batch, expert_ac_batch) - - def evaluate_controller(self, num_trajs = 10): - """ - Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout - - Args: - num_trajs: number of trajectories to evaluate performance on - """ - - print("\n\n********** Evaluation ************\n\n") - - trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_imitator_reward = 0 - total_imitator_steps = 0 - average_imitator_reward_per_rollout = 0 - - action_errors = np.array([]) - average_action_expert = 0 - 
average_action_imitator = 0 - - for traj in trajectories: - imitator_actions = traj['actions'] - expert_actions = traj['expert_actions'] - - average_action_expert += np.sum(expert_actions) - average_action_imitator += np.sum(imitator_actions) - - action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) - action_errors = np.append(action_errors, action_error) - - average_imitator_reward += np.sum(traj['rewards']) - total_imitator_steps += len(traj['rewards']) - average_imitator_reward_per_rollout += np.sum(traj['rewards']) - - average_imitator_reward = average_imitator_reward / total_imitator_steps - average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) - - average_action_expert = average_action_expert / total_imitator_steps - average_action_imitator = average_action_imitator / total_imitator_steps - - - print('Evaluating controller on reward') - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_expert_reward = 0 - total_expert_steps = 0 - average_expert_reward_per_rollout = 0 - - for traj in expert_trajectories: - average_expert_reward += np.sum(traj['rewards']) - total_expert_steps += len(traj['rewards']) - average_expert_reward_per_rollout += np.sum(traj['rewards']) - - average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps - - print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) - print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) - print("AVERAGE REWARD PER STEP DIFFERENCE: \n", np.abs(average_expert_reward - average_imitator_reward)) - print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT 
DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout)) - print("MEAN ACTION, IMITATOR: ", average_action_imitator) - print("MEAN ACTION, EXPERT: ", average_action_expert) - print("MEAN ACTION ERROR: \n", np.mean(action_errors)) - - def save_controller_network(self): - print("Saving tensorflow model to: ", self.params['save_path']) - self.controller.save_network(self.params['save_path']) From c668336689d0b061244b43ce72fe3c4928bcbc9f Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 12 Apr 2020 18:37:55 -0700 Subject: [PATCH 024/335] Renamed directory, code cleanup, evaluation script --- flow/controllers/{dagger => imitation_learning}/.idea/dagger.iml | 0 flow/controllers/{dagger => imitation_learning}/bottleneck_env.py | 0 .../{dagger => imitation_learning}/imitating_controller.py | 0 flow/controllers/{dagger => imitation_learning}/replay_buffer.py | 0 flow/controllers/{dagger => imitation_learning}/ring_env.py | 0 flow/controllers/{dagger => imitation_learning}/run.py | 0 flow/controllers/{dagger => imitation_learning}/trainer.py | 0 flow/controllers/{dagger => imitation_learning}/utils.py | 0 8 files changed, 0 insertions(+), 0 deletions(-) rename flow/controllers/{dagger => imitation_learning}/.idea/dagger.iml (100%) rename flow/controllers/{dagger => imitation_learning}/bottleneck_env.py (100%) rename flow/controllers/{dagger => imitation_learning}/imitating_controller.py (100%) rename flow/controllers/{dagger => imitation_learning}/replay_buffer.py (100%) rename flow/controllers/{dagger => imitation_learning}/ring_env.py (100%) rename flow/controllers/{dagger => imitation_learning}/run.py (100%) rename flow/controllers/{dagger => imitation_learning}/trainer.py (100%) rename flow/controllers/{dagger => imitation_learning}/utils.py (100%) diff --git a/flow/controllers/dagger/.idea/dagger.iml b/flow/controllers/imitation_learning/.idea/dagger.iml similarity index 100% rename from flow/controllers/dagger/.idea/dagger.iml 
rename to flow/controllers/imitation_learning/.idea/dagger.iml diff --git a/flow/controllers/dagger/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py similarity index 100% rename from flow/controllers/dagger/bottleneck_env.py rename to flow/controllers/imitation_learning/bottleneck_env.py diff --git a/flow/controllers/dagger/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py similarity index 100% rename from flow/controllers/dagger/imitating_controller.py rename to flow/controllers/imitation_learning/imitating_controller.py diff --git a/flow/controllers/dagger/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py similarity index 100% rename from flow/controllers/dagger/replay_buffer.py rename to flow/controllers/imitation_learning/replay_buffer.py diff --git a/flow/controllers/dagger/ring_env.py b/flow/controllers/imitation_learning/ring_env.py similarity index 100% rename from flow/controllers/dagger/ring_env.py rename to flow/controllers/imitation_learning/ring_env.py diff --git a/flow/controllers/dagger/run.py b/flow/controllers/imitation_learning/run.py similarity index 100% rename from flow/controllers/dagger/run.py rename to flow/controllers/imitation_learning/run.py diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/imitation_learning/trainer.py similarity index 100% rename from flow/controllers/dagger/trainer.py rename to flow/controllers/imitation_learning/trainer.py diff --git a/flow/controllers/dagger/utils.py b/flow/controllers/imitation_learning/utils.py similarity index 100% rename from flow/controllers/dagger/utils.py rename to flow/controllers/imitation_learning/utils.py From f54eebc7c9ff97a54b06951c5c8047c0fbd81eaa Mon Sep 17 00:00:00 2001 From: Akash Velu <31679538+akashvelu@users.noreply.github.com> Date: Mon, 13 Apr 2020 12:23:50 -0700 Subject: [PATCH 025/335] Delete dagger.iml --- flow/controllers/imitation_learning/.idea/dagger.iml | 12 ------------ 1 
file changed, 12 deletions(-) delete mode 100644 flow/controllers/imitation_learning/.idea/dagger.iml diff --git a/flow/controllers/imitation_learning/.idea/dagger.iml b/flow/controllers/imitation_learning/.idea/dagger.iml deleted file mode 100644 index 0bc0e0321..000000000 --- a/flow/controllers/imitation_learning/.idea/dagger.iml +++ /dev/null @@ -1,12 +0,0 @@ - - - - - - - - - - \ No newline at end of file From eb7b3a2596dbd430f58fe200863fda1eb2c7ba0b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 14 Apr 2020 10:54:26 -0700 Subject: [PATCH 026/335] initial multiagent imitation learning implementation --- flow/controllers/dagger/run.py | 78 +++++++ flow/controllers/dagger/trainer.py | 179 +++++++++++++++ .../imitation_learning/Untitled.ipynb | 215 ++++++++++++++++++ .../imitation_learning/bottleneck_env.py | 2 +- .../imitation_learning/i210_multiagent.py | 192 ++++++++++++++++ .../imitating_controller.py | 93 +------- .../imitation_learning/imitating_network.py | 102 +++++++++ .../imitation_learning/multiagent_ring_env.py | 99 ++++++++ flow/controllers/imitation_learning/run.py | 8 +- .../controllers/imitation_learning/trainer.py | 62 +++-- flow/controllers/imitation_learning/utils.py | 102 ++++++++- 11 files changed, 1012 insertions(+), 120 deletions(-) create mode 100644 flow/controllers/dagger/run.py create mode 100644 flow/controllers/dagger/trainer.py create mode 100644 flow/controllers/imitation_learning/Untitled.ipynb create mode 100644 flow/controllers/imitation_learning/i210_multiagent.py create mode 100644 flow/controllers/imitation_learning/imitating_network.py create mode 100644 flow/controllers/imitation_learning/multiagent_ring_env.py diff --git a/flow/controllers/dagger/run.py b/flow/controllers/dagger/run.py new file mode 100644 index 000000000..faa7d4ee6 --- /dev/null +++ b/flow/controllers/dagger/run.py @@ -0,0 +1,78 @@ +import os +import time +import numpy as np +import tensorflow as tf +from trainer import Trainer +from 
flow.controllers.car_following_models import IDMController + + +class Runner(object): + """ Class to run imitation learning (training and evaluation) """ + + def __init__(self, params): + + # initialize trainer + self.params = params + self.trainer = Trainer(params) + + def run_training_loop(self): + + self.trainer.run_training_loop(n_iter=self.params['n_iter']) + + def evaluate(self): + self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + + def save_controller_network(self): + self.trainer.save_controller_network() + + +def main(): + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--ep_len', type=int, default=3000) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--n_iter', '-n', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=3000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned + parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned + parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--save_path', type=str, default='') + parser.add_argument('--save_model', type=int, default=0) + parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--noise_variance',type=float, default=0.5) + parser.add_argument('--vehicle_id', type=str, default='rl_0') + + args = 
parser.parse_args() + + # convert args to dictionary + params = vars(args) + assert args.n_iter>1, ('DAgger needs >1 iteration') + + + # run training + train = Runner(params) + train.run_training_loop() + + # evaluate + train.evaluate() + print("DONE") + + if params['save_model'] == 1: + train.save_controller_network() + + # tensorboard + if params['save_model'] == 1: + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + +if __name__ == "__main__": + main() diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/dagger/trainer.py new file mode 100644 index 000000000..03364f528 --- /dev/null +++ b/flow/controllers/dagger/trainer.py @@ -0,0 +1,179 @@ +import time +from collections import OrderedDict +import pickle +import numpy as np +import tensorflow as tf +import gym +import os +from flow.utils.registry import make_create_env +from bottleneck_env import flow_params +from imitating_controller import ImitatingController +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import SumoCarFollowingParams +from utils import * + +class Trainer(object): + """ + Class to initialize and run training for imitation learning (with DAgger) + """ + + def __init__(self, params): + self.params = params + self.sess = create_tf_session() + + create_env, _ = make_create_env(flow_params) + self.env = create_env() + self.env.reset() + + print(self.env.k.vehicle.get_ids()) + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_id = self.params['vehicle_id'] + + obs_dim = self.env.observation_space.shape[0] + + action_dim = (1,)[0] + self.params['action_dim'] = action_dim + self.params['obs_dim'] = obs_dim + + car_following_params = SumoCarFollowingParams() + self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], 
self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) + self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) + + tf.global_variables_initializer().run(session=self.sess) + + + def run_training_loop(self, n_iter): + """ + Trains controller for n_iter iterations + + Args: + param n_iter: number of iterations to execute training + """ + + # init vars at beginning of training + self.total_envsteps = 0 + self.start_time = time.time() + + for itr in range(n_iter): + print("\n\n********** Iteration %i ************"%itr) + + # collect trajectories, to be used for training + if itr == 0: + # first iteration is standard behavioral cloning + training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) + else: + training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) + + paths, envsteps_this_batch = training_returns + self.total_envsteps += envsteps_this_batch + + # add collected data to replay buffer + self.controller.add_to_replay_buffer(paths) + + # train controller (using sampled data from replay buffer) + loss = self.train_controller() + + def collect_training_trajectories(self, itr, batch_size): + """ + Collect (state, action, reward, next_state, terminal) tuples for training + + Args: + itr: iteration of training during which function is called + batch_size: number of tuples to collect + Returns: + paths: list of trajectories + envsteps_this_batch: the sum over the numbers of environment steps in paths + """ + + if itr == 0: + collect_controller = self.expert_controller + else: + collect_controller = self.controller + + print("\nCollecting data to be used for training...") + paths, envsteps_this_batch = 
sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + + return paths, envsteps_this_batch + + def train_controller(self): + """ + Trains controller using data sampled from replay buffer + """ + + print('Training controller using sampled data from replay buffer') + for train_step in range(self.params['num_agent_train_steps_per_iter']): + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) + self.controller.train(ob_batch, expert_ac_batch) + + def evaluate_controller(self, num_trajs = 10): + """ + Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + + Args: + num_trajs: number of trajectories to evaluate performance on + """ + + print("\n\n********** Evaluation ************ \n") + + trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_imitator_reward = 0 + total_imitator_steps = 0 + average_imitator_reward_per_rollout = 0 + + action_errors = np.array([]) + average_action_expert = 0 + average_action_imitator = 0 + + # compare actions taken in each step of trajectories + for traj in trajectories: + imitator_actions = traj['actions'] + expert_actions = traj['expert_actions'] + + average_action_expert += np.sum(expert_actions) + average_action_imitator += np.sum(imitator_actions) + + action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + action_errors = np.append(action_errors, action_error) + + average_imitator_reward += np.sum(traj['rewards']) + total_imitator_steps += len(traj['rewards']) + average_imitator_reward_per_rollout += np.sum(traj['rewards']) + + average_imitator_reward = average_imitator_reward / total_imitator_steps + average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / 
len(trajectories) + + average_action_expert = average_action_expert / total_imitator_steps + average_action_imitator = average_action_imitator / total_imitator_steps + + + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + + average_expert_reward = 0 + total_expert_steps = 0 + average_expert_reward_per_rollout = 0 + + # compare reward accumulated in trajectories collected via expert vs. via imitator + for traj in expert_trajectories: + average_expert_reward += np.sum(traj['rewards']) + total_expert_steps += len(traj['rewards']) + average_expert_reward_per_rollout += np.sum(traj['rewards']) + + average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) + average_expert_reward = average_expert_reward / total_expert_steps + + print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) + print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) + print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") + + print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + + print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + + def save_controller_network(self): + print("Saving tensorflow model to: ", self.params['save_path']) + self.controller.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb new file mode 100644 index 000000000..875fe73b6 --- /dev/null +++ b/flow/controllers/imitation_learning/Untitled.ipynb @@ -0,0 +1,215 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + 
"outputs": [ + { + "ename": "ImportError", + "evalue": "cannot import name 'energy_consumption'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregistry\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmake_create_env\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mi210_multiagent\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_multi\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mring_env\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_single\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrollers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcar_following_models\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mIDMController\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/i210_multiagent.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0mSumoParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSumoLaneChangeParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrewards\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0menergy_consumption\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnetworks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210_subnetwork\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210SubNetwork\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEDGES_DISTRIBUTION\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menvs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmultiagent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210MultiEnv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mADDITIONAL_ENV_PARAMS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mImportError\u001b[0m: cannot import name 'energy_consumption'" + ] + } + ], + "source": [ + "import time\n", + "import pickle\n", + "import numpy as np\n", + "import gym\n", + "import os\n", + "from flow.utils.registry import make_create_env\n", + "from i210_multiagent import flow_params as flow_params_multi\n", + "from ring_env import flow_params as flow_params_single\n", + "from flow.controllers.car_following_models import IDMController\n", + "from flow.controllers.velocity_controllers import FollowerStopper\n", + "from flow.core.params import SumoCarFollowingParams\n", + 
"from utils import *" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params_multi)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\n", + "-----------------------\n", + "ring length: 264\n", + "v_max: 5.329679917416892\n", + "-----------------------\n" + ] + }, + { + "data": { + "text/plain": [ + "{'rl_0_0': array([0.30672195, 0.00223007, 0.02625558]),\n", + " 'rl_1_0': array([ 0.34392208, -0.00785657, 0.02819709])}" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.reset()" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['human_0',\n", + " 'human_1',\n", + " 'human_2',\n", + " 'human_3',\n", + " 'human_4',\n", + " 'human_5',\n", + " 'human_6',\n", + " 'human_7',\n", + " 'human_8',\n", + " 'human_9',\n", + " 'human_10',\n", + " 'human_11',\n", + " 'human_12',\n", + " 'human_13',\n", + " 'human_14',\n", + " 'human_15',\n", + " 'human_16',\n", + " 'human_17',\n", + " 'human_18',\n", + " 'human_19',\n", + " 'human_20',\n", + " 'rl_0']" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.k.vehicle.get_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": 20, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {'rl_0': env.action_space.sample()}\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.step(None)" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/plain": [ + "{'rl_0_0': array([0.25527085, 0.00670868, 0.02368258]),\n", + " 'rl_1_0': array([ 0.24537913, -0.00482127, 0.02289928])}" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.get_state()" + ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['rl_0_0', 'rl_1_0']" + ] + }, + "execution_count": 13, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "list(env.get_state().keys())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/controllers/imitation_learning/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py index 820244a87..c0fabedda 100644 --- a/flow/controllers/imitation_learning/bottleneck_env.py +++ b/flow/controllers/imitation_learning/bottleneck_env.py @@ -111,7 +111,7 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.5, - render=False, + render=True, print_warnings=False, restart_instance=True, ), diff --git a/flow/controllers/imitation_learning/i210_multiagent.py b/flow/controllers/imitation_learning/i210_multiagent.py new file mode 100644 index 000000000..6efbf1e3c --- /dev/null +++ b/flow/controllers/imitation_learning/i210_multiagent.py @@ -0,0 +1,192 @@ +"""Multi-agent I-210 example. +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. 
+""" +import os +import numpy as np + +from ray.tune.registry import register_env + +from flow.controllers import RLController +from flow.controllers.car_following_models import IDMController +import flow.config as config +from flow.core.params import EnvParams +from flow.core.params import NetParams +from flow.core.params import InitialConfig +from flow.core.params import InFlows +from flow.core.params import VehicleParams +from flow.core.params import SumoParams +from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import energy_consumption +from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION +from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS +from flow.utils.registry import make_create_env + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 4000 + +VEH_PER_HOUR_BASE_119257914 = 10800 +VEH_PER_HOUR_BASE_27414345 = 321 +VEH_PER_HOUR_BASE_27414342 = 421 + +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + +# SET UP PARAMETERS FOR THE ENVIRONMENT +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + # configure the observation space. Look at the I210MultiEnv class for more info. 
+ 'lead_obs': True, + # whether to add in a reward for the speed of nearby vehicles + "local_reward": True +}) + +# CREATE VEHICLE TYPES AND INFLOWS +# no vehicles in the network +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) +vehicles.add( + "av", + acceleration_controller=(RLController, {}), + num_vehicles=0, +) + +inflow = InFlows() +# main highway +pen_rate = PENETRATION_RATE / 100 +assert pen_rate < 1.0, "your penetration rate is over 100%" +assert pen_rate > 0.0, "your penetration rate should be above zero" +inflow.add( + veh_type="human", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), + # probability=1.0, + depart_lane="random", + departSpeed=20) +# # on ramp +# inflow.add( +# veh_type="human", +# edge="27414345", +# vehs_per_hour=321 * pen_rate, +# depart_lane="random", +# depart_speed=20) +# inflow.add( +# veh_type="human", +# edge="27414342#0", +# vehs_per_hour=421 * pen_rate, +# depart_lane="random", +# depart_speed=20) + +# Now add the AVs +# main highway +inflow.add( + veh_type="av", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), + # probability=1.0, + depart_lane="random", + depart_speed=20) +# # on ramp +# inflow.add( +# veh_type="av", +# edge="27414345", +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), +# depart_lane="random", +# depart_speed=20) +# inflow.add( +# veh_type="av", +# edge="27414342#0", +# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), +# depart_lane="random", +# depart_speed=20) + +NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") + +flow_params = dict( + # name of the experiment + exp_tag='I_210_subnetwork', + + # name of the flow environment the experiment is running on + env_name=I210MultiEnv, + + # name 
of the network class the experiment is running on + network=I210SubNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # simulation-related parameters + sim=SumoParams( + sim_step=0.5, + render=False, + color_by_speed=False, + restart_instance=True, + use_ballistic=True + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + sims_per_step=1, + warmup_steps=0, + additional_params=additional_env_params, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + template=NET_TEMPLATE + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + edges_distribution=EDGES_DISTRIBUTION, + ), +) + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space + +POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} + +POLICIES_TO_TRAIN = ['av'] + + +def policy_mapping_fn(_): + """Map a policy in RLlib.""" + return 'av' + + +custom_callables = { + "avg_speed": lambda env: np.mean([speed for speed in + env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), + "avg_outflow": lambda env: np.nan_to_num( + env.k.vehicle.get_outflow_rate(120)), + "avg_energy": lambda env: -1*energy_consumption(env, 0.1) +} diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 0adffb6cd..9c7cb0b71 100644 --- 
a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -12,93 +12,16 @@ class ImitatingController(BaseController): """ # Implementation in Tensorflow - def __init__(self, veh_id, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - self.inject_noise=inject_noise - self.noise_variance = noise_variance - - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() - - - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - - def build_network(self): - """ - Defines neural network for choosing actions. 
- """ - self.define_placeholders() - self.define_forward_pass() - if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): - self.define_train_op() - - - def define_placeholders(self): - """ - Defines input, output, and training placeholders for neural net - """ - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) - - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - def define_forward_pass(self): - pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action - print("TYPE: ", type(self.obs_placeholder)) - - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) - - def define_train_op(self): - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions - - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) - - def train(self, observation_batch, action_batch): - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - - def get_accel_from_observation(self, observation): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - - return ret_val + self.action_network = 
action_network + self.multiagent = multiagent def get_accel(self, env): - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - observation = env.get_state() - return self.get_accel_from_observation(observation) - - def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to replay buffer """ - - self.replay_buffer.add_rollouts(rollout_list) - - def sample_data(self, batch_size): - """ Sample a batch of data from replay buffer """ - - return self.replay_buffer.sample_batch(batch_size) + if self.multiagent: + observation = env.get_state()[self.veh_id] + else: + observation = env.get_state() - def save_network(self, save_path): - self.saver.save(self.sess, save_path) + return self.action_network.get_accel_from_observation(observation) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py new file mode 100644 index 000000000..383b10beb --- /dev/null +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -0,0 +1,102 @@ +import numpy as np +import tensorflow as tf +from utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + +class ImitatingNetwork(): + """ + Neural network which learns to imitate another given expert controller. 
+ """ + # Implementation in Tensorflow + + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars'): + + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.inject_noise=inject_noise + self.noise_variance = noise_variance + + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() + + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + + def build_network(self): + """ + Defines neural network for choosing actions. + """ + self.define_placeholders() + self.define_forward_pass() + if self.training: + with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + self.define_train_op() + + + def define_placeholders(self): + """ + Defines input, output, and training placeholders for neural net + """ + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + + if self.training: + self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + + def define_forward_pass(self): + pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.action_predictions = pred_action + print("TYPE: ", type(self.obs_placeholder)) + + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + 
def define_train_op(self): + true_actions = self.action_labels_placeholder + predicted_actions = self.action_predictions + + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) + + def train(self, observation_batch, action_batch): + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + + def get_accel_from_observation(self, observation): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] + + return ret_val + + def get_accel(self, env): + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + observation = env.get_state() + return self.get_accel_from_observation(observation) + + def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + + self.replay_buffer.add_rollouts(rollout_list) + + def sample_data(self, batch_size): + """ Sample a batch of data from replay buffer """ + + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + self.saver.save(self.sess, save_path) diff --git a/flow/controllers/imitation_learning/multiagent_ring_env.py b/flow/controllers/imitation_learning/multiagent_ring_env.py new file mode 100644 index 000000000..538679ed0 --- /dev/null +++ b/flow/controllers/imitation_learning/multiagent_ring_env.py @@ -0,0 +1,99 @@ +"""Ring road example. +Trains a number of autonomous vehicles to stabilize the flow of 22 vehicles in +a variable length ring road. 
+""" +from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams +from flow.core.params import VehicleParams, SumoCarFollowingParams +from flow.controllers import RLController, IDMController, ContinuousRouter +from flow.envs.multiagent import MultiAgentWaveAttenuationPOEnv +from flow.networks import RingNetwork +from flow.utils.registry import make_create_env + +# time horizon of a single rollout +HORIZON = 3000 +# number of rollouts per training iteration +N_ROLLOUTS = 20 +# number of parallel workers +N_CPUS = 2 +# number of automated vehicles. Must be less than or equal to 22. +NUM_AUTOMATED = 2 + + +# We evenly distribute the automated vehicles in the network. +num_human = 22 - NUM_AUTOMATED +humans_remaining = num_human + +vehicles = VehicleParams() +for i in range(NUM_AUTOMATED): + # Add one automated vehicle. + vehicles.add( + veh_id="rl_{}".format(i), + acceleration_controller=(RLController, {}), + routing_controller=(ContinuousRouter, {}), + num_vehicles=1) + + # Add a fraction of the remaining human vehicles. 
+ vehicles_to_add = round(humans_remaining / (NUM_AUTOMATED - i)) + humans_remaining -= vehicles_to_add + vehicles.add( + veh_id="human_{}".format(i), + acceleration_controller=(IDMController, { + "noise": 0.2 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0 + ), + routing_controller=(ContinuousRouter, {}), + num_vehicles=vehicles_to_add) + + +flow_params = dict( + # name of the experiment + exp_tag="multiagent_ring", + + # name of the flow environment the experiment is running on + env_name=MultiAgentWaveAttenuationPOEnv, + + # name of the network class the experiment is running on + network=RingNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.1, + render=True, + restart_instance=False + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=750, + clip_actions=False, + additional_params={ + "max_accel": 1, + "max_decel": 1, + "ring_length": [220, 270], + }, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + additional_params={ + "length": 260, + "lanes": 1, + "speed_limit": 30, + "resolution": 40, + }, ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index c647f37cd..b88f7fbd7 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,7 +1,7 @@ import os import time import numpy as np -import tensorflow as tf +#import tensorflow as tf from trainer import Trainer from 
flow.controllers.car_following_models import IDMController @@ -34,8 +34,8 @@ def main(): parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) parser.add_argument('--n_iter', '-n', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) + parser.add_argument('--batch_size', type=int, default=10000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=30000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step @@ -50,12 +50,12 @@ def main(): parser.add_argument('--inject_noise', type=int, default=0) parser.add_argument('--noise_variance',type=float, default=0.5) parser.add_argument('--vehicle_id', type=str, default='rl_0') + parser.add_argument('--multiagent', type=bool, default=False) args = parser.parse_args() # convert args to dictionary params = vars(args) - print("INJECT: ", params['inject_noise']) assert args.n_iter>1, ('DAgger needs >1 iteration') diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 03364f528..d9c1b3164 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -2,15 +2,16 @@ from collections import OrderedDict import pickle import numpy as np -import tensorflow as tf import gym import os from flow.utils.registry import make_create_env -from bottleneck_env import flow_params +from multiagent_ring_env import flow_params from imitating_controller import ImitatingController +from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import 
FollowerStopper from flow.core.params import SumoCarFollowingParams +import tensorflow as tf from utils import * class Trainer(object): @@ -19,34 +20,51 @@ class Trainer(object): """ def __init__(self, params): + + # param setup self.params = params self.sess = create_tf_session() + # environment setup create_env, _ = make_create_env(flow_params) self.env = create_env() - self.env.reset() + init_state = self.env.reset() - print(self.env.k.vehicle.get_ids()) - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_id = self.params['vehicle_id'] + # vehicle setup + self.multiagent = params['multiagent'] - obs_dim = self.env.observation_space.shape[0] + # TODO: remove print + print("MULTI: ", self.multiagent) + + if self.multiagent: + self.vehicle_ids = list(init_state.keys()) + else: + print("IDS: ", self.env.k.vehicle.get_ids()) + assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() + self.vehicle_ids = [self.params['vehicle_id']] + # neural net setup + obs_dim = self.env.observation_space.shape[0] action_dim = (1,)[0] self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim - car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) - # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) - self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], 
inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) tf.global_variables_initializer().run(session=self.sess) + # controllers setup + car_following_params = SumoCarFollowingParams() + self.expert_controllers = [] + self.controllers = [] + for vehicle_id in self.vehicle_ids: + self.expert_controllers.append(FollowerStopper(vehicle_id, car_following_params=car_following_params)) + self.controllers.append(ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params)) + def run_training_loop(self, n_iter): """ - Trains controller for n_iter iterations + Trains imitator for n_iter iterations Args: param n_iter: number of iterations to execute training @@ -70,7 +88,7 @@ def run_training_loop(self, n_iter): self.total_envsteps += envsteps_this_batch # add collected data to replay buffer - self.controller.add_to_replay_buffer(paths) + self.action_network.add_to_replay_buffer(paths) # train controller (using sampled data from replay buffer) loss = self.train_controller() @@ -88,14 +106,14 @@ def collect_training_trajectories(self, itr, batch_size): """ if itr == 0: - collect_controller = self.expert_controller + collect_controllers = self.expert_controllers else: - collect_controller = self.controller + collect_controllers = self.controllers print("\nCollecting data to be used for training...") - paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_ids, collect_controllers, self.expert_controllers, batch_size, self.params['ep_len'], self.multiagent) - return paths, envsteps_this_batch + return trajectories, envsteps_this_batch def train_controller(self): """ @@ -104,8 +122,8 @@ def train_controller(self): print('Training controller using sampled data from replay buffer') for train_step in 
range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - self.controller.train(ob_batch, expert_ac_batch) + ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.action_network.sample_data(self.params['train_batch_size']) + self.action_network.train(ob_batch, expert_ac_batch) def evaluate_controller(self, num_trajs = 10): """ @@ -117,7 +135,7 @@ def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") - trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) + trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) average_imitator_reward = 0 total_imitator_steps = 0 @@ -149,7 +167,7 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = average_action_imitator / total_imitator_steps - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) + expert_trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.expert_controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) average_expert_reward = 0 total_expert_steps = 0 @@ -176,4 +194,4 @@ def evaluate_controller(self, num_trajs = 10): def save_controller_network(self): print("Saving tensorflow model to: ", self.params['save_path']) - self.controller.save_network(self.params['save_path']) + self.action_network.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index a5bf7acfa..6e694ea01 100644 --- a/flow/controllers/imitation_learning/utils.py +++ 
b/flow/controllers/imitation_learning/utils.py @@ -5,17 +5,15 @@ """ Class agnostic helper functions """ -def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length): +def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller, max_trajectory_length): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. - Args: env: environment vehicle_id: id of the vehicle that is being controlled/tracked during trajectory controller: subclass of BaseController, decides actions taken by vehicle expert_controller: subclass of BaseController, "expert" for imitation learning max_trajectory_length: maximum steps in a trajectory - Returns: Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ @@ -61,7 +59,85 @@ def sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajec return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) -def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batch_timesteps, max_trajectory_length): +def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length): + """ + Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. 
+ + Args: + env: environment + vehicle_ids: id of the vehicle that is being controlled/tracked during trajectory + controllers: subclass of BaseController, decides actions taken by vehicle + expert_controllers: subclass of BaseController, "expert" for imitation learning + max_trajectory_length: maximum steps in a trajectory + + Returns: + Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples + """ + + print("COLLECTING CONTROLLER: ", controllers[0]) + print("EXPERT CONTROLLER: ", expert_controllers[0]) + observation_dict = env.reset() + + for vehicle_id in vehicle_ids: + assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" + + observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] + traj_length = 0 + + while True: + rl_actions = dict() + invalid_expert_action = False + expert_action_dict = dict() + + for i in range(len(vehicle_ids)): + vehicle_id = vehicle_ids[i] + controller = controllers[i] + expert_controller = expert_controllers[i] + + action = controller.get_action(env) + + if type(action) == np.ndarray: + action = action.flatten()[0] + + expert_action = expert_controller.get_action(env) + expert_action_dict[vehicle_id] = expert_action + + if (expert_action is None or math.isnan(expert_action)): + invalid_expert_action = True + + rl_actions[vehicle_id] = action + + if invalid_expert_action: + # invalid action in rl_actions, so default control to SUMO + observations_dict, reward_dict, done_dict, _ = env.step(None) + traj_length += 1 + terminate_rollout = traj_length == max_trajectory_length or done_dict['__all__'] + if terminate_rollout: + break + continue + + for vehicle_id in vehicle_ids: + observations.append(observation_dict[vehicle_id]) + actions.append(rl_actions[vehicle_id]) + expert_actions.append(expert_action_dict[vehicle_id]) + + observation_dict, reward_dict, done_dict, _ = env.step(rl_actions) + 
terminate_rollout = done_dict['__all__'] or (traj_length == max_trajectory_length) + + for vehicle_id in vehicle_ids: + next_observations.append(observation_dict[vehicle_id]) + rewards.append(reward_dict[vehicle_id]) + terminals.append(terminate_rollout) + + traj_length += 1 + + if terminate_rollout: + break + + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + + +def sample_trajectories(env, vehicle_ids, controllers, expert_controllers, min_batch_timesteps, max_trajectory_length, multiagent): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -80,15 +156,20 @@ def sample_trajectories(env, vehicle_id, controller, expert_controller, min_batc trajectories = [] while total_envsteps < min_batch_timesteps: - trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + + if multiagent: + trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + else: + trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectories.append(trajectory) - traj_env_steps = len(trajectory["rewards"]) + traj_env_steps = len(trajectory["rewards"]) / len(vehicle_ids) total_envsteps += traj_env_steps return trajectories, total_envsteps -def sample_n_trajectories(env, vehicle_id, controller, expert_controller, n, max_trajectory_length): +def sample_n_trajectories(env, vehicle_ids, controllers, expert_controllers, n, max_trajectory_length, multiagent): """ Collects a fixed number of trajectories. 
@@ -106,7 +187,12 @@ def sample_n_trajectories(env, vehicle_id, controller, expert_controller, n, max """ trajectories = [] for _ in range(n): - trajectory = sample_trajectory(env, vehicle_id, controller, expert_controller, max_trajectory_length) + + if multiagent: + trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + else: + trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectories.append(trajectory) return trajectories From 47057758ba3cf84dd125ac102fd0bae6681ac91a Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 15 Apr 2020 20:24:51 -0700 Subject: [PATCH 027/335] Evinitsky/straight road pr (#909) Add a straight road training environment. Swap out the reward for a desired velocity squared reward. --- examples/README.md | 6 +- examples/exp_configs/non_rl/straight_road.py | 131 ++++++++++++++ .../rl/multiagent/multiagent_straight_road.py | 162 ++++++++++++++++++ examples/train.py | 11 +- flow/core/kernel/vehicle/traci.py | 2 + flow/core/params.py | 7 +- flow/envs/multiagent/__init__.py | 5 +- flow/envs/multiagent/base.py | 10 +- flow/envs/multiagent/i210.py | 53 ++++-- flow/networks/highway.py | 3 +- flow/visualize/time_space_diagram.py | 79 ++++++++- requirements.txt | 2 + scripts/ray_autoscale.yaml | 4 +- 13 files changed, 443 insertions(+), 32 deletions(-) create mode 100644 examples/exp_configs/non_rl/straight_road.py create mode 100644 examples/exp_configs/rl/multiagent/multiagent_straight_road.py diff --git a/examples/README.md b/examples/README.md index a9d681131..8156831fe 100644 --- a/examples/README.md +++ b/examples/README.md @@ -53,11 +53,11 @@ trained through RL algorithms provided by *RLlib*. 
To execute these examples, run ```shell script -python train.py EXP_CONFIG --rl_trainer "rllib" +python train.py EXP_CONFIG --rl_trainer "rllib" --algorithm ``` where `EXP_CONFIG` is the name of the experiment configuration file, as located -in `exp_configs/rl/singleagent` or `exp_configs/rl/multiagent.` - +in `exp_configs/rl/singleagent` or `exp_configs/rl/multiagent.` Here `` +should be the name of your desired algorithm. Currently we support PPO and TD3. ### stable-baselines diff --git a/examples/exp_configs/non_rl/straight_road.py b/examples/exp_configs/non_rl/straight_road.py new file mode 100644 index 000000000..c557ce836 --- /dev/null +++ b/examples/exp_configs/non_rl/straight_road.py @@ -0,0 +1,131 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. +""" +import numpy as np + +from flow.controllers import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.networks import HighwayNetwork +from flow.envs import TestEnv +from flow.networks.highway import ADDITIONAL_NET_PARAMS + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 0.0 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + 
"human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + acceleration_controller=(FollowerStopper, {"v_des": 18.0}), + ) + +# add human vehicles on the highway +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="idm_highway_inflow") + +if PENETRATION_RATE > 0.0: + inflows.add( + veh_type="av", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="av_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='multiagent_highway', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + +custom_callables = { + 
"avg_speed": lambda env: np.nan_to_num(np.mean( + env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(['highway_0', 'highway_1'])))), +} diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py new file mode 100644 index 000000000..9ed38656f --- /dev/null +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -0,0 +1,162 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. +""" +from flow.controllers import RLController, IDMController +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.networks import HighwayNetwork +from flow.envs.multiagent import MultiStraightRoad +from flow.networks.highway import ADDITIONAL_NET_PARAMS +from flow.utils.registry import make_create_env +from ray.tune.registry import register_env + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + + +# SET UP PARAMETERS FOR THE ENVIRONMENT + +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + 'target_velocity': 18, + 'local_reward': True, + 'lead_obs': True +}) + + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows 
= InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +# autonomous vehicles +vehicles.add( + veh_id='rl', + acceleration_controller=(RLController, {})) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="idm_highway_inflow") + +# add autonomous vehicles on the highway +# they will stay on the highway, i.e. they won't exit through the off-ramps +inflows.add( + veh_type="rl", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="rl_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='multiagent_highway', + + # name of the flow environment the experiment is running on + env_name=MultiStraightRoad, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, # do not put more than one + additional_params=additional_env_params, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + use_ballistic=True, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters 
specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space + + +POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} + +POLICIES_TO_TRAIN = ['av'] + + +def policy_mapping_fn(_): + """Map a policy in RLlib.""" + return 'av' diff --git a/examples/train.py b/examples/train.py index 8150bc883..d688f2b9a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -25,6 +25,7 @@ import ray from ray import tune +from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper from ray.tune.registry import register_env try: from ray.rllib.agents.agent import get_agent_class @@ -203,7 +204,7 @@ def setup_exps_rllib(flow_params, config["critic_lr"] = tune.grid_search([1e-3, 1e-4]) config["n_step"] = tune.grid_search([1, 10]) else: - sys.exit("We only support PPO and TD3 right now.") + sys.exit("We only support PPO, TD3, right now.") # define some standard and useful callbacks def on_episode_start(info): @@ -214,6 +215,8 @@ def on_episode_start(info): def on_episode_step(info): episode = info["episode"] env = info["env"].get_unwrapped()[0] + if isinstance(env, _GroupAgentsWrapper): + env = env.env speed = np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]) if not np.isnan(speed): episode.user_data["avg_speed"].append(speed) @@ -246,7 +249,6 @@ def on_episode_end(info): create_env, gym_name = make_create_env(params=flow_params) - # Register as rllib env register_env(gym_name, create_env) return alg_run, gym_name, config @@ -266,6 +268,10 @@ def train_rllib(submodule, flags): config['num_workers'] = flags.num_cpus config['env'] = gym_name + # create a 
custom string that makes looking at the experiment names easier + def trial_str_creator(trial): + return "{}_{}".format(trial.trainable_name, trial.experiment_tag) + if flags.local_mode: ray.init(local_mode=True) else: @@ -276,6 +282,7 @@ def train_rllib(submodule, flags): "config": config, "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, + 'trial_name_creator': trial_str_creator, "max_failures": 0, "stop": { "training_iteration": flags.num_iterations, diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 50cd106c9..22dcc8837 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -990,6 +990,8 @@ def choose_routes(self, veh_ids, route_choices): def get_x_by_id(self, veh_id): """See parent class.""" + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_x_by_id(vehID) for vehID in veh_id] if self.get_edge(veh_id) == '': # occurs when a vehicle crashes is teleported for some other reason return 0. diff --git a/flow/core/params.py b/flow/core/params.py index 5a7467580..afead7017 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -637,6 +637,9 @@ class EnvParams: specifies whether to clip actions from the policy by their range when they are inputted to the reward function. Note that the actions are still clipped before they are provided to `apply_rl_actions`. + done_at_exit : bool, optional + If true, done is returned as True when the vehicle exits. This is only + applied to multi-agent environments. 
""" def __init__(self, @@ -645,7 +648,8 @@ def __init__(self, warmup_steps=0, sims_per_step=1, evaluate=False, - clip_actions=True): + clip_actions=True, + done_at_exit=True): """Instantiate EnvParams.""" self.additional_params = \ additional_params if additional_params is not None else {} @@ -654,6 +658,7 @@ def __init__(self, self.sims_per_step = sims_per_step self.evaluate = evaluate self.clip_actions = clip_actions + self.done_at_exit = done_at_exit def get_additional_param(self, key): """Return a variable from additional_params.""" diff --git a/flow/envs/multiagent/__init__.py b/flow/envs/multiagent/__init__.py index f7889591d..818d6662b 100644 --- a/flow/envs/multiagent/__init__.py +++ b/flow/envs/multiagent/__init__.py @@ -10,7 +10,7 @@ from flow.envs.multiagent.traffic_light_grid import MultiTrafficLightGridPOEnv from flow.envs.multiagent.highway import MultiAgentHighwayPOEnv from flow.envs.multiagent.merge import MultiAgentMergePOEnv -from flow.envs.multiagent.i210 import I210MultiEnv +from flow.envs.multiagent.i210 import I210MultiEnv, MultiStraightRoad __all__ = [ 'MultiEnv', @@ -21,5 +21,6 @@ 'MultiAgentAccelPOEnv', 'MultiAgentWaveAttenuationPOEnv', 'MultiAgentMergePOEnv', - 'I210MultiEnv' + 'I210MultiEnv', + 'MultiStraightRoad', ] diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index ec95474c6..dfc7c72ad 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -122,10 +122,11 @@ def step(self, rl_actions): else: reward = self.compute_reward(rl_actions, fail=crash) - for rl_id in self.k.vehicle.get_arrived_rl_ids(): - done[rl_id] = True - reward[rl_id] = 0 - states[rl_id] = np.zeros(self.observation_space.shape[0]) + if self.env_params.done_at_exit: + for rl_id in self.k.vehicle.get_arrived_rl_ids(): + done[rl_id] = True + reward[rl_id] = 0 + states[rl_id] = -1 * np.ones(self.observation_space.shape[0]) return states, reward, done, infos @@ -154,6 +155,7 @@ def reset(self, new_inflow_rate=None): 
self.sim_params.render = True # got to restart the simulation to make it actually display anything self.restart_simulation(self.sim_params) + self.should_render = False # warn about not using restart_instance when using inflows if len(self.net_params.inflows.get()) > 0 and \ diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 6368e7a2d..f931b3bec 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -8,6 +8,8 @@ # largest number of lanes on any given edge in the network MAX_LANES = 6 +SPEED_SCALE = 50 +HEADWAY_SCALE = 1000 ADDITIONAL_ENV_PARAMS = { # maximum acceleration for autonomous vehicles, in m/s^2 @@ -61,6 +63,7 @@ class I210MultiEnv(MultiEnv): def __init__(self, env_params, sim_params, network, simulator='traci'): super().__init__(env_params, sim_params, network, simulator) self.lead_obs = env_params.additional_params.get("lead_obs") + self.max_lanes = MAX_LANES @property def observation_space(self): @@ -76,8 +79,8 @@ def observation_space(self): # speed, dist to ego vehicle, binary value which is 1 if the vehicle is # an AV else: - leading_obs = 3 * MAX_LANES - follow_obs = 3 * MAX_LANES + leading_obs = 3 * self.max_lanes + follow_obs = 3 * self.max_lanes # speed and lane self_obs = 2 @@ -119,11 +122,16 @@ def get_state(self): veh_info = {} for rl_id in self.k.vehicle.get_rl_ids(): speed = self.k.vehicle.get_speed(rl_id) - headway = self.k.vehicle.get_headway(rl_id) - lead_speed = self.k.vehicle.get_speed(self.k.vehicle.get_leader(rl_id)) - if lead_speed == -1001: - lead_speed = 0 - veh_info.update({rl_id: np.array([speed / 50.0, headway / 1000.0, lead_speed / 50.0])}) + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id in ["", None]: + # in case leader is not visible + lead_speed = SPEED_SCALE + headway = HEADWAY_SCALE + else: + lead_speed = self.k.vehicle.get_speed(lead_id) + headway = self.k.vehicle.get_headway(rl_id) + self.leader.append(lead_id) + veh_info.update({rl_id: np.array([speed / 
SPEED_SCALE, headway /HEADWAY_SCALE, lead_speed / SPEED_SCALE])}) else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) @@ -131,8 +139,6 @@ def get_state(self): return veh_info def compute_reward(self, rl_actions, **kwargs): - # TODO(@evinitsky) we need something way better than this. Something that adds - # in notions of local reward """See class definition.""" # in the warmup steps if rl_actions is None: @@ -140,6 +146,7 @@ def compute_reward(self, rl_actions, **kwargs): rewards = {} if self.env_params.additional_params["local_reward"]: + des_speed = self.env_params.additional_params["target_velocity"] for rl_id in self.k.vehicle.get_rl_ids(): rewards[rl_id] = 0 speeds = [] @@ -150,7 +157,8 @@ def compute_reward(self, rl_actions, **kwargs): speeds.append(self.k.vehicle.get_speed(rl_id)) if len(speeds) > 0: # rescale so the q function can estimate it quickly - rewards[rl_id] = np.mean(speeds) / 500.0 + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed))**2 + for speed in speeds]) / (des_speed**2) else: for rl_id in self.k.vehicle.get_rl_ids(): if self.env_params.evaluate: @@ -194,10 +202,6 @@ def additional_command(self): lead_id = self.k.vehicle.get_leader(rl_id) if lead_id: self.k.vehicle.set_observed(lead_id) - # follower - follow_id = self.k.vehicle.get_follower(rl_id) - if follow_id: - self.k.vehicle.set_observed(follow_id) def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. @@ -238,3 +242,24 @@ def veh_statistics(self, rl_id): speed = self.k.vehicle.get_speed(rl_id) / 100.0 lane = (self.k.vehicle.get_lane(rl_id) + 1) / 10.0 return np.array([speed, lane]) + + +class MultiStraightRoad(I210MultiEnv): + """Partially observable multi-agent environment for a straight road. 
Look at superclass for more information.""" + + def __init__(self, env_params, sim_params, network, simulator): + super().__init__(env_params, sim_params, network, simulator) + self.max_lanes = 1 + + def _apply_rl_actions(self, rl_actions): + """See class definition.""" + # in the warmup steps, rl_actions is None + if rl_actions: + rl_ids = [] + accels = [] + for rl_id, actions in rl_actions.items(): + accels.append(actions[0]) + rl_ids.append(rl_id) + + # prevent the AV from blocking the entrance + self.k.vehicle.apply_acceleration(rl_ids, accels) diff --git a/flow/networks/highway.py b/flow/networks/highway.py index c63292067..e1234053c 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -128,7 +128,8 @@ def specify_routes(self, net_params): def specify_edge_starts(self): """See parent class.""" - edgestarts = [("highway_{}".format(i), 0) + length = self.net_params.additional_params["length"] + edgestarts = [("highway_{}".format(i), (length / self.num_edges) * i) for i in range(self.num_edges)] return edgestarts diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index a08ecdf0f..9ac6938d4 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -17,7 +17,7 @@ python time_space_diagram.py .csv .json """ from flow.utils.rllib import get_flow_params -from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork +from flow.networks import RingNetwork, FigureEightNetwork, MergeNetwork, I210SubNetwork, HighwayNetwork import argparse import csv @@ -37,7 +37,8 @@ RingNetwork, FigureEightNetwork, MergeNetwork, - I210SubNetwork + I210SubNetwork, + HighwayNetwork ] @@ -129,12 +130,14 @@ def get_time_space_data(data, params): assert params['network'] in ACCEPTABLE_NETWORKS, \ 'Network must be one of: ' + ', '.join(ACCEPTABLE_NETWORKS) + # switcher used to compute the positions based on the type of network # switcher used to compute the positions based on 
the type of network switcher = { RingNetwork: _ring_road, MergeNetwork: _merge, FigureEightNetwork: _figure_eight, - I210SubNetwork: _i210_subnetwork + I210SubNetwork: _i210_subnetwork, + HighwayNetwork: _highway, } # Collect a list of all the unique times. @@ -226,6 +229,74 @@ def _merge(data, params, all_time): return pos, speed, all_time +def _highway(data, params, all_time): + r"""Generate position and speed data for the highway subnetwork. + + Parameters + ---------- + data : dict of dict + Key = "veh_id": name of the vehicle \n Elements: + * "time": time step at every sample + * "edge": edge ID at every sample + * "pos": relative position at every sample + * "vel": speed at every sample + params : dict + flow-specific parameters + all_time : array_like + a (n_steps,) vector representing the unique time steps in the + simulation + Returns + ------- + as_array + n_steps x n_veh matrix specifying the absolute position of every + vehicle at every time step. Set to zero if the vehicle is not present + in the network at that time step. + as_array + n_steps x n_veh matrix specifying the speed of every vehicle at every + time step. Set to zero if the vehicle is not present in the network at + that time step. 
+ """ + length = params['net'].additional_params['length'] + num_edges = params['net'].additional_params['num_edges'] + edge_len = length / num_edges + edge_starts = {} + for i in range(num_edges): + edge_starts.update({"highway_{}".format(i): i * edge_len, ":edge_{}_0".format(i): i * edge_len}) + + # compute the absolute position + for veh_id in data.keys(): + data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'], + data[veh_id]['pos'], + edge_starts) + + # track only vehicles that were around during this time period + # create the output variables + pos = np.zeros((all_time.shape[0], len(data.keys()))) + speed = np.zeros((all_time.shape[0], len(data.keys()))) + observed_row_list = [] + for i, veh_id in enumerate(sorted(data.keys())): + for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'], + data[veh_id]['abs_pos'], + data[veh_id]['time'], + data[veh_id]['edge'], + data[veh_id]['lane']): + # avoid vehicles not on the relevant edges. Also only check the second to + # last lane + if edge not in edge_starts.keys() or ti not in all_time: + continue + else: + if i not in observed_row_list: + observed_row_list.append(i) + ind = np.where(ti == all_time)[0] + pos[ind, i] = abs_pos + speed[ind, i] = spd + + pos = pos[:, observed_row_list] + speed = speed[:, observed_row_list] + + return pos, speed, all_time + + def _ring_road(data, params, all_time): r"""Generate position and speed data for the ring road. 
@@ -585,7 +656,7 @@ def _get_abs_pos_1_edge(edges, rel_pos, edge_starts): for indx_car in range(pos.shape[1]): unique_car_pos = pos[:, indx_car] - if flow_params['network'] == I210SubNetwork: + if flow_params['network'] == I210SubNetwork or flow_params['network'] == HighwayNetwork: indices = np.where(pos[:, indx_car] != 0)[0] unique_car_speed = speed[indices, indx_car] points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2) diff --git a/requirements.txt b/requirements.txt index 191ecc740..c069a6cb6 100644 --- a/requirements.txt +++ b/requirements.txt @@ -27,4 +27,6 @@ plotly==2.4.0 tabulate tensorflow==1.14.0 awscli==1.16.309 +torch==1.4.0 pytz +tensorboardX diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index ea84bbee0..9fcf97cb5 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -32,7 +32,7 @@ auth: # By default Ray creates a new private keypair, but you can also use your own. # If you do so, make sure to also set "KeyName" in the head and worker node # configurations below. -# ssh_private_key: /path/to/your/key.pem +# ssh_private_key: # Provider-specific config for the head node, e.g. instance type. By default # Ray will auto-configure unspecified fields such as SubnetId and KeyName. @@ -74,6 +74,8 @@ head_setup_commands: - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions - pip install awscli==1.16.309 + - pip install stable-baselines + - pip install torch==1.4.0 - pip install pytz # Custom commands that will be run on worker nodes after common setup. 
From 1759b027dbf24354e050bce3c6c6705092c2d6ec Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 028/335] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ examples/simulate.py | 8 +++++++- flow/core/experiment.py | 2 +- 4 files changed, 60 insertions(+), 4 deletions(-) create mode 100644 examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. 
A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path): the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which query should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis query + if a string of list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + 
for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file diff --git a/examples/simulate.py b/examples/simulate.py index 6ad0048eb..40e04d4f6 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -51,6 +51,12 @@ def parse_args(args): help='Specifies the name of the partition to store the output' 'file on S3. Putting not None value for this argument' 'automatically set gen_emission to True.') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser.parse_known_args(args)[0] @@ -82,4 +88,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 63c92e798..12391f9ae 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. 
>>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. 
Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file From e84952580b1c7aeb3809593313121169872790d2 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 029/335] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 240 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 12391f9ae..80d607e7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,239 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
class Experiment:
    """Class for systematically running simulations in any supported simulator.

    This class acts as a runner for a network and environment. In order to use
    it to run a network and environment in the absence of a method specifying
    the actions of RL agents in the network, type the following:

        >>> from flow.envs import Env
        >>> flow_params = dict(...)  # see the examples in exp_config
        >>> exp = Experiment(flow_params)  # for some experiment configuration
        >>> exp.run(num_runs=1)

    If you wish to specify the actions of RL agents in the network, this may
    be done as follows:

        >>> rl_actions = lambda state: 0  # replace with something appropriate
        >>> exp.run(num_runs=1, rl_actions=rl_actions)

    Finally, if you would like to plot and visualize your results, this
    class can generate csv files from emission files produced by sumo. These
    files will contain the speeds, positions, edges, etc... of every vehicle
    in the network at every time step.

    In order to ensure that the simulator constructs an emission file, set the
    ``emission_path`` attribute in ``SimParams`` to some path.

        >>> from flow.core.params import SimParams
        >>> flow_params['sim'] = SimParams(emission_path="./data")

    Once you have included this in your environment, run your Experiment
    object as follows:

        >>> exp.run(num_runs=1, convert_to_csv=True)

    After the experiment is complete, look at the "./data" directory. There
    will be two files, one with the suffix .xml and another with the suffix
    .csv. The latter should be easily interpretable from any csv reader (e.g.
    Excel), and can be parsed using tools such as numpy and pandas.

    Attributes
    ----------
    custom_callables : dict < str, lambda >
        strings and lambda functions corresponding to some information we want
        to extract from the environment. The lambda will be called at each
        step to extract information from the env and it will be stored in a
        dict keyed by the str.
    env : flow.envs.Env
        the environment object the simulator will run
    """

    def __init__(self, flow_params, custom_callables=None):
        """Instantiate the Experiment class.

        Parameters
        ----------
        flow_params : dict
            flow-specific parameters
        custom_callables : dict < str, lambda >
            strings and lambda functions corresponding to some information we
            want to extract from the environment. The lambda will be called at
            each step to extract information from the env and it will be
            stored in a dict keyed by the str.
        """
        self.custom_callables = custom_callables or {}

        # Get the env name and a creator for the environment.
        create_env, _ = make_create_env(flow_params)

        # Create the environment.
        self.env = create_env()

        logging.info(" Starting experiment {} at {}".format(
            self.env.network.name, str(datetime.datetime.utcnow())))

        logging.info("Initializing environment.")

    def _collect_vehicle_info(self, extra_info, veh_ids, source_id, run_i):
        """Append one data-pipeline row per vehicle for the current step.

        Parameters
        ----------
        extra_info : dict < str, list >
            accumulator of per-vehicle time series, mutated in place
        veh_ids : list of str
            ids of the vehicles currently in the network
        source_id : str
            unique identifier for this experiment (uuid hex)
        run_i : int
            index of the current run, appended to the source id
        """
        kv = self.env.k.vehicle
        for vid in veh_ids:
            # timestep is reported in ms by the kernel; convert to seconds
            extra_info["time"].append(kv.get_timestep(vid) / 1000)
            extra_info["id"].append(vid)
            extra_info["headway"].append(kv.get_headway(vid))
            extra_info["acceleration"].append(kv.get_accel(vid))
            extra_info["leader_id"].append(kv.get_leader(vid))
            extra_info["follower_id"].append(kv.get_follower(vid))
            extra_info["leader_rel_speed"].append(
                kv.get_speed(kv.get_leader(vid)) - kv.get_speed(vid))
            extra_info["accel_without_noise"].append(
                kv.get_accel_without_noise(vid))
            extra_info["road_grade"].append(kv.get_road_grade(vid))
            position = kv.get_2D_position(vid)
            extra_info["x"].append(position[0])
            extra_info["y"].append(position[1])
            extra_info["speed"].append(kv.get_speed(vid))
        # one identical source-id entry per vehicle row added above
        extra_info["source_id"].extend(
            [source_id + "run" + str(run_i)] * len(veh_ids))

    def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None):
        """Run the given network for a set number of runs.

        Parameters
        ----------
        num_runs : int
            number of runs the experiment should perform
        rl_actions : method, optional
            maps states to actions to be performed by the RL agents (if
            there are any)
        convert_to_csv : bool
            Specifies whether to convert the emission file created by sumo
            into a csv file
        partition_name : str
            Specifies the S3 partition you want to store the output file,
            will be used to later for query. If NONE, won't upload output
            to S3.
        only_query : str
            Specifies whether queries should be automatically run the
            simulation data when it gets uploaded to s3

        Returns
        -------
        info_dict : dict < str, Any >
            contains returns, average speed per step

        Raises
        ------
        ValueError
            if convert_to_csv is True but no emission path was configured
        """
        num_steps = self.env.env_params.horizon

        # raise an error if convert_to_csv is set to True but no emission
        # file will be generated, to avoid getting an error at the end of the
        # simulation
        if convert_to_csv and self.env.sim_params.emission_path is None:
            raise ValueError(
                'The experiment was run with convert_to_csv set '
                'to True, but no emission file will be generated. If you wish '
                'to generate an emission file, you should set the parameter '
                'emission_path in the simulation parameters (SumoParams or '
                'AimsunParams) to the path of the folder where emissions '
                'output should be generated. If you do not wish to generate '
                'emissions, set the convert_to_csv parameter to False.')

        # used to store the aggregate statistics of each run
        info_dict = {
            "returns": [],
            "velocities": [],
            "outflows": [],
        }
        info_dict.update({
            key: [] for key in self.custom_callables.keys()
        })

        if rl_actions is None:
            def rl_actions(*_):
                return None

        # time profiling information
        t = time.time()
        times = []

        # per-vehicle, per-step data collected for the data pipeline
        extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [],
                      "headway": [], "acceleration": [], "leader_id": [],
                      "follower_id": [], "leader_rel_speed": [],
                      "accel_without_noise": [], "road_grade": [],
                      "source_id": []}
        source_id = uuid.uuid4().hex

        for i in range(num_runs):
            ret = 0
            vel = []
            custom_vals = {key: [] for key in self.custom_callables.keys()}
            state = self.env.reset()
            for j in range(num_steps):
                t0 = time.time()
                state, reward, done, _ = self.env.step(rl_actions(state))
                t1 = time.time()
                # guard against a zero-length interval when the clock does
                # not advance between consecutive reads (fixes a potential
                # ZeroDivisionError on very fast steps)
                if t1 > t0:
                    times.append(1 / (t1 - t0))

                # Compute the velocity speeds and cumulative returns.
                veh_ids = self.env.k.vehicle.get_ids()
                vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids)))
                ret += reward

                # collect additional information for the data pipeline
                self._collect_vehicle_info(extra_info, veh_ids, source_id, i)

                # Compute the results for the custom callables.
                for (key, lambda_func) in self.custom_callables.items():
                    custom_vals[key].append(lambda_func(self.env))

                if done:
                    break

            # Store the information from the run in info_dict.
            outflow = self.env.k.vehicle.get_outflow_rate(int(500))
            info_dict["returns"].append(ret)
            info_dict["velocities"].append(np.mean(vel))
            info_dict["outflows"].append(outflow)
            for key in custom_vals.keys():
                info_dict[key].append(np.mean(custom_vals[key]))

            print("Round {0}, return: {1}".format(i, ret))

        # Print the averages/std for all variables in the info_dict.
        for key in info_dict.keys():
            print("Average, std {}: {}, {}".format(
                key, np.mean(info_dict[key]), np.std(info_dict[key])))

        print("Total time:", time.time() - t)
        print("steps/second:", np.mean(times))
        self.env.terminate()

        if convert_to_csv and self.env.simulator == "traci":
            # wait a short period of time to ensure the xml file is readable
            time.sleep(0.1)

            # collect the location of the emission file
            dir_path = self.env.sim_params.emission_path
            emission_filename = \
                "{0}-emission.xml".format(self.env.network.name)
            emission_path = os.path.join(dir_path, emission_filename)

            # convert the emission file into a csv
            emission_to_csv(emission_path)

            # Delete the .xml version of the emission file.
            os.remove(emission_path)

        trajectory_table_path = './data/' + source_id + ".csv"
        upload_file_path = generate_trajectory_from_flow(
            trajectory_table_path, extra_info, partition_name)

        if partition_name:
            # NOTE(review): str(only_query)[2:-2] assumes only_query prints as
            # a single-element list (strips "['" and "']") — fragile; confirm
            # against the data-pipeline caller.
            upload_to_s3('brent.experiments',
                         'trajectory-output/' + 'partition_name=' +
                         partition_name + '/' +
                         upload_file_path.split('/')[-1].split('_')[0] +
                         '.csv',
                         upload_file_path, str(only_query)[2:-2])

            # delete the S3-only version of the trajectory file
            os.remove(upload_file_path)

        return info_dict
flow/controllers/imitation_learning/replay_script.py create mode 100644 flow/controllers/imitation_learning/utils_tensorflow.py diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 9c7cb0b71..a3f6864ae 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -1,6 +1,5 @@ import numpy as np import tensorflow as tf -from utils import * import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 383b10beb..8c7d35b27 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,6 +1,6 @@ import numpy as np import tensorflow as tf -from utils import * +from utils_tensorflow import * import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer @@ -12,7 +12,7 @@ class ImitatingNetwork(): """ # Implementation in Tensorflow - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars'): + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): self.sess = sess self.action_dim = action_dim @@ -24,16 +24,21 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.inject_noise=inject_noise self.noise_variance = noise_variance - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() + if load_existing: + 
self.load_network(load_path) + + else: + with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): + self.build_network() if self.training: self.replay_buffer = ReplayBuffer(replay_buffer_size) else: self.replay_buffer = None - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + if not load_existing: + self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) def build_network(self): """ @@ -46,25 +51,45 @@ def build_network(self): self.define_train_op() + def load_network(self, path): + """ + Load tensorflow model from the path specified, set action prediction to proper placeholder + """ + loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') + loader.restore(self.sess, path+'model.ckpt') + + self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0') + self.action_predictions = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') + + if self.inject_noise == 1: + self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) + + def define_placeholders(self): """ Defines input, output, and training placeholders for neural net """ - self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="obs", dtype=tf.float32) + self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="observation", dtype=tf.float32) self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) if self.training: self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) + def define_forward_pass(self): + """ + Build network and initialize proper action prediction op + """ pred_action = 
build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) self.action_predictions = pred_action - print("TYPE: ", type(self.obs_placeholder)) if self.inject_noise == 1: self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) def define_train_op(self): + """ + Defines training operations for network + """ true_actions = self.action_labels_placeholder predicted_actions = self.action_predictions @@ -72,6 +97,9 @@ def define_train_op(self): self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): + """ + Executes one training step for the given batch of observation and action data + """ action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) @@ -100,3 +128,5 @@ def sample_data(self, batch_size): def save_network(self, save_path): self.saver.save(self.sess, save_path) + # tensorboard + writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 4e362bd41..77902814c 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -1,9 +1,6 @@ import time import numpy as np -import tensorflow as tf -import gym import os -from utils import * class ReplayBuffer(object): @@ -33,7 +30,7 @@ def add_rollouts(self, rollouts_list): for rollout in rollouts_list: self.rollouts.append(rollout) - observations, actions, expert_actions, rewards, next_observations, terminals = unpack_rollouts(rollouts_list) + observations, actions, expert_actions, rewards, next_observations, terminals = 
self.unpack_rollouts(rollouts_list) assert (not np.any(np.isnan(expert_actions))), "Invalid actions added to replay buffer" @@ -61,4 +58,21 @@ def sample_batch(self, batch_size): size = len(self.observations) rand_inds = np.random.randint(0, size, batch_size) - return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds], self.rewards[rand_inds], self.next_observations[rand_inds], self.terminals[rand_inds] + return self.observations[rand_inds], self.actions[rand_inds], self.expert_actions[rand_inds] + + + + def unpack_rollouts(self, rollouts_list): + """ + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" + return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + """ + observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) + actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) + expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) + rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) + next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) + terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) + + return observations, actions, expert_actions, rewards, next_observations, terminals diff --git a/flow/controllers/imitation_learning/replay_script.py b/flow/controllers/imitation_learning/replay_script.py new file mode 100644 index 000000000..5e3984e0d --- /dev/null +++ b/flow/controllers/imitation_learning/replay_script.py @@ -0,0 +1,80 @@ +import time +import numpy as np +import gym +import os +from flow.utils.registry import make_create_env +from i210_multiagent import flow_params as flow_params 
def run_experiment(load_path='/Users/akashvelu/Documents/models2/'):
    """Replay a trained imitation policy inside a Flow Experiment.

    Loads a saved tensorflow imitation network and runs one Experiment
    episode in which every RL vehicle's action is produced by the network.

    Parameters
    ----------
    load_path : str
        directory containing the saved tensorflow checkpoint; the default is
        the previously hard-coded path, kept for backward compatibility
        (pass your own path rather than relying on it)
    """
    create_env, _ = make_create_env(flow_params)
    env = create_env()

    # observation/action dimensions needed to rebuild the network
    obs_dim = env.observation_space.shape[0]
    action_dim = (1,)[0]

    sess = create_tf_session()
    action_network = ImitatingNetwork(
        sess, action_dim, obs_dim, None, None, None, None,
        load_existing=True, load_path=load_path)

    def get_rl_actions(state):
        # map each vehicle's observation to the network's predicted accel
        rl_actions = {}
        for vehicle_id in state.keys():
            obs = state[vehicle_id]
            rl_actions[vehicle_id] = \
                action_network.get_accel_from_observation(obs)
        return rl_actions

    exp = Experiment(flow_params)
    exp.run(num_runs=1, rl_actions=get_rl_actions, convert_to_csv=True)


def run_rollout(load_path='/Users/akashvelu/Documents/models2/'):
    """Roll out the loaded imitation policy once, accumulating total reward.

    Steps the multiagent environment directly (no Experiment wrapper),
    letting SUMO control the simulation whenever no RL vehicles are present,
    and prints the final cumulative reward.

    Parameters
    ----------
    load_path : str
        directory containing the saved tensorflow checkpoint; default kept
        for backward compatibility with the previous hard-coded path
    """
    create_env, _ = make_create_env(flow_params)
    env = create_env()

    obs_dim = env.observation_space.shape[0]
    action_dim = (1,)[0]

    sess = create_tf_session()
    action_network = ImitatingNetwork(
        sess, action_dim, obs_dim, None, None, None, None,
        load_existing=True, load_path=load_path)

    env.reset()

    reward = 0
    while True:
        rl_vehicles = env.k.vehicle.get_rl_ids()
        if len(rl_vehicles) == 0:
            # no RL vehicles in the network: step with default (SUMO) control
            observation_dict, reward_dict, done_dict, _ = env.step(None)
            reward += sum(reward_dict.values())
            if done_dict['__all__']:
                break
            continue

        rl_actions = {}
        observations = env.get_state()

        for vehicle_id in rl_vehicles:
            obs = observations[vehicle_id]
            rl_actions[vehicle_id] = \
                action_network.get_accel_from_observation(obs)

        observation_dict, reward_dict, done_dict, _ = env.step(rl_actions)
        reward += sum(reward_dict.values())
        if done_dict['__all__']:
            break

    print("Final Reward: ", reward)


if __name__ == "__main__":
    run_experiment()
a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index b88f7fbd7..2b7e823cc 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -32,10 +32,10 @@ def main(): parser.add_argument('--ep_len', type=int, default=3000) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) - parser.add_argument('--n_iter', '-n', type=int, default=5) + parser.add_argument('--n_iter', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=10000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=30000) + parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=3000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step @@ -46,7 +46,7 @@ def main(): parser.add_argument('--replay_buffer_size', type=int, default=1000000) parser.add_argument('--save_path', type=str, default='') parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=10) + parser.add_argument('--num_eval_episodes', type=int, default=30) parser.add_argument('--inject_noise', type=int, default=0) parser.add_argument('--noise_variance',type=float, default=0.5) parser.add_argument('--vehicle_id', type=str, default='rl_0') @@ -63,16 +63,14 @@ def main(): train = Runner(params) train.run_training_loop() - # evaluate - train.evaluate() - print("DONE") - + # save model after training if params['save_model'] == 1: train.save_controller_network() - # tensorboard - if params['save_model'] == 1: - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + # evaluate + train.evaluate() + 
print("DONE") if __name__ == "__main__": diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index d9c1b3164..937ab4793 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,7 +5,7 @@ import gym import os from flow.utils.registry import make_create_env -from multiagent_ring_env import flow_params +from i210_multiagent import flow_params from imitating_controller import ImitatingController from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController @@ -13,6 +13,7 @@ from flow.core.params import SumoCarFollowingParams import tensorflow as tf from utils import * +from utils_tensorflow import * class Trainer(object): """ @@ -36,12 +37,7 @@ def __init__(self, params): # TODO: remove print print("MULTI: ", self.multiagent) - if self.multiagent: - self.vehicle_ids = list(init_state.keys()) - else: - print("IDS: ", self.env.k.vehicle.get_ids()) - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_ids = [self.params['vehicle_id']] + self.vehicle_ids = self.env.k.vehicle.get_rl_ids() # neural net setup obs_dim = self.env.observation_space.shape[0] @@ -55,11 +51,11 @@ def __init__(self, params): # controllers setup car_following_params = SumoCarFollowingParams() - self.expert_controllers = [] - self.controllers = [] + self.controllers = dict() for vehicle_id in self.vehicle_ids: - self.expert_controllers.append(FollowerStopper(vehicle_id, car_following_params=car_following_params)) - self.controllers.append(ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params)) + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params) + self.controllers[vehicle_id] = (imitator, expert) def 
run_training_loop(self, n_iter): @@ -105,13 +101,8 @@ def collect_training_trajectories(self, itr, batch_size): envsteps_this_batch: the sum over the numbers of environment steps in paths """ - if itr == 0: - collect_controllers = self.expert_controllers - else: - collect_controllers = self.controllers - print("\nCollecting data to be used for training...") - trajectories, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_ids, collect_controllers, self.expert_controllers, batch_size, self.params['ep_len'], self.multiagent) + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0) return trajectories, envsteps_this_batch @@ -122,7 +113,7 @@ def train_controller(self): print('Training controller using sampled data from replay buffer') for train_step in range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.action_network.sample_data(self.params['train_batch_size']) + ob_batch, ac_batch, expert_ac_batch = self.action_network.sample_data(self.params['train_batch_size']) self.action_network.train(ob_batch, expert_ac_batch) def evaluate_controller(self, num_trajs = 10): @@ -135,7 +126,7 @@ def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") - trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) + trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False) average_imitator_reward = 0 total_imitator_steps = 0 @@ -146,7 +137,9 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = 0 # compare actions taken in each step of trajectories - for traj in trajectories: + for traj_pair in trajectories: + traj = 
traj_pair[0] + traj_len = traj_pair[1] imitator_actions = traj['actions'] expert_actions = traj['expert_actions'] @@ -157,7 +150,7 @@ def evaluate_controller(self, num_trajs = 10): action_errors = np.append(action_errors, action_error) average_imitator_reward += np.sum(traj['rewards']) - total_imitator_steps += len(traj['rewards']) + total_imitator_steps += traj_len average_imitator_reward_per_rollout += np.sum(traj['rewards']) average_imitator_reward = average_imitator_reward / total_imitator_steps @@ -167,16 +160,18 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = average_action_imitator / total_imitator_steps - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_ids, self.expert_controllers, self.expert_controllers, num_trajs, self.params['ep_len'], self.multiagent) + expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True) average_expert_reward = 0 total_expert_steps = 0 average_expert_reward_per_rollout = 0 # compare reward accumulated in trajectories collected via expert vs. 
via imitator - for traj in expert_trajectories: + for traj_pair in expert_trajectories: + traj = traj_pair[0] + traj_len = traj_pair[1] average_expert_reward += np.sum(traj['rewards']) - total_expert_steps += len(traj['rewards']) + total_expert_steps += traj_len average_expert_reward_per_rollout += np.sum(traj['rewards']) average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) @@ -188,8 +183,9 @@ def evaluate_controller(self, num_trajs = 10): print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + print("MEAN EXPERT ACTION: ", average_action_expert) print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") def save_controller_network(self): diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 6e694ea01..499e06f1d 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -2,10 +2,15 @@ import os import numpy as np import math +from flow.core.params import SumoCarFollowingParams +from imitating_controller import ImitatingController +from imitating_network import ImitatingNetwork +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper """ Class agnostic helper functions """ -def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller, max_trajectory_length): +def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert): """ Samples a trajectory for a given vehicle using the actions prescribed by 
specified controller. Args: @@ -18,25 +23,57 @@ def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ - print("COLLECTING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) + vehicle_ids = env.k.vehicle.get_rl_ids() + print("VEHICLE IDS: ", vehicle_ids) + assert len(vehicle_ids) <= 1, "Not single-agent" observation = env.reset() - assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" + if len(vehicle_ids) == 1: + vehicle_id = vehicle_ids[0] + else: + vehicle_id = None observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] traj_length = 0 while True: - action = controller.get_action(env) + # update vehicle ids and make sure it is single agent + vehicle_ids = env.k.vehicle.get_rl_ids() + if len(vehicle_ids) == 0: + observation, reward, done, _ = env.step(None) + if done: + break + continue + + assert len(vehicle_ids) == 1, "Not single agent" + + # init controllers if vehicle id is new + vehicle_id = vehicle_ids[0] + if vehicle_id not in set(controllers.get_keys()): + + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, action_network, false, car_following_params=car_following_params) + controllers[vehicle_id] = (imitator, expert) + + # decide which controller to use to collect trajectory + expert_controller = controllers[vehicle_id][1] + if use_expert: + controller = expert_controller + else: + controller = controllers[vehicle_id][0] + + + print("COLLECTING CONTROLLER: ", controller) + print("EXPERT CONTROLLER: ", expert_controller) + + action = controller.get_action(env) if type(action) == np.ndarray: action = action.flatten()[0] expert_action = expert_controller.get_action(env) if (expert_action is None or math.isnan(expert_action)): 
observation, reward, done, _ = env.step(action) - traj_length += 1 terminate_rollout = traj_length == max_trajectory_length or done if terminate_rollout: break @@ -56,10 +93,10 @@ def sample_trajectory_singleagent(env, vehicle_id, controller, expert_controller if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length): +def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert): """ Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. @@ -74,25 +111,45 @@ def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controlle Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ - print("COLLECTING CONTROLLER: ", controllers[0]) - print("EXPERT CONTROLLER: ", expert_controllers[0]) observation_dict = env.reset() - for vehicle_id in vehicle_ids: - assert vehicle_id in env.k.vehicle.get_ids(), "Vehicle ID not in env!" 
- observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] traj_length = 0 while True: + vehicle_ids = env.k.vehicle.get_rl_ids() + if len(vehicle_ids) == 0: + print("NO RL VEHICLES") + observation_dict, reward, done, _ = env.step(None) + print(env.k.vehicle.get_rl_ids()) + if done['__all__']: + break + continue + + # actions taken by collecting controller rl_actions = dict() invalid_expert_action = False - expert_action_dict = dict() + # actions taken by expert + expert_action_dict= dict() for i in range(len(vehicle_ids)): vehicle_id = vehicle_ids[i] - controller = controllers[i] - expert_controller = expert_controllers[i] + + if vehicle_id not in set(controllers.keys()): + car_following_params = SumoCarFollowingParams() + + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, action_network, True, car_following_params=car_following_params) + controllers[vehicle_id] = (imitator, expert) + + expert_controller = controllers[vehicle_id][1] + if use_expert: + controller = expert_controller + else: + controller = controllers[vehicle_id][0] + + if traj_length == 0 and i == 0: + print("COLLECTOR: ", controller) action = controller.get_action(env) @@ -109,8 +166,7 @@ def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controlle if invalid_expert_action: # invalid action in rl_actions, so default control to SUMO - observations_dict, reward_dict, done_dict, _ = env.step(None) - traj_length += 1 + observation_dict, reward_dict, done_dict, _ = env.step(None) terminate_rollout = traj_length == max_trajectory_length or done_dict['__all__'] if terminate_rollout: break @@ -134,10 +190,10 @@ def sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controlle if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals) + return traj_dict(observations, actions, 
expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectories(env, vehicle_ids, controllers, expert_controllers, min_batch_timesteps, max_trajectory_length, multiagent): +def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -158,18 +214,17 @@ def sample_trajectories(env, vehicle_ids, controllers, expert_controllers, min_b while total_envsteps < min_batch_timesteps: if multiagent: - trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) else: - trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) trajectories.append(trajectory) - traj_env_steps = len(trajectory["rewards"]) / len(vehicle_ids) - total_envsteps += traj_env_steps + total_envsteps += traj_length return trajectories, total_envsteps -def sample_n_trajectories(env, vehicle_ids, controllers, expert_controllers, n, max_trajectory_length, multiagent): +def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert): """ Collects a fixed number of trajectories. 
@@ -189,11 +244,11 @@ def sample_n_trajectories(env, vehicle_ids, controllers, expert_controllers, n, for _ in range(n): if multiagent: - trajectory = sample_trajectory_multiagent(env, vehicle_ids, controllers, expert_controllers, max_trajectory_length) + trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) else: - trajectory = sample_trajectory_singleagent(env, vehicle_ids[0], controllers[0], expert_controllers[0], max_trajectory_length) + trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) - trajectories.append(trajectory) + trajectories.append((trajectory, length)) return trajectories @@ -208,52 +263,3 @@ def traj_dict(observations, actions, expert_actions, rewards, next_observations, "rewards" : np.array(rewards, dtype=np.float32), "next_observations": np.array(next_observations, dtype=np.float32), "terminals": np.array(terminals, dtype=np.float32)} - - -def unpack_rollouts(rollouts_list): - """ - Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays - rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" - return separate np arrays of observations, actions, rewards, next_observations, and is_terminals - """ - observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) - actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) - expert_actions = np.concatenate([rollout["expert_actions"] for rollout in rollouts_list]) - rewards = np.concatenate([rollout["rewards"] for rollout in rollouts_list]) - next_observations = np.concatenate([rollout["next_observations"] for rollout in rollouts_list]) - terminals = np.concatenate([rollout["terminals"] for rollout in rollouts_list]) - - return observations, actions, expert_actions, 
rewards, next_observations, terminals - - -# Below are tensorflow related functions - -def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): - """ - Builds a feedfoward neural network for action prediction - - arguments: - input_placeholder: placeholder variable for the state (batch_size, input_size) - scope: variable scope of the network - - n_layers: number of hidden layers - size: dimension of each hidden layer - activation: activation of each hidden layer - - output_size: size of the output layer - output_activation: activation of the output layer - - returns: - output_placeholder: the result of pass through Neural Network - """ - output_placeholder = input_placeholder - with tf.variable_scope(scope): - for _ in range(n_layers): - output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) - output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation) - return output_placeholder - -def create_tf_session(): - config = tf.ConfigProto(device_count={'GPU': 0}) - sess = tf.Session(config=config) - return sess diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py new file mode 100644 index 000000000..57000323f --- /dev/null +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -0,0 +1,35 @@ +import numpy as np +import tensorflow as tf + + +# Below are tensorflow related functions + +def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): + """ + Builds a feedfoward neural network for action prediction + + arguments: + input_placeholder: placeholder variable for the state (batch_size, input_size) + scope: variable scope of the network + + n_layers: number of hidden layers + size: dimension of each hidden layer + activation: activation of each hidden layer + + output_size: size of the output 
layer + output_activation: activation of the output layer + + returns: + output_placeholder: the result of pass through Neural Network + """ + output_placeholder = input_placeholder + with tf.variable_scope(scope): + for _ in range(n_layers): + output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) + output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation,name='Output_Layer') + return output_placeholder + +def create_tf_session(): + config = tf.ConfigProto(device_count={'GPU': 0}) + sess = tf.Session(config=config) + return sess From 517499ee2d832deb266a0b86e8785ca105a63547 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 031/335] fix style issue --- examples/data_pipeline.py | 113 ++++++++++++++++-------- examples/datapipeline_test.py | 4 + examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/controllers/routing_controllers.py | 1 + flow/core/kernel/vehicle/base.py | 12 ++- flow/core/kernel/vehicle/traci.py | 5 ++ 8 files changed, 111 insertions(+), 46 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. 
- Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. 
+ + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """ upload a file to S3 bucket + """Upload a file to S3 bucket. Parameters ---------- @@ -89,15 +90,40 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): class AthenaQuery: + """ + Class used to run query. + + Act as a query engine, maintains an open session with AWS Athena. + + Attributes + ---------- + MAX_WAIT: int + maximum number of seconds to wait before declares time-out + client: boto3.client + the athena client that is used to run the query + existing_partitions: list + a list of partitions that is already recorded in Athena's datalog, + this is obtained through query at the initialization of this class + instance. + """ def __init__(self): + """Initialize AthenaQuery instance. + + initialize a client session with AWS Athena, + query Athena to obtain extisting_partition. + """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. 
+ Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. 
+ + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be tested + Raises + ------ + RuntimeError: if timeout + """ if query_name not in testing_functions: raise ValueError("no tests supported for this query") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py index 564060d3b..ae0ea382f 100644 --- a/examples/datapipeline_test.py +++ b/examples/datapipeline_test.py @@ -1,3 +1,4 @@ +"""functions that calculates the expected result for testing.""" import math # Vehicle Mass @@ -17,10 +18,12 @@ def heavyside(inp): + """Return 1 if input is positive.""" return 0 if inp <= 0 else 1 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + """Calculate the expected power for POWER_DEMAND_MODEL query.""" acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) rolling_friction = M * g * C_r * mu @@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): + """Apply the power calculation to a row of the dataframe.""" return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/lambda_function.py b/examples/lambda_function.py index 01ce1512a..4f7937c85 100644 --- a/examples/lambda_function.py +++ b/examples/lambda_function.py @@ -1,3 +1,4 @@ +"""lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus from examples.data_pipeline import AthenaQuery @@ -8,6 +9,15 @@ def lambda_handler(event, context): + """Invoke by AWS Lambda upon triggered by an event. 
+ + Parameters + ---------- + event : dic < str: dic > + an S3 event + context: + not used + """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/examples/query.py b/examples/query.py index 6354cec3b..0f0ee13b4 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,15 +1,20 @@ +"""stores all the pre-defined query strings.""" from enum import Enum from examples.datapipeline_test import apply_energy_one -tags = {"energy": ["ENERGY_ONE"]} +# tags for different queries +tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} -testing_functions = {"ENERGY_ONE": apply_energy_one} +# specify the function to calculate the expected result of each query +testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} class QueryStrings(Enum): + """An enumeration of all the pre-defined query strings.""" + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ diff --git a/examples/run_query.py b/examples/run_query.py index ea8839b09..64baa6656 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,3 +1,4 @@ +"""runner script for invoking query manually.""" import argparse from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index c880b5bbf..24f8af3f3 100755 --- a/flow/controllers/routing_controllers.py +++ 
b/flow/controllers/routing_controllers.py @@ -127,6 +127,7 @@ def choose_route(self, env): class I210Router(ContinuousRouter): """Assists in choosing routes in select cases for the Bay Bridge network. + Extension to the Continuous Router. Usage ----- diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 2a5cf4596..a3972e86a 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -669,17 +669,21 @@ def get_max_speed(self, veh_id, error): ########################################################################### def get_accel(self, veh_id): - """ see traci class """ + """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError def update_accel_without_noise(self, veh_id, accel_without_noise): - """ see traci class """ + """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): - """ see traci class """ + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_road_grade(self, veh_id): + """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index f5ea8e2eb..9a7e3b485 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1098,17 +1098,22 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): + """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def update_accel_without_noise(self, veh_id, accel_without_noise): + """See parent class.""" self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise def get_accel_without_noise(self, veh_id): + """See 
parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] def get_2D_position(self, veh_id, error=-1001): + """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) def get_road_grade(self, veh_id): + """See parent class.""" # TODO return 0 From c429bf267f6ec18ecf1c9647ea637a490438ee36 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 032/335] reorganized file locations --- {examples => flow/data_pipeline}/data_pipeline.py | 0 {examples => flow/data_pipeline}/datapipeline_test.py | 0 {examples => flow/data_pipeline}/lambda_function.py | 0 {examples => flow/data_pipeline}/query.py | 0 {examples => flow/data_pipeline}/run_query.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename {examples => flow/data_pipeline}/data_pipeline.py (100%) rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) rename {examples => flow/data_pipeline}/lambda_function.py (100%) rename {examples => flow/data_pipeline}/query.py (100%) rename {examples => flow/data_pipeline}/run_query.py (100%) diff --git a/examples/data_pipeline.py b/flow/data_pipeline/data_pipeline.py similarity index 100% rename from examples/data_pipeline.py rename to flow/data_pipeline/data_pipeline.py diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py similarity index 100% rename from examples/datapipeline_test.py rename to flow/data_pipeline/datapipeline_test.py diff --git a/examples/lambda_function.py b/flow/data_pipeline/lambda_function.py similarity index 100% rename from examples/lambda_function.py rename to flow/data_pipeline/lambda_function.py diff --git a/examples/query.py b/flow/data_pipeline/query.py similarity index 100% rename from examples/query.py rename to flow/data_pipeline/query.py diff --git a/examples/run_query.py b/flow/data_pipeline/run_query.py similarity index 100% rename from examples/run_query.py rename to flow/data_pipeline/run_query.py From 
2177ef6e66af579530a003e961fb5302852bbb33 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: [PATCH 033/335] fix some more style issues --- examples/simulate.py | 3 ++- flow/controllers/base_controller.py | 10 +++++----- flow/controllers/routing_controllers.py | 1 + flow/core/experiment.py | 3 ++- flow/core/kernel/vehicle/base.py | 2 +- flow/core/kernel/vehicle/traci.py | 2 +- flow/data_pipeline/__init__.py | 1 + flow/data_pipeline/data_pipeline.py | 3 +-- flow/data_pipeline/datapipeline_test.py | 2 +- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/query.py | 12 ++++++------ flow/data_pipeline/run_query.py | 8 ++++---- flow/envs/base.py | 1 - 13 files changed, 26 insertions(+), 24 deletions(-) create mode 100644 flow/data_pipeline/__init__.py diff --git a/examples/simulate.py b/examples/simulate.py index 40e04d4f6..b90360760 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -88,4 +88,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + only_query=flags.only_query) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 6e6734764..7adcdf310 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -110,18 +110,18 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noice = accel + accel_without_noise = accel if self.fail_safe == 'instantaneous': - accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': - accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - # run the failsafes, if requested + # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index 24f8af3f3..18d6c1842 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -125,6 +125,7 @@ def choose_route(self, env): return new_route + class I210Router(ContinuousRouter): """Assists in choosing routes in select cases for the Bay Bridge network. 
diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80d607e7d..aa5028836 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -231,7 +231,8 @@ def rl_actions(*_): if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index a3972e86a..79b1a897a 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -676,7 +676,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9a7e3b485..d830a49e9 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1109,7 +1109,7 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py new file mode 100644 index 000000000..622e09b06 --- /dev/null +++ b/flow/data_pipeline/__init__.py @@ -0,0 +1 @@ +"""Empty init file to ensure that data_pipeline is recognized as a package""" diff --git 
a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 03b0f87e5..afbc09f92 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,7 @@ import pandas as pd import numpy as np import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings, testing_functions from time import time diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 4f7937c85..afef55a4b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0f0ee13b4..af1b51ce7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,6 +1,6 @@ """stores all the pre-defined query strings.""" from enum import Enum -from examples.datapipeline_test import apply_energy_one +from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries tags = {"energy": 
["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} @@ -15,8 +15,8 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ No newline at end of file + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index 64baa6656..f065a726e 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,10 +1,10 @@ """runner script for invoking query manually.""" import argparse -from examples.data_pipeline import AthenaQuery, test_sql_query -from examples.query import QueryStrings +from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") + "a S3 location") parser.add_argument("--run", type=str, nargs="+") parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") @@ -34,4 +34,4 @@ for q in QueryStrings: print(q) if args.test_query: - 
test_sql_query(args.test_query[0]) \ No newline at end of file + test_sql_query(args.test_query[0]) diff --git a/flow/envs/base.py b/flow/envs/base.py index 60eab6ebe..625861afe 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -408,7 +408,6 @@ def step(self, rl_actions): "**********************************************************" ) - # compute the info for each agent infos = {} From 4b8346470714678ed2a50883a32ce60c79681ac6 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 13:02:33 -0700 Subject: [PATCH 034/335] fix one more style issue --- flow/data_pipeline/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py index 622e09b06..d9d6a6573 100644 --- a/flow/data_pipeline/__init__.py +++ b/flow/data_pipeline/__init__.py @@ -1 +1 @@ -"""Empty init file to ensure that data_pipeline is recognized as a package""" +"""Empty init file to ensure that data_pipeline is recognized as a package.""" From eb67d2804574f20c42f64c974e8df4e8f722532a Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Tue, 28 Apr 2020 12:57:47 -0700 Subject: [PATCH 035/335] New AMI with ray 0.8.0, tensorflow 2.1.0, h-baselines, stable-baselines (#916) --- scripts/ray_autoscale.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 9fcf97cb5..d0c9cccbb 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -40,7 +40,7 @@ auth: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances head_node: InstanceType: c4.4xlarge - ImageId: ami-09544298704576518 # Flow AMI (Ubuntu) + ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu) InstanceMarketOptions: MarketType: spot #Additional options can be found in the boto docs, e.g. 
@@ -55,7 +55,7 @@ head_node: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances worker_nodes: InstanceType: c4.4xlarge - ImageId: ami-09544298704576518 # Flow AMI (Ubuntu) + ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu) #Run workers on spot by default. Comment this out to use on-demand. InstanceMarketOptions: From 23e2ba328c964609f111bafdd9af1463e66d8a91 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 1 May 2020 00:28:38 -0700 Subject: [PATCH 036/335] Multiagent changes, added stochastic policies --- flow/controllers/dagger/run.py | 78 -- flow/controllers/dagger/trainer.py | 179 ---- .../imitation_learning/Untitled.ipynb | 787 ++++++++++++++++-- .../imitation_learning/Useless/Untitled.ipynb | 438 ++++++++++ .../Useless/Untitled1.ipynb | 96 +++ .../imitation_learning/bottleneck_env.py | 2 +- ...ents.1587254017.Akashs-MacBook-Pro-2.local | Bin 0 -> 265723 bytes ...ents.1587339098.Akashs-MacBook-Pro-2.local | Bin 0 -> 267581 bytes ...ents.1587776769.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587779365.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587780241.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587781276.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587789385.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587841939.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587848505.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587855757.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587860905.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes ...ents.1587860969.Akashs-MacBook-Pro-2.local | Bin 0 -> 267629 bytes .../imitation_learning/i210_multiagent.py | 3 +- .../i210_multiagent_ghost.py | 181 ++++ .../imitating_controller.py | 23 +- .../imitation_learning/imitating_network.py | 79 +- .../imitation_learning/multiagent_ring_env.py | 2 +- .../imitation_learning/replay_script.py | 2 +- 
flow/controllers/imitation_learning/run.py | 9 +- .../singleagent_straight_road.py | 163 ++++ .../controllers/imitation_learning/trainer.py | 48 +- flow/controllers/imitation_learning/utils.py | 101 ++- .../imitation_learning/utils_tensorflow.py | 2 +- 29 files changed, 1771 insertions(+), 422 deletions(-) delete mode 100644 flow/controllers/dagger/run.py delete mode 100644 flow/controllers/dagger/trainer.py create mode 100644 flow/controllers/imitation_learning/Useless/Untitled.ipynb create mode 100644 flow/controllers/imitation_learning/Useless/Untitled1.ipynb create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587776769.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587779365.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587780241.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587781276.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587841939.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587855757.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860905.Akashs-MacBook-Pro-2.local create mode 100644 
flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local create mode 100644 flow/controllers/imitation_learning/i210_multiagent_ghost.py create mode 100644 flow/controllers/imitation_learning/singleagent_straight_road.py diff --git a/flow/controllers/dagger/run.py b/flow/controllers/dagger/run.py deleted file mode 100644 index faa7d4ee6..000000000 --- a/flow/controllers/dagger/run.py +++ /dev/null @@ -1,78 +0,0 @@ -import os -import time -import numpy as np -import tensorflow as tf -from trainer import Trainer -from flow.controllers.car_following_models import IDMController - - -class Runner(object): - """ Class to run imitation learning (training and evaluation) """ - - def __init__(self, params): - - # initialize trainer - self.params = params - self.trainer = Trainer(params) - - def run_training_loop(self): - - self.trainer.run_training_loop(n_iter=self.params['n_iter']) - - def evaluate(self): - self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) - - def save_controller_network(self): - self.trainer.save_controller_network() - - -def main(): - import argparse - parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int, default=3000) - - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) - parser.add_argument('--n_iter', '-n', type=int, default=5) - - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) - - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled data points to be used per gradient/train step - - parser.add_argument('--num_layers', type=int, default=3) # depth, of policy to be learned - parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned - 
parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--save_path', type=str, default='') - parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=10) - parser.add_argument('--inject_noise', type=int, default=0) - parser.add_argument('--noise_variance',type=float, default=0.5) - parser.add_argument('--vehicle_id', type=str, default='rl_0') - - args = parser.parse_args() - - # convert args to dictionary - params = vars(args) - assert args.n_iter>1, ('DAgger needs >1 iteration') - - - # run training - train = Runner(params) - train.run_training_loop() - - # evaluate - train.evaluate() - print("DONE") - - if params['save_model'] == 1: - train.save_controller_network() - - # tensorboard - if params['save_model'] == 1: - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) - - -if __name__ == "__main__": - main() diff --git a/flow/controllers/dagger/trainer.py b/flow/controllers/dagger/trainer.py deleted file mode 100644 index 03364f528..000000000 --- a/flow/controllers/dagger/trainer.py +++ /dev/null @@ -1,179 +0,0 @@ -import time -from collections import OrderedDict -import pickle -import numpy as np -import tensorflow as tf -import gym -import os -from flow.utils.registry import make_create_env -from bottleneck_env import flow_params -from imitating_controller import ImitatingController -from flow.controllers.car_following_models import IDMController -from flow.controllers.velocity_controllers import FollowerStopper -from flow.core.params import SumoCarFollowingParams -from utils import * - -class Trainer(object): - """ - Class to initialize and run training for imitation learning (with DAgger) - """ - - def __init__(self, params): - self.params = params - self.sess = create_tf_session() - - create_env, _ = make_create_env(flow_params) - 
self.env = create_env() - self.env.reset() - - print(self.env.k.vehicle.get_ids()) - assert self.params['vehicle_id'] in self.env.k.vehicle.get_ids() - self.vehicle_id = self.params['vehicle_id'] - - obs_dim = self.env.observation_space.shape[0] - - action_dim = (1,)[0] - self.params['action_dim'] = action_dim - self.params['obs_dim'] = obs_dim - - car_following_params = SumoCarFollowingParams() - self.controller = ImitatingController(self.vehicle_id, self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], car_following_params = car_following_params, inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) - # self.expert_controller = IDMController(self.vehicle_id, car_following_params = car_following_params) - self.expert_controller = FollowerStopper(self.vehicle_id, car_following_params = car_following_params) - - tf.global_variables_initializer().run(session=self.sess) - - - def run_training_loop(self, n_iter): - """ - Trains controller for n_iter iterations - - Args: - param n_iter: number of iterations to execute training - """ - - # init vars at beginning of training - self.total_envsteps = 0 - self.start_time = time.time() - - for itr in range(n_iter): - print("\n\n********** Iteration %i ************"%itr) - - # collect trajectories, to be used for training - if itr == 0: - # first iteration is standard behavioral cloning - training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) - else: - training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) - - paths, envsteps_this_batch = training_returns - self.total_envsteps += envsteps_this_batch - - # add collected data to replay buffer - self.controller.add_to_replay_buffer(paths) - - # train controller (using sampled data from replay buffer) - loss = self.train_controller() - - def 
collect_training_trajectories(self, itr, batch_size): - """ - Collect (state, action, reward, next_state, terminal) tuples for training - - Args: - itr: iteration of training during which functino is called - batch_size: number of tuples to collect - Returns: - paths: list of trajectories - envsteps_this_batch: the sum over the numbers of environment steps in paths - """ - - if itr == 0: - collect_controller = self.expert_controller - else: - collect_controller = self.controller - - print("\nCollecting data to be used for training...") - paths, envsteps_this_batch = sample_trajectories(self.env, self.vehicle_id, collect_controller, self.expert_controller, batch_size, self.params['ep_len']) - - return paths, envsteps_this_batch - - def train_controller(self): - """ - Trains controller using data sampled from replay buffer - """ - - print('Training controller using sampled data from replay buffer') - for train_step in range(self.params['num_agent_train_steps_per_iter']): - ob_batch, ac_batch, expert_ac_batch, re_batch, next_ob_batch, terminal_batch = self.controller.sample_data(self.params['train_batch_size']) - self.controller.train(ob_batch, expert_ac_batch) - - def evaluate_controller(self, num_trajs = 10): - """ - Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout - - Args: - num_trajs: number of trajectories to evaluate performance on - """ - - print("\n\n********** Evaluation ************ \n") - - trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_imitator_reward = 0 - total_imitator_steps = 0 - average_imitator_reward_per_rollout = 0 - - action_errors = np.array([]) - average_action_expert = 0 - average_action_imitator = 0 - - # compare actions taken in each step of trajectories - for traj in trajectories: - imitator_actions = traj['actions'] - expert_actions = traj['expert_actions'] - - 
average_action_expert += np.sum(expert_actions) - average_action_imitator += np.sum(imitator_actions) - - action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) - action_errors = np.append(action_errors, action_error) - - average_imitator_reward += np.sum(traj['rewards']) - total_imitator_steps += len(traj['rewards']) - average_imitator_reward_per_rollout += np.sum(traj['rewards']) - - average_imitator_reward = average_imitator_reward / total_imitator_steps - average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) - - average_action_expert = average_action_expert / total_imitator_steps - average_action_imitator = average_action_imitator / total_imitator_steps - - - expert_trajectories = sample_n_trajectories(self.env, self.vehicle_id, self.expert_controller, self.expert_controller, num_trajs, self.params['ep_len']) - - average_expert_reward = 0 - total_expert_steps = 0 - average_expert_reward_per_rollout = 0 - - # compare reward accumulated in trajectories collected via expert vs. 
via imitator - for traj in expert_trajectories: - average_expert_reward += np.sum(traj['rewards']) - total_expert_steps += len(traj['rewards']) - average_expert_reward_per_rollout += np.sum(traj['rewards']) - - average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps - - print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) - print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) - print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") - - print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: ", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") - - print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") - - def save_controller_network(self): - print("Saving tensorflow model to: ", self.params['save_path']) - self.controller.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb index 875fe73b6..d412275b8 100644 --- a/flow/controllers/imitation_learning/Untitled.ipynb +++ b/flow/controllers/imitation_learning/Untitled.ipynb @@ -6,40 +6,554 @@ "metadata": {}, "outputs": [ { - "ename": "ImportError", - "evalue": "cannot import name 'energy_consumption'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mImportError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mos\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 6\u001b[0m 
\u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mutils\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mregistry\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmake_create_env\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 7\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mi210_multiagent\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_multi\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 8\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mring_env\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mflow_params\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mflow_params_single\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcontrollers\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcar_following_models\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mIDMController\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/i210_multiagent.py\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 18\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSumoParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mSumoLaneChangeParams\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 20\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcore\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrewards\u001b[0m \u001b[0;32mimport\u001b[0m 
\u001b[0menergy_consumption\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 21\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnetworks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210_subnetwork\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210SubNetwork\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mEDGES_DISTRIBUTION\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mflow\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0menvs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmultiagent\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mi210\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mI210MultiEnv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mADDITIONAL_ENV_PARAMS\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mImportError\u001b[0m: cannot import name 'energy_consumption'" + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n" ] } ], "source": [ - "import time\n", - "import pickle\n", - "import numpy as np\n", - "import gym\n", - "import os\n", - "from flow.utils.registry import make_create_env\n", - "from i210_multiagent import flow_params as flow_params_multi\n", - "from ring_env import flow_params as flow_params_single\n", - "from flow.controllers.car_following_models import IDMController\n", - "from flow.controllers.velocity_controllers import FollowerStopper\n", - "from flow.core.params import SumoCarFollowingParams\n", - "from utils import *" + "import numpy as np\n", + "import gym\n", + "from i210_multiagent import flow_params" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.utils.registry import make_create_env\n" + 
] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.reset()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(env.get_state())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.sample()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.k.vehicle.get_rl_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.step({})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.action_space.shape[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.controllers.velocity_controllers import FollowerStopper\n", + "from flow.core.params import SumoCarFollowingParams\n", + "car_following_params = SumoCarFollowingParams()\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expert = FollowerStopper('followerstopper_0', car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "expert.get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "len(env.k.vehicle.get_ids())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expert2 = FollowerStopper('flow_10.1', car_following_params=car_following_params)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "expert2.get_action(env)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow as tf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t = tf.convert_to_tensor(np.array([1,2]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t.get_shape()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t[0:1]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mean = tf.convert_to_tensor(np.array([1.0,2.0]))\n", + "cov = tf.convert_to_tensor(np.array([1.0,1.0]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "tf.random_normal(tf.shape(tf.convert_to_tensor(np.array([1, 1]))), np.array([0,0]), np.array([1,1]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cov" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tf.cast(tf.shape(mean), tf.int64)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mean" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": 
{}, + "outputs": [], + "source": [ + "np.diag(np.array([1,1]))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tensorflow_probability as tfp" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tfd = tfp.distributions" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tfd" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess = tf.Session()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mvn = tfd.MultivariateNormalDiag(loc=mean, scale_diag=cov)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "cov" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mvn.prob([-1, 0]).eval(session=sess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess.run(mean)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mvn.prob([1, 2.5]).eval(session=sess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess.run(mvn.sample(1))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "inp = tf.placeholder(shape=[None, 2], name=\"obs\", dtype=tf.float32)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "out = inp\n", + "for _ in range(2):\n", + " out = tf.layers.dense(out, 30, 
activation=tf.tanh)\n", + "out = tf.layers.dense(out, 2, activation=None, name=\"output\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pred = out" ] }, { "cell_type": "code", - "execution_count": 2, + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "type(pred)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "tf.global_variables_initializer().run(session=sess)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obs = np.array([1,1])\n", + "obs = obs[None]\n", + "ret = sess.run([pred], feed_dict={inp:obs})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "obs = np.array([[1,1], [1,1]])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ret = sess.run([pred], feed_dict={inp:obs})" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ret" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "type(ret)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "batch = np.array([[3,3],[4,4],[1,1]])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_likelihood = sess.run(mvn.log_prob(batch))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "log_likelihood" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sess.run(tf.reduce_mean(log_likelihood))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + 
"source": [ + "np.mean(log_likelihood)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "labels_batch = tf.placeholder(shape=[None, 2], dtype=tf.float64)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ll = mvn.log_prob(labels_batch)\n", + "loss = tf.reduce_mean(ll, axis=-1)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "b = batch.reshape(batch.shape[0], 2)" + ] + }, + { + "cell_type": "code", + "execution_count": null, "metadata": {}, "outputs": [], "source": [ - "create_env, _ = make_create_env(flow_params_multi)" + "sess.run([loss], feed_dict={labels_batch:b})" ] }, { @@ -48,33 +562,41 @@ "metadata": {}, "outputs": [], "source": [ - "env = create_env()" + "from singleagent_straight_road import flow_params\n", + "from flow.utils.registry import make_create_env\n" ] }, { "cell_type": "code", "execution_count": 4, "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\n", - "-----------------------\n", - "ring length: 264\n", - "v_max: 5.329679917416892\n", - "-----------------------\n" - ] - }, { "data": { "text/plain": [ - "{'rl_0_0': array([0.30672195, 0.00223007, 0.02625558]),\n", - " 'rl_1_0': array([ 0.34392208, -0.00785657, 0.02819709])}" + "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", + " 0., 0., 0., 0., 0., 0., 0.])" ] }, - "execution_count": 4, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } @@ -85,63 +607,61 @@ }, { "cell_type": "code", - "execution_count": 19, + 
"execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "['human_0',\n", - " 'human_1',\n", - " 'human_2',\n", - " 'human_3',\n", - " 'human_4',\n", - " 'human_5',\n", - " 'human_6',\n", - " 'human_7',\n", - " 'human_8',\n", - " 'human_9',\n", - " 'human_10',\n", - " 'human_11',\n", - " 'human_12',\n", - " 'human_13',\n", - " 'human_14',\n", - " 'human_15',\n", - " 'human_16',\n", - " 'human_17',\n", - " 'human_18',\n", - " 'human_19',\n", - " 'human_20',\n", - " 'rl_0']" + "[]" ] }, - "execution_count": 19, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "env.k.vehicle.get_ids()" + "env.k.vehicle.get_rl_ids()" ] }, { "cell_type": "code", - "execution_count": 20, + "execution_count": 8, "metadata": {}, - "outputs": [], + "outputs": [ + { + "data": { + "text/plain": [ + "Box(8,)" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ - "rl_actions = {'rl_0': env.action_space.sample()}\n" + "env.action_space" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 9, "metadata": {}, "outputs": [], "source": [ - "env.step(None)" + "for i in range(100):\n", + " env.step(None)" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, { "cell_type": "code", "execution_count": 10, @@ -150,8 +670,7 @@ { "data": { "text/plain": [ - "{'rl_0_0': array([0.25527085, 0.00670868, 0.02368258]),\n", - " 'rl_1_0': array([ 0.24537913, -0.00482127, 0.02289928])}" + "24" ] }, "execution_count": 10, @@ -160,7 +679,39 @@ } ], "source": [ - "env.get_state()" + "len(env.get_state())" + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.1', 'rl_highway_inflow_10.2']" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ 
+ "env.k.vehicle.get_rl_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = {}\n", + "for vehicle_id in env.k.vehicle.get_rl_ids():\n", + " rl_actions[vehicle_id] = 1.0\n", + " " ] }, { @@ -171,7 +722,7 @@ { "data": { "text/plain": [ - "['rl_0_0', 'rl_1_0']" + "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.2', 'rl_highway_inflow_10.1']" ] }, "execution_count": 13, @@ -180,7 +731,97 @@ } ], "source": [ - "list(env.get_state().keys())" + "env.get_sorted_rl_ids()" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "rl_actions = [1,1,1,0,0,0,0,0]\n", + "rl_actions = np.array(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "rl_highway_inflow_10.0\n", + "1\n", + "rl_highway_inflow_10.2\n", + "2\n", + "rl_highway_inflow_10.1\n" + ] + }, + { + "data": { + "text/plain": [ + "(array([0.54393322, 0.06077194, 0.56137638, 0.40959813, 0.0259221 ,\n", + " 0.4041333 , 0.42759098, 0.02818569, 0.42912874, 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. , 0. ,\n", + " 0. , 0. , 0. , 0. 
]),\n", + " 0.1718155323023197,\n", + " False,\n", + " {})" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.step(rl_actions)" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "error\n" + ] + } + ], + "source": [ + "try:\n", + " test(1)\n", + "except:\n", + " print(\"error\")" + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [ + { + "ename": "AssertionError", + "evalue": "blah", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"blah\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;31mAssertionError\u001b[0m: blah" + ] + } + ], + "source": [ + "assert False, \"blah\"" ] }, { diff --git a/flow/controllers/imitation_learning/Useless/Untitled.ipynb b/flow/controllers/imitation_learning/Useless/Untitled.ipynb new file mode 100644 index 000000000..982ef03a7 --- /dev/null +++ b/flow/controllers/imitation_learning/Useless/Untitled.ipynb @@ -0,0 +1,438 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", + "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", + " PendingDeprecationWarning\n" + ] + } + ], + "source": [ + "import time\n", + "import pickle\n", + "import numpy as np\n", + "import gym\n", + "import os\n", + "from 
flow.utils.registry import make_create_env\n", + "from i210_multiagent import flow_params as flow_params_multi\n", + "from flow.controllers.car_following_models import IDMController\n", + "from flow.core.params import SumoCarFollowingParams\n", + "from utils import *\n", + "from imitating_network import *\n", + "from utils_tensorflow import *" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "create_env, _ = make_create_env(flow_params_multi)" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "env = create_env()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "obs_dim = env.observation_space.shape[0]\n", + "action_dim = (1,)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "3" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "obs_dim" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [], + "source": [ + "sess = create_tf_session()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" + ] + } + ], + "source": [ + "action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + 
"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 11185 is still running\n", + " ResourceWarning, source=self)\n" + ] + }, + { + "data": { + "text/plain": [ + "{}" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "init_state = env.reset()\n", + "init_state" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{}" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "env.get_state()" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "0\n", + "1\n", + "OBS: [[0.4 1. 0. ]]\n", + "SHAPE: (1, 3)\n", + "TYPE: float64\n" + ] + }, + { + "ename": "InvalidArgumentError", + "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", 
line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, 
allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, 
import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], 
_device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", + "\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# print(len(obs.shape))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# print(obs[None].shape)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0maction\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0mrl_actions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mvehicle_id\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0menv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrl_actions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m 
\u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m 
run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node 
policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n 
self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if 
self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" + ] + } + ], + "source": [ + "for i in range(100):\n", + " print(i)\n", + " rl_vehicles = env.k.vehicle.get_rl_ids()\n", + " if len(rl_vehicles) == 0:\n", + " env.step(None)\n", + " continue\n", + " \n", + " rl_actions = {}\n", + " observations = env.get_state()\n", + "# print(observations)\n", + " for vehicle_id in rl_vehicles:\n", + " obs = observations[vehicle_id]\n", + "# print(len(obs.shape))\n", + "# print(obs[None].shape)\n", + " action = action_network.get_accel_from_observation(obs)\n", + " rl_actions[vehicle_id] = action\n", + " env.step(rl_actions)\n", + " \n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "dtype('float32')" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "t=np.array([[1.0,1.0,1.0]], dtype='float32')\n", + 
"t.dtype" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OBS: [[1. 1. 1.]]\n", + "SHAPE: (1, 3)\n", + "TYPE: float32\n" + ] + }, + { + "ename": "InvalidArgumentError", + "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m 
\u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", + 
"\nDuring handling of the above exception, another exception occurred:\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m 
\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m 
\u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 
438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = 
shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" + ] + } + ], + "source": [ + "action_network.get_accel_from_observation(t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + 
"metadata": {}, + "outputs": [], + "source": [ + "for i in range(40):\n", + " env.step(None)\n", + " env.render()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "env.get_state()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def test(d):\n", + " d['asdf'] = 2" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t = dict()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "test(t)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "t" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "set(t.keys())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "b = np.array([1,2,3])\n", + "print(b.dtype)" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [], + "source": [ + "placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0')" + ] + }, + { + "cell_type": "code", + "execution_count": 19, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "" + ] + }, + "execution_count": 19, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "placeholder" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": 
"python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/controllers/imitation_learning/Useless/Untitled1.ipynb b/flow/controllers/imitation_learning/Useless/Untitled1.ipynb new file mode 100644 index 000000000..b93658a05 --- /dev/null +++ b/flow/controllers/imitation_learning/Useless/Untitled1.ipynb @@ -0,0 +1,96 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: 
Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", + "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", + " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" + ] + } + ], + "source": [ + "import tensorflow as tf" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from i210_multiagent import flow_params" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from imitating_network import ImitatingNetwork" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from flow.utils.registry import make_create_env" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from utils_tensorflow import *" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "flow", + "language": "python", + "name": "flow" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.8" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/flow/controllers/imitation_learning/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py index c0fabedda..820244a87 100644 --- a/flow/controllers/imitation_learning/bottleneck_env.py +++ 
b/flow/controllers/imitation_learning/bottleneck_env.py @@ -111,7 +111,7 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.5, - render=True, + render=False, print_warnings=False, restart_instance=True, ), diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..0e64e0dc20ed4f255f8e9f110f2f9c0bf6a3ad75 GIT binary patch literal 265723 zcmeIb3y>Vgc^`hWdpI0EfFLm>C1~_8gOX+GC~9#BfR9tjJRB)f{Qu2|2#^#-QLDw> z0^D%1d)nOt38fM%iOW$OJ8`U(lh{txKXECx<0P^yl@u!~TIEEFttfuPaU{i-6gzsQ zWGQ+$~lxH{Cj(00@v=0 zhl8z)-3z_Rv@}~a*8V|Sola>9jXM1*WrysYDMk_x62fO3l+0O2)JAJCR z)1SsS!u9Ki;{D6tWB)~QYq`Dh64;Y{AXfbqb|L1+>}|~kS5i9m#!R3r~O?49Wk z18^G|xsBlWBe<7mchsLfGoC!%oo3}~tMh|V?9bDUjHd&eupGq; zD~tc)wtxH_&r7uOf(kE-&{|&z#YgYirug_lH$L9qf(QNe@o;8zk5HS7uMvJsLRoRj z(6ry*UW;3Y1&fPTURKf40f6=8G^UV$8!G^d>66)mdm%baqEg$#8{mVj5LP_hJrlQ2 z?Vd?Octr2nAY=xkm0UoPhk&$h^P52gcYb#mx9{H_CZSC?gb0aNt@ao|#PH!3ob>kG zxP5YaI|SE6B|*ppvOh~efIo=f6>h?d;nCh?&^t5iKe{G{RLdAEhhRU+zLWlTchoyi z3L%nUJQDD=uy1QTnvnvnKgnoa&uS$u(7}6KaElvXos-k);M^!)qF)n7aU}jWmkMTg z+V7I`y=E3U+-3x3O|tQAFMXH^;AXDk9UNS6;-*t?M=q zl5#7p2RBw0)BXZ}D}tB1OkZ$D^h7GIbS-lwTBUA?er^&*)yLsyT6TlVR3SN_lK>K= zmRHoyVPQ?{rC8C!uSM`G7e#j@Uvm;p8AY)3Gqf%n@NtKnaer?{>XLYgeof>aIjIwB zN_HIk=Z!S7&PW3|-5Hbk+9UCALHyO3^jGlP5&VEn37f=YNof%8&A+inyPRcr^{(6q z`$@3wb<;+*H;b3z#RuN-*j=t@!A2n;_*)L)<8*KIRJ@q}f8xq~F@W=w;8PLYXAy08 zwQk$~+9KkvLMOh(CE^x?Xd3slX;1(0klMrodCQm z!~fL#pL{M&H(_`w+xPI(bZp>%tp#rkVR1swF1PUEY?}F0+fT6=C}2Z|0^qq;7&|B$ z`A}A6#Y|jXjfp$*6Su}W;dcWGuxRMRn{pUYrcHg(w0rrwz8}f#6MmxwcLnfg+SrG& zN7CAdAw-&c3;SV&44;YMm99b*dI}*W+2&d;{{6hH(w9grJ~faJwV>n5o806(3nd9* zgPNwpL9E)w!^ic~v~L4P%D5^uAkLBtW5G5*fsaOTyWI-1K9gS^5?X!)8b#!j(lqq) zd_@@<`>FqA@2B$N^e$>?U<2T9jN^)P%Af)KGduQ{6O-N2hP~yFC1{P8M(LsR5D)_V 
zx*cv~LXtetQAj~pHJBBA-YK_8K!6p$$Qsjn(W&G#Fh1~K>>v%%Q3WAz2$4BPU_(K8 z3n;ntgbU#BIK>+dC$K>or4Y(Vx~jqnDOj5aQosh8D^kD)bwmmZdu3l2pIg$%f;KVXp`nMtlF4P254kJo$q=3!a+5)&B1&o-% zpB<3`2TGlE6ToF=kCwRMgcLB=g*HUho7W3jf#3r+yA`=Z0T-lzJuSB+)?$u5Xh#az zCk-}unIHvhrcOu!`>?!J=`6XjX!MG>HVbz_3f8i&UQm!XQb1|W4Jn|*2`(ytd4`rQ z&`zCh&av!=#4+<-&H45MC!`=(iL^<;+62?b>_|aQzJd*f-kigPQX4#QLJD#&>LLa7 zvC{uAph!-`U{>(Q4oJaTM?wm$8eZ+VVFf~ba$dm}_Y3H@5EP;V7p!0nffX>hSa(e1 z29`zPa61ZIu!6Nr5&511D^^el!Ce(>!O00LI5rQgfDJNNtbh$Fq$mBZ0!%VxL#X@f zfLH+=$jFNyR=|c(^AZp%U;~+W5gM%8y9!)JD6oRsuLe~11F)!u+nc~9>x31s51VA? zzN^56niE#wM93E_V1sC15^%u^7!i{I-FFqZP!mkq-sZZH^u`L!7eYiGOz#g%8axDpvqSV69mx>x~y z%#Ic0n1U}1)K(X8)u z?^NC1J8;1Yjv=rDCKo$az}Y~lwR!9-I93ZQD1=Z>GU&d7<8`qDE;<_D5GeFZMmoUj5XLcUl58^jAMU_?yD=Z+P)P!mkqjup6&^u`L< zAhu@N=?w%%%pg)ntiXYi8&=>%D40E3;)WAez-BBoB9)HdvcU@2be*sQ_Tk!C0ejGn z6|helY_P1Kyh66RA+eZ%VkOlHD=65KIu$Ecz#gEPFMk>WvCx~u>v-PqQ8FE z-~HKuSOFWz$crCVz=lxs5)dn31DSXc8m!t_fy)RKE677RZbtwcCfyc{+MN*b@PpI7 z0yd;ccJBKMT&Ou=1x|!~downO7goTCm;~sK6}V6nOxcbVxRCV53fLf)Rw8o33K%hi zNFA{P2TE>OffJ!%_GoStBPXnY%~)te0=+@NXs=W{372A(XoC zLF9xL{^9)_~6`Vj|1xzk> ztbntDQloyLK?L%duHmB$NoZEYRx?O0FD1od|w363?gBPQN#u_9wIMnaQ}fz15q}(zBL3&-4f9o|>so zr`5b;tY}OZ7T$R${FrpwA8I1vMGA-5kpjz`OX(5w9ro>;j~)LDz<4j zC8$*z-a%L`_%dc)qN zyY*Cmi(K)syS3FPw{$E%Fn(|c{$m8MaiV{Ee9v&)n|0Wo63L@)b$U)IAj7;gj>Zd1 ze{lvmQA1eC(yHx#_NKpZ=MYD(XjyTRaCZGl?y!w%MSVpJK|>c( zNR-!(aSvB|BD$M~PVJsg@2@zYxP6=`G^rsj9gLH9sZaO&JKgQU`Du2{1W5_L83PFC3SpHJs7z`R>l7KP9KScV162>9BLLX-!p6q)D~JYe?@upO8*^cjqhK zld)IVFT^_DeYnm=tp*DveL8~IIyn%pr0mV~?r`vQziB;jIbEv|WiTWw5PukfaJQ0^ z;Krl=G~=w3CDf+*!*)pY(c-aREnOa zoA)yvWv#{jn5{WozE1ytBG_(P1YAMhSfRv)^|n^=ina=Vwbh&e0`^ml36ACM6{#09 zHE{6V2)3HCnrau<81GekaO@6AlR)m{D6h(NDg+z@hoMQM&0|yIfS+!|!vVM|QYI^S zynuwJxJ_nmjWnOn{54oMq;=fwi><(lnMbX5i3H3gf_n8mK@~sRf*%U$0C~;WfKw%5 zY0|o~NClPp(ewP3HvL6NZ-T^DtVvKMmM-gzA(7*x-2g5=xXi;RHCyabq@o_4 z^vwv4)>Sjbl>%4?q7#t}T!Z==VJP?y5xm-o=WMX<>g8MX6HeSfI|2j}K5F2BDRNj# zNZMN64H0z4(eEW|xwHP{^2Px_>}p{-H}T&5o20fgIwFRJ*vcrV@tt&Ui<{=32Y~Dc 
zTpc9ig@Lg3l~7Ayb`|_i1jn5sRHa1ELRM3yHd)`#PiwEewEBDXIhsoz^}!*@Du&xK zR-vW5jzsA;sr!_aDyy}iE&zN%x&}ns6ic|GTPmuv&QS=ieq6LW*}rmKO4}Y>i1$9+ z?+x!BTyR_@%kr(bHN_%DZT@7%JbW?|@0QXMe15FKe9kpAXE;&;Ub{tFmQLLkKZ$}9 z;o2?-(c0~68~#!?hM$}Cwg+T2bILFk>ZSO=`j_GRO{1jabuUi4NAn_9xgzGe@baDr zZZZv)F0AAs&dz$`Ht$l-?g8v)BY4y_&+6Y48@DpE>!$MSR2)hvMb|~*_b5T2(lF}I zPKv_y9K@>6q#;R`dex<&UuePUsw{lvMA{&>4pvW64ap&~YQUZuezFB`2`Q=B?he`I zcWyG?B^x>VJNaIxtMJQh)oKuawp1ir4W{^7V9VA@%XXIE!H$4`m&?5WO!d2&W1POs>o9cRK}_a z>(|8lm%qpUi=-ccB)@=D%G ze{~QSoktX$z+XggDx7QN(SoFi0v`&f)0BDPHdZYxL94}rOPv_tueX}uey!xb3E2jg z!LI}jj>T~U^T`tp$al?dHYA^fEF;Rb$bim56p{48dA72cT3`onBhEQIDKMNJ*L#aOv|x z>d^XT5R`}$XQp1Zz=mJSdw&Za3TcF=2g5$O0g?QF;vX6~cA3%mX9kWabrG`0cbZgk zvjJIvAn%WoX449G{9*);1(8|`>vWWktA2Sm-Dp{Fv*mLk;Dl6W{8URP4sJY2Q-UqE zDmtwGz^W*2El2R75SVizz*FhD2=Simi~DQR<=pG+{3oh^Ae@zQo*l*sd4mdoBZ(q* z-gKV6@2GKq_*e+llegT3eJsPM1rJl%(D<~h2lp;AWp@U$T#$@ioo&2aU*eYj)}WhvlI^hKByo_kfG}m{wNYJ z+yHA51v#Z3Fk^~zClMWvj4%^?Bt#ie%rUQc$}%FLIEjh^WRqg5nR}Pw6Ke|J0rD1fb>Lx8khd6(NhSbrSkROeS~%NEBOq@vZqAEysS{?~VKKZ;aU>X& z+t~b}onpKq)`UQP3#t|6X@Wqh)JD+YVm<|z@bL(q^l!@L13ncUru+Gxr4Z_%3-5~p z>@TYnCLANg;&PnCc>ZXeIseTzoNr3_%G9pwKbtjy0PTcXG0b9cz3IDJ;J+BBnIJtj zlf6B?|3WCY)6&>9O+sh$i3zZ7Bvye9JH^<3E>H)hKzoYk0K!+=|V)?m2caGl1aK&^Y+R326qh5<02 zKW%~k=0mf6^9WF5;-`3!|E_!uD-0c~9kWFk3I054u2d`boj0%0A|*K|G5D1hJk^wQ z$c=gqo4uv6nl;_v{34%_*?{84}Q%y>dB z@Yovf^gHCO?P;G}Ew(x6O;2ubQ>-ih;a;*6EZ)!kkjy=RlpdtHsmOG%)O%|*I1W;g z!$6)F9&?U64rB1be!DYa{R>fQP*#TqO{jb5b>S!5`3twIw(H4o`r1CP=K13J_kie@4}Pu6SYFRgCTK_Z#bzu0`d-; z^Zr)#u=n?71nHuIo_-%M(XWZ(4D{eO(e#CU8{+MIz3lcKa#Mnyv)iC1t-jGY z``L%c&D;Ip9#@iNkHQCP>`}0jb;y{(-M_{vw<`l&LUhD9HcBJiDkAOU@L2pvfJj^Q z)byww=U0_oxvuuVLIP|c+%&5?TeK)vaeR(VvI{rY-YWczXYQjRmu9}xz0p(mrv1$_ z`z~<}+0JW=cGh(6UOI!T`@pwr9QLQ0*m1`e&@23?G(Z-s_iy)Rz3$oZrX$-E?^X$n~()8}hq}l2$G*m)T z@!;8$DLLRWQW4fd_hc$w=GT)c*|wx4ubxaAkhg2Uo=hSvJ7pO#Lic3Sz#K=7_GHHz z>&YaBbk1z=$&LvCC$)Qwg$8Pqr4ICvy~eDX{ttm?*k1>tv$n zWS0xllZivn64tXPQ;`hNlZj+?UK1__R{Imr!d}I|d@^8_755D2yH2L!+L8dh*2%>2 
z0`+7fQP~m8uo1c^6USxDG~ScZkc;oh@{sM-lZmYB+}+uZ3mtj)WNY*2$y6FBSU@b8 zdG}-z*Rl%)*$OF*!Bu*mJ()_H-aVN#Tb+f5N=PamJbN-F2lL2CDqfsD*?XN#$+jgW zdG%z{fV^G%^<)xZ*(u9_5xOUn2Ie?wv?p6@tS6Hg(mAueCtDMGGLOEi+D}Ap=VW>sfHa0>dB-5dAs)O$t1$EQb7p%_wkq^wO|6rC(DQvVl_0sE?B$mC$pUm_jsh=D*j8f+qHe4s69p$bT#$}T z93n_ZrXm@jBNNH$d?s81thO-{?1-gCLU&x^xQrPaUIaf~<0=}(-s3FbYWp#_aP8HNiEQg^#<_+o^aKAZg42Aq zFrtP}y*Ghv6?yWe%Ubt9i?72SbnR%*3S8?ow^I?5x*A=fJ zSCK)yDb}v8-k9GM`zL;{f-9O_>mU8iRYqj4(ClwlhFJ!4?h6?-VabzY83uu8{QUH49(>-g+w0r%4Ix3S%-zC z1W#rBsPuc+RMv)H@;ggKumPQ?37O32)pcRCoa@zO9x4mm(kdCT1tj>T2yU*r)|vjS zw@Qxse5OC?3`T?5pf~JZ=nZ$t{i1h`N7LELZAAjU1#XUCRY}0A2zrK`O{loi;OyD6 zce-6^@ZktvYMg5fkM<^m-kD+l(X}+LPM_*c`rF-6?|eUQ#S8dZUkm%vh|wF2`jd>( ze%M2ZllxX%>kDp^|84|AH%UV|FX)rg>EPTbUZP(UGpl0$t`ZZpwsHd;B5^Tzo zetNXgedKi4*v(=>MCE^D;ZSfiMegz7 zvyy$TXW0kVOQr?qwz2!w7TnD#bMyc8@-?XFhUdYJ`{uSakv&rxl?k}ifXS-t&Q zRrGr3SPne!%@*9_$*>$ad&>v>M0slRM*YZ5Lg^YqLAzUb` z3^Z@DtCKt1^A2}uoiZxa7bX@z-@;_Of<-w%_=qbBYeAa8U=W5eC}5QZUyfkI!-(W? z;3Hwv3P~9z(477Os_`gn$y7sN=M*Fi{0XYv;P?^(0oqqp^#WQhatMJhqM8hjNjVfC z%3h8drpe$QuU4atICuRR17Ah87d&fHec&f&f44UUm2_W=VtJGghl-Mq1Og)((aBKJ zZg1M1k(+eJ6V_;ONv~~~Rcqz71&px@o%j|P7^9ez2xPRg!lHF#d4qr$=|A{P1ds9j zIn@jWbxk$Y$$)&ur`PmvWOdPjO(1pA!iu6M(qPT-6KzDBXyYwjyk{^Rs@8jXF$OS` zq$$0WKIEmD>i`C$?f$df9QAD*)`QZkBR1e0n3h)5hme{$2a0TitFn<7`*KEFks55u z^0zTft;n*RsOmRZB0WS?D-uv7t72iqOQdxHuuQ~?LiA*MEPM?E*a*x? 
zLBdbcL8M^fmjD>`*MfO|$@i6d_c93%UqUv_Mc9+pfMvJjYwXAXL1lZ(8;lq5HDsI2 zF)9ZLRKv#6!?npAfvQyn@rBwEK@pnLHWAez^Xy4s!dKowgkuU6IBd5#txdh|8?rHG zLs=kO-f7K^F%H{h({RFe8*7W%EEJo%32eQ9#C91~4R3gC*Fsn}6AZR%8-i}0_kxkw z5G(V-cG-ZLK7>lPQc|kmRo)fmh5)lU0}k6|k5dzrb~+-0s6Y>sv;+c*q-EOeh{6BZ zc$&pXTPujecG;j*>i(%qpjW0YFL~HF$6>p|OjWh69Gh6}%Y}Ahnn9tSY*vk)wvecDctD;lfk_W3b&^ zvg-I@&@2+$<(ZR$grB61h+z?9@e)cmry;Oij$c*n$Ld@Byn+M=9JVX)_WTGBIBZv7 zR1OfRHkrdNacwe3;BpQUIBZvxw-hFP<=uFoz+t<^?TF1|yKE>+zst9c#-NhSq8nkL_AX zR}vXw->!X-0N5@Yq?)R%eul$#*?_rLo^QzvhwVyC%jpb(FG>_K*serEk1dk04JJwB_U-b_NkPI-(q(|~Fhmo-IBb{W zH#D}JYc9k=EI4de;7te*9x4bI3l!^0$bGv4qjG>iV2uJjT$?;!Y&SPGb&+9m8XUGO z$VYw$$XDKt2MQdvd;BsylY!020s5uhPvLm(Pr+?fn)z)T-PsuId_3*M*5D5&)WHXFySPB?6r4MI)Of8VY^ z50kV60#$!6<|V7n5D%Ri;z_U+1SN=eOM_O?ZkFli5P*zWO5j_uMJ zgvWL{&V(Q$RK$knUg6y+>^kSK5`JV57>Vt2Y|3E+fjDy1FxW2lxFTGH#&&bbs!L*n zW|8}LdFG@b;V0?lw&T$$Blqoc{3@%X{CX%17@BMD(qX#-Z_khLfZMk#Fe(QKRKvzW zv$!^yBcKQpwY@ow(^7L=W>ma!BF|(HE?UwMb%%M@(jf1oz_pD<&)K8-LVEkd;P z{-5+Hsz`PIthsEDkozauRJ<^@NL5u}+8Q<+CyXsZz8lX535Br<^f19gAfN~yrcmG| zWpz}t85-{0c#%o}5F*ekLx`6=Y@B~F!km&R%+yqKc+FkF)I3884&#vc6zUIR!t&dK zUffpt4h8HH)qy=CB*q~zE!QH5K3k%Q!8jxmmw!mXVH`4>Qc&@iy#?cl!h{_p#&N<0 zZg_4BV43uevBxX^=_RL^J6hRW`O*AH>_>{-W4!OcUgu_1&2rkSo{ z2|m4?9Jb(P8ZP=f)4^~&VuMsg^~@)4UUL)GNA|!2no)Llvb*g0zbdZtDKMy=o#Dk) z#__(B?AzoOZVxB*b6hbj88N9JYxu{ztH}|5p7dsk%u>2L<7dca+G$4CeTKhAJb8JQ zPSwH_qj4Jlx|Uc>A~{o?!MRCqdq8eCpLY1asZ@D?Z+8Fgu$w;6neFZj`<<=vXf_!S zyW9PpKDp#*v~{t&S_OU=yn;uriYKa~V!&lB>@@vsK)`;%>H^L=Pvox2<-oGBAB< zn;rHoE^u&mj=jMVesd2zUS`+qsjT*uJaL897LOyMX7OwWOeYBVqMSR4+gDWJ z-R9*^!;rHD6S)Qmy%@NX>7!=wrMHp%Ti@D&@U0KOw%Q?o7!gB@VR5ws|M(SU!hJsz zz7$~?)3s5f3lNCQlP6Jum_3dO#OzsgAQlK*QC92~h}SMvAZGfwM1hzcd!{mEYapf% zV*)W778!`S$FC?8_6o#n9oIlyfy21^786*hI+PY{D6<0u;_{dXmHk>PV)i&D5VL2| zfmk4LMOm>|AU<}f0x{Ev(u~d9FE+rxB4+O>s|?v1i0Q+aK+J|k24e2BqzdMZEWVPIB5!SrofB#!GW1yC>lLtM?$*YUL z1dH_&=E2Du{booA2A?B?5`)c=0lJ-XH;kuZx~$6%Hb;ha1e_x)K!eSZwE+R=$YOJ_ zIkKc#&avDOgWV{wqS-A5EjlS7_QExfUHTwR~2)@r4eMw+i 
zqH#FU#NRwNX*YUi(M@x5vL;wI60hL{%kTm4(3?y2}#og!;E?G&jq*(tID#ZHlS zeRhhhnX^-*hH9tClD}w3dYI<`{1tc!Z%g^jP^0bcl!Tzto^EF5bY%0U!k88|mH%@e z{IiSXn1R$es3pfs^lQR-OrkaQ#5i&By=91sFF2!ykOF*!Mu2qSv+oJ5wp5V2MVCU69sct4Hgxj%-#tjdl(I}p)?HE)!?lK zlU5Dp#j4EWtwt6x8l(eh7_8%iw-wA;HCR;GGJCg`@dTqTMpwg)MO_NfR-)+b1(Q|{ z=FNf3;_aHr8APp^SScrr&gs-DQS^?2IjaVXjz(th4h?(tWnZ$0F$Fe3_#;-9u|$8g z;L@tXyc;_tXZoYTH9|+y$WCt>*^y^H zZPL%TOF!QM--=;zFdEEqc3v$kljx9}{D7t8HP%nF%`wyI;M~am`Mf4$^pD!;lh4;3 zfz}M}Bd{dQp0M&;w zCifTJ$Jm~U{9|`1sJUS2bT2_pdiO`a@XtSW^8fkO-&%$rxiS9S4X_Sva+XxQ z1ktlcqSNWui{xuUzP^KeJ@R58zg$e0U)HnLmxI?wt;MB1?S0GfUif+h<>7bnerS{Z z4CJAAMp3*EqVwgq7xHhfgZ`ru%E&5t?YM@zX+PZXluFVTU?4-VYl0{h7%dfl_*$@$(a zUYgD(gMOLXhSk!yhdg}MT4!k*W^eYZEPxXm8oHizNJ z_Z0)K-vII2b*0xF*niDlh<}o3^sTUX&w#Xeq&M0goSz37;yo!lcv&j)o zI7@mN@K0neDscBY*n2NI$dQ~asGFti+tfw&9Xv$516~8g0%|&)!H6ueq(aq92XLCIIrexO@Y29Xronu5{Q$t)ayA-?c0jBq-2@1C z!TyJay{-OJIjvOla$?g7Kg!vzYy$|<$ z!@CC;m;lnR4w0YA51vS$r+g8E$en};jvd5)Wa)JFSDm?#IGBHedX!+fV<=Y@iC~s? zNF*IRdw4HN76BPka;>_xy~$fzk~atrxOZ}b%7r3esZGFb;$URz3eAmL#hmeY@C*)eyeG$m~d zKBakfE1kqp&}aNY*he--&j#dFT9O-17vGC~i}|4Zl!Birz@xp%pm%0SwyBU9lhrRY ztEKhQK`~BA8IOd;)Kq7^!6?lqd9l!n7mmD0yhn!n6Y;E-rPbH0uD#~y+KVpGZ*G}v zt=)3$mUF#Zw{Jgtw%0#;NBrgA+Y1x8mWNu#XQnY9WcmGO(aH~0mi$S9w|*$zzx+M+ zUqrS-wpU)lSW^8Jb|L1+>}59QKWtN4vqOM8MZ9geN3I1Qi{SPC zT%8{nju!tHxBcVi0`U^*_M3&`qxUS+P4$R$!p9Fb&d2*(@Swjw#L7r4l#)&TH7Y;EDU=j~Yod}MWCGcrO?ZD0fo-8ZUwja* z$5C9thuG@Cw$)WG#gZ=Iqk)V|t&#nL{BFy-$IOv+jmA zdOyRzi{NHQiAmR9IU3Mhg26{3xYfo*3OiQsC1dr6;*zP2{0d#{epWGI>o<&Sz;8w3 z)jKM?N~bS4qu>|@>Z!dE$bodF+51>A>48?zELD7#;V1FZdipxo2K-FRZcv#jBnNcc zxs24ZW`~PKYT{8z+-4iB_~6$fc$Jf#O1|bKoYK><^E0$A8}M<5oWTH9`ZbYz3$T~s1b&L(2HUCwHCZFgvA+K_;5Cz8eGvw z^VVmoZL?PoR+JsRkFmgWtuQuSH1c6QgtYQu2=UF7kD#Q^eg zg@gytH?Tg`f{wF+NIizLP?8WfsA)PJ#HwvPeB_&UnKp2wEMcSu#94B43KJiV;C8ze zpj&;VpnPP2#r4n&3dm*57f8Hylo2gEC4X z6oa^97A{CZu_xbef!_-$U~}N(x<69Ds0w(rv~{5wq=1dg^W%pUFp5TAN~EB=(2oWI z*vM|YxF7|LqKOwjq`+l_Y=fZ4l&xqC1`e?KrNb9Mg%eW1o;1nM9Vu|3cHRXka3VxH 
zGaYE<)z-4X!38N`A2tcl9Vu|3CYZ9l$#o$qEdOKL6*h>ik#Ir^7%_uL9gzYDN}Y5Q zfE6ilB1G)b=J8?d(gi7CGZxwq#iF8@wk!RGs`;-K6Tt;3V7$pKiM5zx589Ce_DO>c zYWfqK2@oA9XdiSO zrMn;nITw-#Itt0u<(IyR=NbOk0V!bDdB^#?Sk`rR(sa6E1wwo>_50%A>fs1ZSV3{$ zbGe(qAxOGn1)L4c7b{rH6cN)s1y-z}5P~yAP}rc;u7cv-bl#<HZ9QPqON$0;@ykAJGt! zvO#o+gA-Q3h?oTEjup626HM9O=DLvd#tPUV)+WGZR{+4T&!>SC174s4o2*lR%|2|XA9$=FAPm4p7W+mG z{t%9r5-U*hf`b9-@#3dB;Di;hCrz?*#|m7iIbj7( zgnWB5Hi#Eiz=)Ux=#CY*P!mkqjup6&^u`Lab*n@VgfPKP zsaUZB_Mpwd!wAJ`UqOC6F<)k07FYqDmlIY%KPtFrAP9g`uKKJD0f-f)sfl!X10h#Q zU95mUX2%M0@)eC=23P@|niE!#b0K-46)T{RmHvgn3UV4IHM{I9I9?kope?E$D_~)U zVg&+3AihV?_`ZVU2&{m~#f}wlHc)DG9$3NgT3A6LgmRLuFM&AiD=5w?=Uv)_73d*= zXn`Cuh)Kk34tyju;t)>#HKS^%A9$>Q!$7p}3nu!rPFR5x zA>ZDN4dR6rFd`-ax?=?{)C5zuV+AfGy|Dr|h^3W?oZdlT#0(;J#0nfJxnTuPgo4?l z&9Pmu0yblz5fM_7Wt31m2G~TZeTxEXh@7wjMzS_mz#g?k!oK-l%HgXX~^ijoUj7=QN4@mo4QEl zp7_~Tol^o8B5=V9*v(1Vm4)u770}1*SV2y{qH&`qdI&}BhX$oMVFfuCk_TF`0{WQY z9R%*UMLf?=`wC9f#tLYQYR3v#m{DK_wcazx+@suzAmR*8`wC7VumUC*J66EiKp9c} zu!0EWGhGS)gHh*vzc=bm>D3?I{$w(qbgCDDRPP3{p3B%RB$NoZEYRx?TsI<=Mhib0 ziRV*mr{9}S`xA0A6}v1dz3-y)On-3hsaf#?T6J-IVd0&3iv5#rt^LQOefg+~h!-gw zV%r>+HRZ_9oq}r}|q@cc;5sAYjcG*580HrT_T0l-jchgXM^;3HPQ{wh-vRQ$GZ>LxmYMer%L#A1g~{+AYMsXSJ)j6p6*}Hdiui% zHfpmr9`&agXPqpeHq9TtjQOk|cXNq{Dlc=b8NWj=#=~nb&h80Ky!G!zA(L_Gg4YR` zkEXFxSI)e<1-I4aJ-;6@zbDbL66C!)3%!X?G)O!&rV~aXKX_*g`n9D}^fcYPpXn%T zE%wK3&FS)W`u`KbcGDu@3i8GZC3MI1tF7h)5U`(WOmM8YTB*~geK&%wrmUvg#Wlt| zSDa}>=StPGT4|F&u1+2fwk~$bW$V+9Qw8N1I1Ei1Z62Ev2mEv!9uB}&rQq=b5}M*R znYlI6d_MEnpb7mb@{;U|@WobO#mu8tyF>!^sSrQff*%U$0C~;WfKw%5XlM0U-MUR|m;> zVIXXMCDam_T?M}r!EvVuRVmT4kkwSFP4<4|Z;;nsTK&EH9L*(<`c#=@6~k>AtI$$j zN1}9_)O|`ymF^hh7o=-Iv`w*uE4rm3iY3+*T>ZGH%|{F;fq&(?l(s#%5bu4s-y7aN zxZt=*mgQS1CcW(eSt398kLE?JvSM*vczI6*H<<>zXD}S* zAr@;{DMi;s;`b;)pwckv&Q6NL^&G^i z&!izqmU`9Ip})|A(^Xmc%89f=xKPrN91^Pr?2`J)7Q7{-q-MK2WS8H$$#{3CyWQW( z_c|q~6hN!5I+8~kAlhApUv8^bgYdJZBH0>`W|Q$S6-s}!-5+gTjF;~tUuT2aMHP~V zACEe9@?=oy$a*a;s}D=vtsz26hGdA`@@+V>18Q$9A3;x(_Ud* 
z2w--ng?HhX1KSqeLqxEI?3*O4l7hRsF7%gjI{-_kDNkh28vN5Jq}e^tKNq*j|J6ZU z%9-h!iob~9R5;hjqXkJ31wIr|rz!KoZLC^Yf>w)m?*DqL3GUZQ?wgQpU>W>M(BN1c zH!z<((SUr{>}EsqNysvyT#F3oEJP7WFPvv9i>dWhtE`L(2i_Oh|FYOiqYup}q1Du2 zwHK`j*+Z9X;Ky6==5TtITi|>r-oyTVcF{Gk?0z~tO#*DZrwwO9i=)~ob8T|as*hUf z69FY7E41{`?V>||GrTE&G)#vqYgucyK>(;?G+pB$02)D)Lmpa^`RM9wp{uvRo8vb% zqbmcL@pBP8?(J3Cq927b@)*G0z@7{iW^~gDdl%zQUmp<>7Qi7mF zoH#S}vIRE0mfzokhe8_R>A|p1Za^gepZJFcj$LLn{+WRzN?n94&g;VJ3Rwg^O-jAl zfUHnV)3;3F7bAEqh}2S8r=xUSZYvmOvt{~*Z~8AiBrc51_^Fmo9Nc)4rUa{VSp88& zQQTT42Udl^oKvIA1b8Yv7a`u0eQ|$Hx}1BR7j1~@9|&iqoM(qOr~o*UDB@CLuDj0_ zZvL?lswZ!`D^%~c#XHc+7(sH6$Va(CfzLfbRw#;TDZA(9Rw#;bsVn=tgh-IojfjN_ zX7z3O(GZAJF(vX1eXAmHJaODeT%2RV_=}w1tAKwWR1hS{&~*ua6p0sZfHjGNoYD_U z!D-eI4@YJc=_4V^h+>X;#Z#6M0mVsF6d;=vQ_bAF6rWh^yXKa3!O$NjE9R7bL*A?) zUmV|NspOF)up^fOau&%af{G-~{`t!$p#>cP;w?0%bWQ^CI=!%>J4Nhp6b9riW|_Mu z5opn@2NY_issJEwG2P5XNy01|InhaV%W^?L-eM9mCKwLnErwt{Z~%~3rb$5F4~77F z3%WY+Feu1djK(Ar05~jY$_g#bs}Yd57&qs|y}}8C`WD0M6i0$VxsA;q+9}2>VoeCt zx1g$5UL&Y)F>b+)4orfNNARS7Q%;ZjynAr=Y=1(wg!UC3=BxNoM*Z0{;|V!PaBIBN z?~vW2(|&orXKC4W(1rI!0rr|TDVDd&(I^&B>POJg-_y21TzjZx3@0(`v< zJI#vBxPP)SXUh3Y28~HjHi*IuS0=@Ny{F)8yd7r0o+fDId^$S|e5DPOc`ICiK+y9-i9;r+Zt6j;n7{y2AJfSQ*+2Sx1f!jth6wPL@6 zA#sjxI8!+S@(!Bw{#Ny{_xEN5>7s$2ejhK#$oDB(ZlUO6%Q<`SpbJ5V}MY$H*fcYdt6D9JqjPFu}8s9)*)jCcmEoz+^!6$65>`7Kp%(4;zt4m(5eHv zR?HjeQ9I7BD!X!B?SF*?*g&|Mlj?kqMY0PoSL3QJCc)2m<~|y7Y34iK8$ESz+TSd* z?-JLL?Yy>VXIPIX<&{cLr=Eh-IE>9?=}zLlSvHeoY~%!9q&BdpN#rL#U<%t zUnMpUwJuQK0NN*L8h8ZHG^neZ&^RSe8016Ema&w#!?nTqSG+QRi@;&?gdt8-Fa z=$=f?jHM^r@b1ZI$i??$dC2za$wbz5NwSRCbL+{D&7&t%X`tlw67*ye*P2}9*ON(O zaFw2CPo|QlcTXnGR%fB15|WAs&z?-l!8|gOiWm2c&YaBbk1z=$&Lv&S5GFg zu5NLJ@H;Zk6= zk@4)wR1C}~16Ema&w#$`WGb#L3DB!26USq1OtY%fAECTKj!@nx^IzzmP0g(LIvEYQ z_?|2e*c$xF+FNrC-Iqy|6QE;-u1?kAd zA%b*dDv|*@GLfv#XTl}GY8&HO)~gtpPyVa2;-3F}J2Dm5mh|Vc~W*vLlvZ zBXq|lj?0*_u9E$9jjL!Bdylh#tL?|!!nId7CbF$_b!Qtc^aKAZg42AqF`Y=O5=>rR 
zP}y*`hIo{S^J)L8A<}L7qU_0Oru(;hvtIY?cyhisi{89urS6%B&f7V+iM}0oipL7PJ!EDeQb}#gXyX1b+yT+sGOmXSJ*|TTwT)C~t zy|=*4@vADiR~1;#kW&hsZaRPuNAObPTw{2&HyQNK4Ev9+Igw0(tm#v|Nq@UL>YeY$ zt#|>?!?mz44J*CDs6WXlk&AW-adIhZYkdK}8-dVG(ooI|`s8#vI5&!y=-0%|s+hm4 z#00IC-j7z$4RDYcz<~5-{cg?_IetoW0ahy3U2;|z(f`wf(Yf@bD)@y6ju@4fy_3#$ z6lVSdu%wy1!6nw}xx_Qi{qUu8iF~flPnTeVc5{jR(>j-6#6*{7vOF7bKgb;Z0pt0H^eL7d#~o$uU7v&i5bB%_dj<0;D| zFNHlcv)r&5FGuk*DTQesTqJjlFRa{AWRwov65s5UQRuj00Kyh<5F9nNIyQ@{TUS>h#>k-_^b5vWe zq>JH8R+X#P3S$k&68L5d?(t+;4jjG$R9*MVPZ42dit;sx%b;Qs{O`y!R1-&%!iTTS zgGj>?i{bYpP~6>ZQ*GK!cFYyWi&({R-R413aV%!w8?83b?Gb79wr3a9R_|zJTC9Tq z9>HsQ9^67k!Icm$TnAM}1vICX`r=wvUo5R>qJ+=4FxjqPQ4SD3;!46=kTEczC8`b! zhP4j(as(S5MkI#=9|@PC0L{ON(?38p9)&HLY6xfVX&_#}pP3GHFL>6Z`oK@l4Tl7>uSKyu z%7;Tm$wvZ#5sm0%m}a*(?as&z*5e6lG`JwqHstlziU4D@v!bGPWO;)?80o*2*A_6w zDsQUVrS>beG6G-8-utJL_FW!Eljc5~X zyv2+642DD1dM_`=0A`XjrI*r&t^*j1w)@X^^YXIoWlM&(J$s=FzJY0JMSTdV$;UWW zDz&E6Ak%liI3Kkh=A%e>@NG;}E3zyns`?Fmy>vP+5m|(hqJfH|-D^cAB8Li~u1jGJW+uMQG^R)R!$`F?)1PM{ zi8YX)cn-CR5#HT~Q#>z-1g+9&p2p^3EXm^y%%7GRrB}^4m3=FMlipkj$xN|Zz$;RO z&u{RZ7QDfmX*r$wi!o`!Ka6mUmts&^4*nuSpyOgnBR@$=&0qGmny++FADO|YFi;KQ zx9CHwDUEd}1_TDNDwmHbhIT{K4qw24H9TiR5b>CCC0>LCe&EX(xQ1s_4jc7h1fCiK zuHhfAT9YwccxXi(EKCOVH4I=QFee2GKS>*a4Q^jOUP38=QGYF%=a+n6sdq1v;P546 z!(4>%xkTwz3AoJ`=VZvA5 zWu@R@lBU3j!*+|)+SKd5b8L*+O!(Tu+!*7qT{eyrwi~c5X5;ew>Dyu)w#&xQ@P@~B zErew=!CdoTpPC{e^sRuE3}qAW&^Ghh5^@WR8F$NYq}bQ8;)Ahf3Q-1hy;6 zTM84t@@{SsD0e0twp-kec*(I{HWR+KFgD%DeYGomKwkL?Qd zFiA@wph#L?*e)ZksckKYgChMQv0XL~Ds}(VCD1EVvX_Zkgn`6%g_){qT{$+f+P8sy zyKF9eh&QJ&+`e5lPNr=o&jPk9l*O7ONE9bdk@sqFFV?W>A@}XF$F@hOg1gRVY}?{Tr1DFWQN0bC8p&b41q676fxMYMB?&KX*g_GW>ZRP z{<1F}tkJZF!*-9kV7o$KH-Jc?;Tt+R@=TN@1m9Jb3dCj|*VNtXe_Lkb4BL}I%fzpC1g)wh8m zTRJLV*%~2MB(^K?_WTGBxP7|iV2uJjT$^mhc8fOI!xw5t1XY8K!*&Jv$nOC8 z%DeGEfx~u>XS|t)yjR!C<@gK`w17!(qD;S3)wYY`MGavK#VD28n682SebC z5=9KQE0Iv7w4x95ir1+8rRv6T*sjc`l+^rXKc}>Y!*-8f&e$%U91`2*I5P(k8Dtf6 zjsY`MmBtuEB4-na?Q(3&VFQ6Ua?~)`F88=1T&Nx0BH*BCa4u;aw#zdo1qnY%H@6)x 
z=NES-1IMqjI?Aty3XJX2QJsAdiR}u!JwL(&4%-zNl>-E-VdJ1#T${`hPy~t43qZL& z)s8*LeY>K(r7+rx{bQf&YOzyyb)$+hvWgu&8Qo(j)ODR(e%Xdc0)haSkn; zCK6*~k5@6aT8ssP)7We(aN3HZCyJ058yh4P#wO6i1P_6LB6xT;B#by}|74_UNd6qP z^`FgyO5Hz%2=vMjg6Y{9Da#h+2)-C$PRV5C^@vPfa|oDP*ytcu9+ohL+`lIADbyd& zgJ6Q&za}v)*CL2MTcU`;I3yB^kYcJwyj)0Kk=P6lhjGYkN?j3&x>n zV_30)DYodCVldHnx)BoN;5gF<5yP;d5R9DL3O*}AD-rVDWR6WaY#=&sjv5Bz;2y6T zE!3tm6O*|TTw3^Bx- zeu(VCav`qixY&Z1X}IX`Ob5g9$S}w!Z(eib<0E_E0nI48JK0V2{9hH<`4kw`&d%^+ zD&u(HN%n2>3b%)o`Z=x`mW-Ixk2U<`-PPm>KTmqIL}n@7o$)i|GVL@Y>psI@Bc8mx zN~db!iP1QXe_cx~CXt+}&fwgnw>==Yn@>CZ-&Cr+zc;&oci2rI=*)I^hW*agcr=@g zhu!V|PM_S6G}^k@UG<6-x!_k~IFPO|jr!9WxvBg@+}Sba_%HxE6t#b67i*<+A2rbGC*zO@74TOWRHwL|_eB8C>j;%W!}@hi%N z`+g?u8iu{PFlsS%qi(eaTL=({%abQjftWpx3B>GKbRZT8Tv1l+6^Ped194%*mGxSJN8Uv$ksqiAI1b?HY_p_bB|w9ChQf6*E+6&*i;W=5HhL`rBOFq z5tqkAsO;BT5wpiJftWpu4#Wb1E6R$!0`W1|Kx{H%Ol)TQP@1tNHV3GP**nTAL$(HD z`Y~TyWX3wGnu|VL8vSP15 zeB3ntr5Vimk*h?ok? zE1@+^1x?i#0GcQ_o~i8D8i?8Bm_W>)MF(Pmz!hc1UV-?;Tmmr@J|Yk+wqh>?sEFAv z1(hLN12KIV6NuTc$Uw|JenpwES0Fy&9EeR_9VW0+btsKmAKxSR`}-iNg5ufo^7-9i z+$O8#PK&VS761F+su=^dgq=L-Nlso}^d(rVmoN`b-sm?&LNNFo8I%}ojtt}Nl)GU( z71L#1cCa}ztRvtYSpgbsj;u2XI7b$ngUyj8&2o-q8HnCkpxYKkUh#RUCSFLVF)2RA z?P^L4kf)#y7IWwm<8E!|v=zI30X zcNV)EeY5?d-g2C4^!=X^e4jD;lEAb?<8Y#hzj`pbllkwYF}fOjuLfVT>7Q&^9S+Fe;tRdWw4>Uzc(wGUvC_ANm_z9c4*^Vj z7y9BQYA=927kb0petga5{^h;#!sedyMM9$dV>34gh z?e3I>pwgbWS9JCo&Tcudax=5LBbzrBR=2RJ{2zRFk(}zAItR7nc!_>ZIFCuRrk)rl zF1~!C2%?##JHL`+^N8Hi6K6}ej@+e|Zl}mB-A<8Ox}74kbUQ_A>2`|D((M$frQ0bM zmY%@U_Uz&gG1cqf;M#$R`w#vqyf8Db>U%r!@_pp}o!MY^Q82axZ{^I9p%#0|F+fY* ztzmCEjjxA8@wLP8*7`>FL3*^QE>Qo`BD^^J*u&(#p7BHztB+jM+3sH;&bEHnLyxQ< zUAY>%ozu(z-J$y@Nb@C`mnMA6Ce}&HlAc=?uLWi@$PJA zch=3=pT=u&biH*rs`HQJG<_KP&rzPHBt+4vm`Ws>XwG%g=V-+q=ZJ--Xax&D!sjBm zCes#Wifhzuxh{9l#Nl+otr3PohlzlBQgCxnC$&X_-_(}YXuTvV~{0tnZdPE z>Kg{k?R@Q5H)@v|(RjZ=G;{}se4d%s@%;IrB z4N|HaZag)3qF~Od!J^`m**j6Phfx=!t6|WhE`{|}G2r%-0?Arj_!a9HcEQ zlSXo_`14Wu7pu|}Ytqlhq@RyVKcA3(zE%49HtFZvrJwJBZ^f`U7!77QJFga&Np#3f 
ze!x=l8tbRo=9uYpaBgJ(d|sw8`bTZ_$>-~iK+6dC5m;VkPgr>__Qp`2?L4r&;XVS5 zx$N`y5m-24PuO{&{>?sbAAva-d*b^A%~g-F$^Aw5F}7zS|JYp$YA#qh-Aj-YdA$!2 zVrU*@n2nMRoQ%x9h}m#tyyjDQncsYPGUa>_jFE>q)|iCut1lsuP&p@r>T?8GupkU; zEZh)Lm-E393M8X4pb2xr$cR?&-ZJ9I_{pe@q?~1A>_{L~&IuzU7OS}#Vbsr&Dd&SD z6v&Rsq~dKyEZca>h$G`CqcWRuGs4)BK&Y70k=F8yk9_uexIW8^y)(mpy4zroelv|f zc76OC*Te5bU@M^Wn7Mv5zQvXcnZ{{F;`;IQsJZo{Yj-(P=dZV&kVVY#`R?v$aCSU7 zzkc-g2%d|;*70Pkf`p%&Z6nf?dMGz;AI^EX-H9^K$veoAaMO$da`5zu<#D26Dr&-n z?S#AHvn=1aKaex?de_+qXA~<`^KrYek=dp~cQCg6lZ-2yv9XWmJgwV#%9M{+5leqA zDq^2y%ve=aJm9LL;sKr^q>5;%;i4i|l(@wMRtmYQD5uVIRL_f8_H|Ja`>eByiU(X( zR6M{lgj5l&bX-)#Y9dD!k9E33b+-XDME{;SoWX#;Lnh%iIFTK#GFDYIJi zYW24@-szKVUL$hIGpn~;f+G7Yv$d<$AL$0I1^U(MPwSCat3R>^Wc8w=m~$;HiU;^a zj4Um#R(~!kDvCK*6%`KzP|@?D`h%ZSzMIFr@=Owtu!B*%L}vL^t^omNsnQyd72ZK_ zNMJ_t-pl$?!}Ri%H~sXar8&*?*r&|XW1o_z_y4uvP96xN4Dr8FtxPkNlVG-F`gijm zuT6mEd`~Jl`)~A{3-2^}g8{$Tg8FCP^UG}ARU)K&ye3AlyL?g9n@%pMQ@-<2iRvG< z;C1!sQjD3}(x%7bOhjHZa`8OqGY^$$aE?}pCbJ=gi*N*(cs!}#9gUxhN<=6}Bc35f zqxudal3-%;rh|4gX+A1Z{Yndj#UiJuE7Hv@>J7EcTqeTA6@XGMut)soq!Jm*0SUK- z)uJ*TPk@Q4z5@zW&<-e2QHcoafQ5*VnU0?r`5v9=BsxA0$gw1#I$lLVfr(`MI8a4_ zNWw!@CFGJWyyHbB(otCzE>;P2v9U@b!o-D$RWc=ftdc3IM23r1Y^@Pnl@r@4%$|t~ z5vzDA_*lhLQHcl_t2nCISS65PVnP%PA{`$GER;s#UpP?}i?Qj!hqzcJknj+dNCz(} zk&a4KxL767#l|X$2oo0~R>_p`u}Y?-5*aR5u|;=utl|hTQ6XX#PX!;Vcq%Fp;bIj> z6&tGr5==~pSS8Z&almF`Re?+6W0gR{LsTLiyr@JvDyzc9DuFIGR!KydxDc^Qri71G zG9{JBaIuQ*K0(JSjsO!CB3AKK@Ue=gq7o4ZB_j{x6^zFO7 WNq@WBpG?M+PQHZs+kf_@$NztIWgI^M literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..d1f73ec681fceb20a96903b3512fd110cce38d52 GIT binary patch literal 267581 zcmeIb3zTF>eIGoxdvN?CE1dG-BQ{onupuj*D^HHZKE`5*d$rG0aAZ~ff$zwpe> z$t9=8-Tv^}+s^fd)7AEmzUa1Z-}cQY``~Z?;Pu^ozjep|oqcZU*FL`&#&GHOXwcs{ 
z-#OPEPglX(BVgYHJ}>}asr8!zvNJ)6_>+r7APBwGGHIM^BOOt*KYoypnmc5f12 z374-PiuW)6HTf@!8;i~57r~zF1F`I{umdqWW_M%S9}WG7jkLo?;;@76soo#}x29o# zKY}}0c89&`lcVtyoyo>%yVu(64JW`(jMLH{kiackTiZjQD(JH!6z z(Rizc#kuE}(QqaH&e|_AS8QuNZ=Hg4ZACpj4%q!+4 zCB+;7SY1rR2>Q3S3@|_G^)}^Z3rG zB!owFp9(^zKUA4Kkz(W_MfhY8!ENmf;^tjDgCw-+h7cjrY6wv+8w?zNy#Y6Sdv4sk zd2=%a*GMHn$RPX6z17FgjKceD1TVJ}UJNI@<9_$lpm(ypdg?$^N;S|+ey>UkCa#H@Vfs9KGCZl+70}k8q)w+2y>7NqFxn@1S%&%SL0YBX^8&swW!2zuVkT_~t z>vOKGF3UYmr|6GF@CqA6w*+6a5>DYs&K&9H{t1hmaaVVW>XLW?e@(<5IjItAY}@9w zRJl{s_f57(D6Do-*qh^jbt?N6$*b`1Bltd(64sGxqtYPWn}1`Db~(%T>MhiDCG1C0 zyW2@S*6uW3i0AKq-2=B+^AB&g2p=c9!?W>x`u~Y7^Th-$0gN?xCW1Q+qV23yZQEZ> zL-Pu>;=?wfnGT|8+|#B#{l|t#edCTow5gG!gK}9$03?{!ovUnAEw2~z<)-(U4?OmC znr_1IQnc^kr+E{r8#NH|kp{dWgvD_^yV$~qvuWZ}&1El%vE4W?Cw0BICgK9{bR&!% z&_+I#RT(i8TURqT^FC^T&jb=6ZRo?Bau`u^O}&rEG~GaEpYWRvxFvu;y-Z3Jr+0x{1Ti&Nbs=;ZZKOx)@QP-LtM+x8M~(QalyEdAk)D$ z=m7jXb3c_+QM;&a))M!00P6@NCFc~;2KYHM_LdV9-O_};<&P!K8rQb}9zv-O0Q_q+ z+(v{Xc%Y?_3jg!qOJi2>C9B*b00Bn)B5O>mMJHX4Z{0idL^A9E{=f{U1fevCVcPpfzJqx6O3^G%sfDCGh6u6{)G6XW9 zeCq;T=1)NYzH3yV4^~J48A!{EA5uVukn<7{DIf#sc=1CDY(~g7t?&vm)I;*+elN3O zAqFd?z=|`nArTLhwLKAr=1TK17At6HipcjA7_kB>gzBoW!U~Sg0xKYc%oHmigL3I<7j_kpdB~e^ zyIlo@hL#sUtbhz5pJqU;fDEMLg=?@X?<%kv0b>PphrdfHMTSWW61vBquTBg_77IUE zVFhGJo$Ty)71&U-!V0Vi`CPHVDc5>{9N`LMbXG0j!djCcjLQad8U8mzDa!dqpmfIMi% z3dkolHmL14yd+g0RzTSjIu#>UKprfLMlTKxwr91g;Ajn40j4=KtN;(kxv0eJT)M)X zk&@4e5CC6YXu4~wT?M&Hs$vEBF*8<>lfMwL0z5S0p|A(Z1xo#b-)UUT+CPjV*`aoJvSIw?JGE12`ivN zuvZ1CaICO`W3#{t$RIPt3do?E`k}?Vf#8x%$q@4XI^eznGLV)RKdgWZA?GC^RzL>Q z@xnD&m9YYw5q5K50hu2MGFV{+sA_felu0tTI-BTU0YvK*9{h3P|5+->D)) zFdtT6wXfiq16DxfV#W#>8z?k73#{N+C9Hr7!CnJIXXyr&yVWm|-GfhTe0}#kI##ga zNrP+j$b#$OSll)oSs)ongBqkqQnCe*{@em|jNU2#2_2qy$Kd)6EyLxUBun*~T@X)gOqp546w|Dh4k>?yhs(38-s z3jwLIID@|=Rc`}*HKb4};!u;x2NW-dJ@|`P-&sWPxdyy0R2aZ0L_kYJuQ&vx7S?V} zg8hp21$apo=GqPYpNzqWAY@$tG|rkcOrO=C2K(rQizssqGjb8J0lo~zPWr=>CpWJm|sng!9GL| zSy)_ONKfF8=inU?+|Jss+hoSw;S=PHVs0)987#+SCi~X+qM6LCMsw-$$R1P4F20x$ 
zu^dltFyYVPRdQA`(89P5!r}FP^`>KNuw5`uC!MZ#N^ZwtN{tf4JFu zswl1$wIcjP1Fo;k`|;kOw}DQn$6qZgLDs8rZ1RZ)iHF9D5>zFXw>F?xSt_)r>E!)P zOInPQKW1BTFI=bpUlDB9Edr(>udPs$G(&vcQ`x}3Xw)Zwfc;c$f}_EtN}u*u5p2|D zHRUd@Hr~rd)37rd)FhxgNQzrQtqK9lz+r42>kqffX~0T5XpE>{B6rOxX6|=+@K$@|v**r%J-oG}e{2h(G+T2CVzX z5Pa#SYsJd%Tkut4;j-33KqqHc@8@UXlZwrT)$ycn3oC9mLdLjK0PAq*M6iJ?YCuc$ z>EA@~N-NXO2J5U`xJ5tW#16DWKp^6y0v?zmhn0k6Cwmp@WRIiYfmSf5XnSw%I3Tlh zqXhhPy z5-JJItb*T-_zhu7L!cE3Dbcf#RadFa_f(aZR)4QLM>ENzK2;`K#o$)&&05OqNN%&A z`6;a1r0P>RRZ>{M9%KBfurW(HLjhN`OGQ=IIjDJ8kBep}`{%YxX`B6X@!tD--NCK> zbC!!_S-$1p*fB_v#$VOM;nNX+jh2$&?ABm>a^x3nLvw~BQSjO>(voy)xA=(^oNzak zS%}uGuTA(HcxVza$tC8-afi!9CtOLzNd!vFw%_%hE?uG{HZN z_`AiO?314q#s4JZSzl@d-z`_YU_r{T?G9&zxD5Tll-8&OE(f?IJ zTvSt|oWOS@I3CV5@@Rol1jUB}>NI7Zd!-FWTS>2q%P2GVKisH;`*y*79kLB9gZBju zj`U1~+2n~DT*-a@eKzeHNOnIFo+bh|-qVCrp~X>dl$ka;Xw`?U z^pSv)5fxf^=qBxuABG3xlVLh!QOnAh83cf$qp2DP4$ugi9P-eT%tlwIsjeP{hvNt9 z(Uk_w_}K^^^7g81(GSBJc?=+L;NqoN_cwYz9U^a(rf2mX6#f8Ck}2rNnL`a+7plk z^=VI0+*pj@!yzzdD!}9Exd`!|?2G+t(#71X?EFWn=T2~R0#5178P5*mgsedYz>(-g zGudjq*ZRGF_;?7_qqp3-nEg1K7lb7X8DUl^=(MEW zbA2lmbX=@Yf0y71qPo#xVS<=P6W$gAaiUW~-{7|@0>=}}jl@1ja{P8sL7*Um*CqT> zBv!Zq)+8u7sUHx-h*XCYT@~q3q>qLuBj_CSil-e$cY=x}&HnkzCZPo#0OF;ZlR760@hUdB zo7^)9$V+FLyC)JTZPo({HB(gpke5z3b5S+WQrIT%lR-dUItdXI3E zB9Qk(AwXVAR|Osh1$pUcL^1(@LrPOrXkkXRfV_0voEQ5pB?#(EhgT^M1%qN6n?JNe z$D^?(1nNtvs+Cs@>PyF^+$b@6ba>xm{?L(l%BQ5mjyh6R z7Ba!#?Q`S5s!_D`zL$!TR(70nG7}@MF2B#9!)$9b&9r4hYESAk|N4k?7rt2cy?I0Vwj5t2(DAM54ZoV#&E$P zJhe#yWy{V=GO8=$Nvp#E=+8GB;J*z~Z{I8yRGauQGUUHMU%?8&iEKkP4IuoAG5n9H zzIu)BK(Ak+1$(vvf%i4wY+cSFw(L28_GoQ|Yr56_jkQtF^8);76SnIWnRX{-ZO)YQ zm-K6splA@*X{a#lG2P;K7EZ^TVYc`wq6<;@e`&&a)(Ypqx!MYl)L~|*Zd!~CsTMck zRQ$$TwTO7Mwlnz23Z=Xmfy1a*4#(YrHipIfnIDpwN0Gvf6xSGOF4}^RG~kW4USKO4 zF(6dV5VixcVt;eZ@vR)a;m?}(S=N>%6>!CX`ZG7Mq-!nPYHSv#NE9}_3#q_9kbJ5M zZ>q;z%I4*kD{K|Oy3Q=s>yslKc+=hF2cK=o?teQui7v6j9FK|{#$o2a_q2#?2))~9Z?E9oCsct433DI2bF zk~`5Ip3VBIMe)PiMciYbH*D4%OE1zXq 
zkX4$xqpj30lUaz|(9cOErtjz8S>dP>v*;9g&^KL`wNv3>SM#iM@+DX+G$~rS(ndkH*!fIXQ^0|l?un=|j8 zOyF9Pi~M>rVGO3y^X$oF()8}hgxRVrG*nL}So=hMtI%N?sLic3Czzj#V_GCv}<=h^oCleS_IkUMZJKB0S^kgzYR_V#w0eUh^ zkrzg+$G|O{Gw|)nc+rV2mv4LV>&f^bu*rM&WHOQn!T0USc(N+536}!P?(bRH%NSrX zAT8`egN^*Sm@R7z)Vn9+M^~vWTu;W2mn+e?C*z5Vj#z|^&^;MHE@7tjo(zXv_ns^d z*Rs!^7mLlib=QlB$3L*a4Y)_M729oor*ADerG3bA@1jhQOi%VQ!w$^=9hs%TO{I>c>*9aKH*ww_880}|;evEz{18DpG8xGL z9T`tnHTh zCwn@5hu6vA?>6kO;FC=!FSf8Xh=!kMp!5E4X!2R)Y-xrT0vBcVM;k|w@{PCux3L_jCSW9%6jpEKw#XSa( z#1DBZj7}eVt@0COdOnvsQgP)tu)bF_m(>j3;Fy=10H0#sG7rB6H_EKzs$o9fWP+x z6r~!4Uun7?KZZ*gc9tsDh00~@Sh++V^2%i_K~aa)QkFxfGImt_y=^LM!awtSZ;!J9 zmAbf0=JVPvH(Jj1N-_(T1#W4jjFApV!wGZ>7NTOS z+Uw-(q9d#nmomezMzEw+Woz6+CubH{73syG8m9=8p6qGdr9E4BdGhHW@^+Ve%AC7s z&33n(N#ia=>~`*we_G`(gxIssUC7Ms+$H~XFte4@bRTHblwyxHAq z-HEeL|1BsJp?{+Z$v`*K4Ag?d@im4F#1dZSE@O}4MO2yN>U=F28 z1g~NF!LgV9je$eG_gzks2CNp-WbSB=ObK3=+T2A4-)O+CEOV9FZ3&L*Cp_8Ju9^#ThT ze;UDSS&k~}6?H$#8rLA;umK^xW|}W^Wf2R$-GJLY85RSFuK;CN*0NJ{u(As)c>MRs zGgKo-62gbC%#F#4wju5T27W&R$^GLd)uvr(OJ8wtX;*iw;q|mMISkjtudpgjvaa|A zum^Y9m#rUMjN_=qbBYeCw;;4FH1O`$PN_<96u9!4aF10Mf zwRiI^hfafk?9^^Bd_5Zi!Jjxa84QzRC~zoy8EP&~2J?8i8of}$!8e@R3zjvZKJb&X zzuW7AO1d9LvaHI6LrKYp0)ZBdXk{2@r#tCP(R~7=F=;ejO-q0kXvK$ZN&q?(k54qS zGN7@vxQ4Ka^dB26ni`H9@P$Zr!-6lz*#bZiKCdL@&VhA6<<8u)1b$3Nb5XcMW3 zRh8cS?ft<(wodFTG{H;~$-(lzL*RuOT0whR;AH^);b!lt&h&h`U~#0Wsoc}!L>y}{z?IW)CA0ZFowmzipm zfyzs{#^j@0K-YCvM<4|zg|y`>^&)ZkaAtHdga%5EG_DjjkQ_>Y`URcB{xFhl>GbFM z!boC2(S+kHFIG)GZ_6@`#yE}5!dQ}rYM6g(cT?Gajo@Z)uDE1YNBUXziZx<}guiIO z>%5s3)0w{*lREsP@@h=YDeb$F13F#`142^sm;H>=+A}Ut4dR~mq2-iDx)TQ!IVT(h ztof=7Si^G01rb{uHP8YN5rh~i(27sKk0}Ru;p;AN4a=q&Hmbu2EHwwXhJCzjO@$wL z*ps}9)b7(m@2H5kU%IR9_2b`9fH+@IDE~qVWweESOYft zDc|pE#2z+)z_%RRWQI{OKsYsQ3_Z6tnITZNDn(6U5VNvHc8@fG+C--YnPpE16Tb4U zJy5t|yY!Sd`MPh&#+VGX%h)cNh64Y@jTZ@!E)515K(fIbLDw91t=<*vhV7D}6};t5 
zvBMI8fv{*MF4(SVhyV>S8De=>*e)3`(}%i}wEz+kc+PFj_cF{!905tv(!B}bC1rWJYtXeLw#zf=pSn1DMe6dBhlz7HY?qsyxJBSXyv+hrb?gbTV~#zWrnVz(Of_bD%3n{nUMulnPQ9HM+^}7aH!eI_voqs3;)v~XjEVum0c+&wxwXkG_*q&p zxB#$S%%UT<%gIOF#P2Kb+5?3fwo7kStRLGYL+vuQOQxZ~KmD;?GJscG?1t@zwh01Yu2VrJMb8L*7)%FtB4+%!O8veicy=#5OV^oFV7*GO7478h*SJjm{*GB<2j z;7Z+hSwzkd3zcL(rM$H)ORbLkcF7>b6oqu=fG-LZU9eq&#D$;I+^}7dO(CiI%iffb z!lXTL!*-9>h3#U(yNK;FoT-C|1`(4LzAo@oE(jwC?1=3$Y>Lp01LDX~bHR3*$0gxH zx4XeZvhu=Oqgh96mt{@}5`K~{0)U4Ubo{#Q+hzEb)qbSD^_a67vgKP;9dB;fF2@@e z9s)tOcz|-z;J)K? zH4PDoXTr%?t3ng&HJ9A0UA4hCgU|Q_K z9PmYfq6@Ywkht(unj5w&vMD4rf7#C{t+`>l$Lhj%G2varb{WpNAQA|?$@wcq6?gE) z?Mw!SO)+dZAdU<*7i^b#ykfX;K?rjd$m?Gn9_V%^1IwHcB>W_;IS_KK2p;Yda@@De z@GGs3X0dNKSFsudcHFni@%HQp4{q2l$EX+}oZ4gtnssZF83K|ZQQ4a%xNH!oal>|b zc?)5}SKhS;3KSSmRyk3OyChC~=q~Ao*XSX;*TJ#4Z8&6CLZ1g~@l8PKSznRtBw9Tb z5W!zoJaAM=)a;A3O{Kew)MS&|?D2w(MDJ{b$qC2-g;*3w zAG3@`=vuQAZ0s=|^~+fit#ZpXE~$mMLP6cHM&9?@O4_HqTmp2hS*7dh;?rMIa(}02fglih;EMW zjL|LcqyQ;j<^#|C{Hu9KS6$}Qg%_0rc{bFk-n6@-9^&-r)0VfO>xXEohp@kBeFI*i z;G(xZ=?_Lj%^;t?rfnyxkM4oH6{GBIZz6qk*uU}}E_(@f>*nq4!TD6i@jm)(@^ZU} zlj>^}?3T@>I(ErE-dRbO*m>eoICX0`*V-5jr{mF}v)SA3p$nXb8|OPK z%P|~CaTbvo zFQ-hIxY9=xpGzxU?4HSbDwAA!_L;5ZZW(*CT}OJjd2`cfw>leizJ*if4C*wv2A|vm z_j*|@DgwxAhRkZOW{FFzmOkzfHR-e2!b0M({iK*X$`sG3z`M!H9fu)n3r2Fa488R2 z5|`~}MEDg0xZ<{&Irx{pwFBZ?9ez!>!~)-ez>k3XkO*eejXTeg9i785Q}7t6kC)Rp)^fQq;{0z+oM#)_Cc z?h=T}v(AB-Bk-KEVy}w$sBIwDHFYjQmFPoi*@g%oAP|%HuVsd848-_hmq1L0bqvJJ zp-k4CR~bS ztPZJBYvMb6Z4caE#$xPNHMmVsI-vM4I|2_BOK2pwvLCOs+l)*iHR;E8xY}WE{6wCy2ljoFy zzll|V{lW-m;I@c{&cc0npULc79mFH}xqXn7S|A#|X;t5m6+GgJy9|voo5TW7lO&k|XM07H=bMHvo+f64cJ#B_AV>J9sI#OT$PbLj_96j zx2C<}WHjy!yIZ{pdG+G$^JIj-YOeof6OA#C&igp9UIlepuZF~~zl@EOJfc-}$?K*y zioCHjelG86*NuyJq*XOj#2u)aBJmb8Mcgl%DUyQ2Oc8f?W{SiTd6QD(LI>an;YF-f zp*w!Id!6oZvojg=Hww$Th8Wh;`b()<&8C6W-b@kOyqO}k`2-d= zr{}kk(Ow1z+Xo`%Klq#Q{LIkGZxF?cccM5o?N85hW_RFm#xO}Oy%!y+w9wfYbSIPe zayS%UnjOlNeGnfYs|wyAG;5|r8F8V;`h=eXy4~KByo|Si)fGjjZ&;tzipP7!7?7!Q?6xVWVO8QLBA4w#ch0Z#RaKf~SAPUw6c{^7 
z88cNLc~xFw?Wk524qH?5fhyZY_BJp4M}fg()(n!;O4Qvk#Z1)hB9nY!01hI_rv`s4 zF!w6ToP9kgviGWDW-i;~NRCwqN$lN*P%U^wT|=A$&dpY-wZMX?D7ULB3{@x3pNj$wA!U5@jcs@;@ID zf3YGw(H4F_D*Sv*`1w`B&({e*UoZT8gYff>@UF{YzCY|wb9Rn$ODsGJBiT(fne@*L zlULb3M3#N=W3SXaR-l~ukyOdG#!vIqji1kJ0Yd+Xi9Y&#)e*=(9L%qI1QPhkoXkAn zfJ{Db9)W~2@`QN=92>~z%_9(}Ay0fSCs({xfZ0`I`WV@fkvrJ;OQ@-_a*4&Ft(RoT z*9V}5j3S?z2Q^|=%-rsFpBqE1nB6Uv=&|0%OzJitBE?)>$S_P1f#2aU8-|S6e9*&q z*^rTr!+cmW#eCcdF{}{@-C12iXci2iVonY^%WHeQ8k;+b3u`2ZB2gFfaU;aA<|Csb zOmlN`kr9dCo_55L@sm-JAD9`r*bzsln3Ic)NCskN_S=dS;SXKw!fpZ~o@_>n8)FJ1ww&_pL<#S0KU zwG^F5zn({56ZG{h=w347P^;brV-Dfejx!Q3z4fzb~Z+#iPhXcFDd>Ct$rJB=46 z(=j?&Gg&|t{OcaLW%VE&z~7<&zvXogpz-(T2H>mtsWOHXDeLgjERJ{);+aq)^WS3h19RG(?%paat2y;K|Ywt~k*<6g_ zHITj;Q({gZxejHaE0zDX(7d~MhCH4IyfpBkX%JssJpj;N%tk}e4v^KP+Y8|q*njV! zyU{y48f^B)W!vFRW$6lX8Z$nWIhjwFxsW?uav$2sG1|I+*iSt$6>%00Qh`a#aemV4 zZN|<1aGEr(np?UIeH#6uUHsZujp%%-;li@9T93xAxBw z0i<6oB0rWNJd!?-;}}t&!Tb~0qd3bQL9q^r1hcS>B5D7r ztM;N~ae&=gg?@7dLR5rpF<0$P4#8ghxk)=;B}6futQW$dKbc;gy4fqPj*djBGzO6G zZgCk6#lmW%y%vqu8}v4&@g=YiZF<=3Py3_cRgh{Rb&>RIFSP@etKAEGj;Bm>eu)e; z5C4m4WKQ{Gqs5DW{#qjOJK)f96wk)J&0G7YPxr>?_|~4%FjO9rIu)uWj_;gOJ}O2W z92aw&`mrc|ICBLOZQ>WeAv7rzI*^1qC_twBo|fiM^f#a4rE?|hd!pCd?xc~F2_5Dl z;3~+dNn~;u{iQoROL(CHqDUsI0E_K~lm`?ZR$(#yc$OV=S4vaTCg)R%XE)MG3^;wp z&xd_z=k>IY&elb_;ROAj=bO(5#iu0vOaM-H$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl z7E@E5cKgFLpX9|tBc5A&0a|N6n_5n`-I4th@~q{Bm1|ep*B)uV;2i$u@OYzr_~_v? 
z-Rm}QIDNX?J91W4M%sUq+`UF&kvr-8Rwk_mwsfpaO68P`rQfugQNA+FIFM zei30w_E*?}m>sj5*_i*Zk#^Wf9Cpw>)f)uh)->$z3s+X~m@GzW@_vLpAs=OGl%zU4 zI7{LCzR8YO?oOy?ww<-HwAe(w#v1y!?687BmYSI>Wty6sC>rqb2ySDQ=*HDnUvohE zFJ1prPY2>9Qf?p)#7FljqMP!W_=u02YUkqv4Yi0)HC$n=Lq%#n|nRa-Yd8ANa> zwc@uqLz`|05hAS`Pt2$1DY=JS`1J;Ghjcr6R-8gfA-F~=2|^~2{TZAYJ{y5)89q;K zrptibHCVbMY+1NH5yu@Swd$^Q#hbU@+W^gWLau1Q$h`Hggj^i_!SGHtr`T>_P+91` z4>>>ZTM=k>nQ{^nZvV10pdj(oARPE;1lO6;c(!oGmWppdH+;~yoj5boZ2ef4+|;#} z-+}D9GZX%8#9tsJv#WIaCTDb=K-m=uED|rQrf+?%@wI|RKJe2Gvq5F55FF5MU#z2+ z3nO$EQIOrUABo@O zxW9F?zgc$KYw+(Q_&$>o@}+~AtYH}6Fv^dTOzhQLsOw5$WdpBc*un;D@OF#vks(7F zK9txpUrf+iw9iERjoOkxhSng1wC!ojp0({RyBo3 z{)`*@FlK~W`!Ix%J*pd-kV5=I1jcRXBn6s!3LkBj`wF4O$4qm*mq9}C;Rdv<4MgZM ztc8+OiBoWMTpE{5fk9bT0p_2FG}#ER`G_x2~1E%A%tR*L6HKw-<(Bl zt1s|-Aq8X(Y+Uz83JBGfhO>H*0x~koj~`M%C~A4B1t}mS+wo$96cCC!Ui^>(n-TDG zKdOpptIl(1DD;+t!3JcqR!9N)uugXNNP!KtEgPi3iV*6|yxLbXhzf78LJ9~GodE5T z0vl?aDVv*I8+E4M-lEM{zp6j)GdrJDeZNP!ihVD@Mdq770&X3VuAReJWc z$YQr#rhSG>|4Sxf2P;@11%#y7k{F9Q@}LUC{Np?_{cY1k2WF&lr5o8 zv6kG7!h{V{kiTll7GvDzfWSxr9*)oHnqCxakOEATb5WNDFrQjDSU}?YV1pFoDiHuY zV-t)YGb05#`I5#9qx6uT9G=r?SmMc6NI}kp;DMGx3c{*}DflA`q<~yX9_R038Q0lS z(`knlaPcV>aIu%5+I|84S$-da6;?oRtuA&GSOiI1tbnnBLZh?53fh?>V!Wroh!s#F z6q5{!70~ytS(lzMRzT*!Ms|O!fKb)c53PEz0x~koj~`Y*C~A2rumY~gR4wI}-yCq> zdO)TJHdq1qu#Ok5!HR_!1B|;0Y(}71LG~VS+&D7BT?J%*seMqW!U`)OPwHf6j}_QZ zv%(6j2>JGAWDqZ`fDq9M&>kzWp~jiAxy`j9>5UbTL5$0VHoFQ4F^xzqu>uQ9c36QG zA!LuHM$rq9A(H?btblx&Yecv(ZklT#tbj};AXY#~R>lg*gJ!INd{SeBl3t-Gsa(2# zGNu~{VTBctAzDJGVl0EngMm0S*q+s{g8axKw#?+P0?ybq?UC+90+-$s&&vucz#rvY zwAO~-x6(OrcKKh&6#6;_aQA$Xt>E5MHx z{^!A$jsqL4;Apx9H9Huro3U$1q*79xu`p2D~;PkjbWr*0{JtKCG!9 zYQhT0$luQ9c36QG zpZEM20y1N+5n)oT8OaQU6_AMp#0m(>%2)w;(2Nz3PikyX+o8B%1!PP`k7BY4 zn|%djh{{+2d9WxNy*M=SVzaLxKYEufGdZjv@V){(9ACBQnl77t1(+u1qRy`fq~<8@ z19Isnb3R4P!U`+MRZ?|d0e;Mk73Abg8ZV5}gW;L&E6BO1iWT6;g0QMHTN|w4SY@mL zx2R^UfP|Ud#|n-)U#wi$4Bn*GxJJO19!Jbl4&r2`Srgm3bnFrSkZ z;sSI!+{s|N;(9O)-K8ST-)QX 
zc(Gv-4?IjyLx`%y(sD-ypK8FdYUU#}Lz=Ds5*a1IY*mX^_)A~jJ(+9Fu2mS5cLsG+7y&wY zO!HbbXSXRcoM&^jtY#1ye&NetDJBT$N|YY9Y-@p;ngnmIB9+9?9<&bZD=4}9YP6M} z^LM0zt%-nR6~Ps5>&vE&p>NpdFX8+J<2Dc6bSJik#va4K?ULYMk4O!|@wJNxP`19- zQhlYf7;J-X{*pyyGb!}#hloV*8^X!8NL>A4Ypd5Cb|(0OlTL3u9*tY&>rToypd=Lq zmT(tJ;F}!^)+}+92;LU)M{z58KzX1yMwf7rd$H21Jz7up`e)8gO@zNQg2@fd<*P~;d7vALR@2v650Q-@i|Y$$;|KZ`yd#3!S^IUH3__Wn zAa^9hWRQL9d(j|ss}XmGe^a?ijE+b4m`ZlB*o27XczTgf!kxpbfBZuJ=J# zoN11nX{6_ya#sW>$CKJuK7G9)8*HVa7UW}azq?8&)9$pt(V1*?2ie=ZUD+|>iU!r@|2987~MV?b*C+IeMWLpa^_~ipr-#4W zX(+wrD!u9|vHLhuXi`C3RLrUXmO_T<6(yE4O?;w3;-RrZPAaLqwE?}#QlULfC+}xk(proBG24oJ;X3{QieR&D5ikXLZH1C^ z$Mi26^$8$gKUJIH=o_O|`n11_V52UpDR*(T@m@9>%kB_03FwySL4V_X2i+?_X;~El zmVralq~YeVE^)xSn{ZzMu4u|61&Mr`rD-pbU zm1`J)II#om5DHI9A<+N?1}+k0!r0cH8e7GL^xTMNs% ziTCE;B;2~DjWb#zhE!}+mV5~1ca@wgh~Q4tKfGdenXhj z5NL%$O7tvb)m3U+h9%k3>hD$OXeN17W?Z&7BwEGbR`1PP%IipOv!D4XtlOmOQ$nh= z#~8mVY|K*5P{0-KQW3@yD+;c9Tr@k`Ket^<+w7l<_ukj*4sPwAvs@(0@-6?yjzNm% z`F=X$uhCKxe0HqCe9<;EXE;(!z-zlmOVX*`;wMsY!rf42AzD@XqPAV&>zto-j^stGbmh`E;Jqh;m+A()y+0V_A?14Spu#-^#)2EJR=CL9+28GrLauP^;unQYqRNhwqVsK<={>qHr|_G3qlpS(cXj zTnkAW^$NexfD<9d#&SY!kXtCJNe+oo12m$jL8|UN2JJHu^e|lbq zDqSXPg2`hNr-iXSL#`uB~Xm>s17zrP8m!y9l}Tgdqfebo|NlA1N9 z0=($z-x;dO$Ka9pp_(*_JnSDtaF4gnQ00YFEW4$~qZ;~$5r4P1lil)@qWGU>MJAa& ze4zoIaI#t_o3Zc6i^-eEXD0EL(qAW`b(C-yUI&pc2}8~0c4@cvOW`G}m&E%Q|C;<4 z2|uREFW{8&Hm~USB%3PSqq-`KRO=K?F#tZ%fV;w(8|q~-avWaFN!?zd$~cy&^e+dt zExd;aVF}qcNmwNXcXi^I(eeRUIDzM`+ne@JqL5~HckfKxME_R>aZ#$9q(}H}1joa< zMjkCtilF#VK%J({bFZ}FXe;Se^YV&tsS*SHaH9_H+XeS^$TqMH-WN1D(lZfelP7AB zZ(C(8)cwa?Vrq4T%0wwN?RyXJ5A1(gOvcd%XOz%tszqVG8j$Q!tLDb{_T&!!;x($2e3_?m$pMVRWw`){_phS!~Gxd@M zHuv|oKG1-BLmJ_U{-B30dqn@A_=g6DU1Bu;nSmjST?DP=oj{e`w2u}bCfU2A#-8RE zBX}T))KXZdC3Rf+9TDmKxY?19oW?cp_NMpjg^?LM)xwE`8&6V~U{wyQKL036588qc zhrpauqss(%JUtg7-jjW?e@(iWdzGF4NcDGzvr@*h!y8lp9ElgP{mc7m_lJ*%P(6Cf zU7~usE#BpmF*L5OG!h7sdw4#I6$?$5 zgtvu2oamI$H~6iJ!12U#BeBns9KRh@5GcstbqRkIi4|^uH3>>i>IcLyBGutUS7it8 
zs!z}OXoxa`&M~ieiZUXgIFX71WD`2o%)RZi2`wGDt{`17^oPmloYHU5n-%Db<=ZT! zJdy-<nc&X;3&WRvi#p^Z(ucT6kARsTDW$vCxptM;J zDAY_<0YF|l-ONQ%!j$`-5`PH-^3q9&m|!@NmkvRC-~b@6NE3m)9|{5TQo1VeFeu1N zMZjrK(n5EvPRY zmvW64=|I!JJ1wB2iQ=N#v!q88>| z1!8!A6kwBCsXAe}L1veOCE7zstIYXtH({$T4M_95stK*v4g&NPdc_cnaoa-ojt2Oz z$SEd>&(0*TP;dB;L;YJ79px`)tg+iS)hEM!z5$QaWsqB>bN3g5R@jOEYK~dRw7|1& zGX7PKqNVq}RE)H;N>%e;cCSzF91&Ht}O*$bWynf)xW!WF5i(M^s ztT{*fn9tVb9Ae9!189%dR=B2H-QQRn^*k@YpEhB;UXf{cQr6~7Ie$sNHVKLbQP}I4 z?%X>Ir{m2qJNFbpBj?i|j{7f77|&Ya95`270g^h*Zm4@m)x8O);y2c+Zp5R#s(-RV zDQ`yLFzS`Vad)7NVex+Ehh*ka1YsKcH@nkr=k#d2)t$x*C?db^fm<|purN1ae53(y zwDkg8(TD+|a)z)Sh!y*rYmRT_=na3?w9m4(EUADi2GpOqfhApQ*;Zq-I7Q-Fk~2Ql zgg4dWEoJj^%N4c?U|naH>h;MH4!r4tAbhqVyZp)b%;Lh7bMT2^mo&h8C{pQ?a`Sqf2+mt~iY1#p7o!4O$PW(4rbCY)iNQ+LutCE1jt zFq3yBcg2MelYMJji4|FHJxdGp$KlcV;YtO%<=|Lfwx#(Re7p($8L5&>sMc>URN}j{ zQUR&3q%3-E5mnwq)%iwvJbtu3b)r36rf~I-E4-h?GmehPn)EvO##)@@PIQN7v%YGP zd}kf*XB8F-C0Jr|Yw(>42M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Ehf8V+FCU(Jb zEjmNqT0Y>IYNTT@l1<*=Nw_S{bpM{@S{<8ABAu01h_0*;`&9MertBiI;ELnYl=Z=q7IJ3ox?TmUUw&FEl-sAu?Z9L4BLS4l zSrx-=dPa$-50uML+KJ4)66) zz%ncL84w-CYJHTIE!X9in@fO_E3m3$+%mVQMVpQ4e2&dE3y-*xp)p&7pZ3gsIOOsj zL0q9R%R{#Hwo^0fDt9j;_RM;+qb*g<5wItdX`tYB7kV;*Yeg>d>&b*Mm}<_mCzDCj zyC)N7tFq8gJ(-LL&z?-m0h5tzU8*q5(5XwtH2C&pQnn2#$*U(524wBpuO|}-i%wbS zC3N2<49sw(S$tdb?#Yg}%DFvEPbM&=a%OW+cC__u=*eV)tkRRU1N3B;A}@^i!sy9( z(TOgXW0QV889xLzdC#6qMiRYO?Aw#^WK~`hE(Mm|-?OlnF~DR%im8N8Y-Ltv*OSS( zHY7l=o{S$a=iIj^0YrmdMAS^m%5immcWWvA=WfgzPMn|rc0*OPH= zM7DXc&*M)syk#kv3*Vstetd z$(gCWC&MAvy(h~#x~sFqXZT_00ozvM=~_=*0eR!~P0B*>v(^3tNL|_<07p-{(~?rA(81 zyvUZ{SAt({+8-ikDT{I>V-iqcJ4hb(5{Oolg^hLT%S8{s{qb?%my0B^#2!3(uip!Q zylJVz2)Ajzmgq1W#hsywdkh|lAM#clXD_xr*T+0FPq_f0I}@#s3v_l}{z`NinZUae zn|#B8yRCoZ_u7G^X=Qq?OWEu;U+CKR{8cQTUan>+xf_lwoJ7QeCKA#*URs z4}0T`AiCohje)L+yU>z~x%rdEC6&-D=&5vrqpmC=;Q7qY24C+>Po{yogF@ zTt?5MOV#I=Z={*11&8Bn44H@>m(LjL6RmLm{AL8NVfn$am;H@_L(cn3N#k;oG+?#h zkhv}1s;dxFo4ah_8x6RXWv(*2Ex}R!geSWimYtqK_GRl@mgg^n6-a!w@GTE%e>;M8 
zmYwR-7UF@Qv_tS@EJ*(-0?F;$tP~7N?W~m5dVvLvKaJqEEJu~~inr01!T}BsI~2bwD-I+%m3*Lh+|Yj(@!A&?ZtDsw%zt+xvroY@OIwXo8s} zl7r=ahrkOn6s)`qpg-K~J=K|>PuCodH2sWDU3I4yQLMqAyR@{tKDg9miE(9v%j#4w z3oNBK0fxgYf5)Y%}4RD-yueP}tQk?uqT)i|umEW(bt6>cR;Us7-WgkXiPGFySlj5WzqfY#gy&ddi!8 z-S5TjBD(32Sr1cFBye2?{$M9fBxF&n0Pb1SCnz ztAiuN6}uY@B9s)eDqv*e)4E$(=j4Ya}fiiwm}E9t5W{gFh5r7iJlgObgtw zUGjLYm1kQrbHjE8ro|r20bdj-x?sBk2}w%Rz1-#%!sT#{!H~fX+ZEXqlA6ElO*LQP zfIc#F#CF@efbHTj`OvO|;@q%ZhBGdRaJ8vsSP8!3@}^TK=hg`PfE%{UuqlQO2gH$~ z=7Q}qk5>#AE|6L-S$VL~XcpYCU6wf^Ncc&*2ml^Z(DCbr?K1qzYClrn28L{NrP_L_ zBeu)&_Us4`ZrCozs2Cue+GGa1bZe6t0+Jv>+hh-4s2mYw4YC`y%gb8`6Tb4+;j$ru zh$FU3Z&iHuV7p|>j@T}F9K#@`Wv|+>T{13TJB7t|$ruXW+_7B);ZjTNuy5Bi#I7{N zWKxdUE_poD2bZc!C8ccZEN?1?7D`t0x-=zZ&ftdalE<+L`eVBsJ(r}#5zr+qA7Sk_ zw#zf=pSn1DMSAiQk->f)v0ZMavRYS+O{Dg%0ox^WVg0raY?q9a`IwaFy0Bd`GRt9T zY?q9oBEkaJrq#KjH1 zj@T~4rWiIH5J!fZ3%1KVE(sU9r63Q^_9OLe zU~CtUY9Acj_U&@KJv+jK8@9_aDh3D#tdXPV)+Xz*-OAo9!DWk#>mF9pHql|>y`f z3_gz7F2kl6HXIN~hMEhu%RDX#7jm!GXg)Y>gXian?Xt`XLBdbcMS$=yCyf{#v0a8= zS?wpOZ)bWYL#|6VY?tGW3lAPD5Iq|8zxnl1`El7kp~mq{298lNKsYsQ3^eQ3CNl&i zK|)tkc<82b>~X|)d3g(A!dKq42MY8;!^tWqigCBWX%F2c(SqCQgs%WfU-gdiLul9N zA-mVXvAAtGWLGjg?_&CDJ%Ycih~G;F^n%VqLoYRO#9dHTz<1Q|ay^HQA&#dz1u%kgnzwoZy{} zFgXD^pb(1!>BF#q76lX`pli)eu(8K<)Gud6w8|~lxTF@6?1~|Ekndfn%%X#?DT@GI zYgXyHIysMA?D3s@De?$yRK!Oeb>l0muHd^qxwcTMQ1b+kVN#2&DP9IK2u)ZlES$TNq;aJ`VR8xYufP9J#e>Tl%4HOq>m2!SH8n#Z@F&WyuCd* zpUODiN54&8Zuf9feT{8WLJQ{R1 zd)qy9`_pjad}pN`TIY%g;sAr^x>%0kKw8@kdy^@;nEhPbxOz_0QQ_;--@6B1&vJ!z zm&x4Hd~p_$884?ynYhwN6Q4^fUF@F8dMcA#dG?vD6R5H;zu*}_8Nu>GW%JIWN#sldC*${mLx zYYRqlwG6#p8+D2BD@Iy;+;M5`a}NHcZ|#8iR)=3x?T|n05JTy($OhfK;JF!udwwQt z8-}S3JJbrf>ZBH2EtCfc#KozaPJx&_?qY)ES?55^5x5wLd#$XQ>9lPFv8IgIWcF*Uh{@wFftWn&9Edpr&nYYRs)&!;24bBNy98CD52+a&B7A^AOy0ki8L}}D zL~vgRkv@`^#92-Kqw+2}%bPA7)43fno`bHHGabtv)1Rmkr%$O2P7A=I~Y0!WM#4X8dkm-r5<&O|;T(wScSNEpXy-#Tdrr zZ%)9x|0E~FP$iL{_+@gJGhMvbM40A1p5#jN^CU>`(ZjM*ZUXhNN~ zO7>+;z7x~SYvE=J`(+9`IfJD#+v@5l-s@x)yQ$;GJb 
zqL|LbsGMzZRRz;v(K;@)^}eb3ftvo~O0@z|>h=kN#fa8*X~IHG&9-J14> zlhL>{>~8fYl2AIUGL^q-uK#5dO=cb)EplMJ3Ou!54YFN-85>Y}=&NX_ z*G-EVc?W9zT;BYy8&~;APiv-#TU9ef;#OvgxP&oNB>ke9B5niC6p6{g55kLBtD2nc zZudIf;bvzt=x;b!)-}YimeyZN&1!C4`ak&kJnUyij~B3QCyehTN~2G-6Hls`a#GEd z`4?+crfmJXk;&w9Q%iSnZ%dFq4?75 z)TQi$_yAc&tqNb8hZklayAQoiG#V@7_tGV3-{(2xcB{ACd;jW@i&1*CO?JZm?;WN2 z3PHUDd;oqN^=g8lDMGqvY*hlc{d_(b!9jjL@jmJUoWtq-BQfzLGIJ{&?2LA%+dI=v z#{ML3!;#g-RZ*3H+JN%qC{JAiFj*<>2%;&sw9Fwb^+1zqTZ&RN_L59@m_??`ub3_` z<1Juyg~Zcn*?ZAe0UA_ZNR9xRD#vb{f*MwpYa(*lR!mJTa><^2#sHk(@%y5{*ip)u zsq)B+?GkH8wU+9z)g&KCAu+Ww@oTsLQDE?xHG`zI5_NYhG=up-3I;VM0e>to_bSSq zeLX0$_bM%WF8h>8epWE3)?oHJABY;fjxuR7QBul^EMBK&(ZNLXffNi@(ctxzIim(? z{VlS0eGx)k)J5njxG|_pBHEG$Z=g&XHJG;!B8xW^EV`%*Q&ljiQJ0K4(Fbp&%o#OE z`yG+J8zlo39J?r$45i@Npj7xzq*Np6{s`sLsKUHgJ0vFhBLzn;I>clZ{8ZE-y7m;8 zTZFx0(ZZ-i(qbkh2ANXEMP-Drf<=SMBo+jpj{sZR7!D(=07=z8(bod zujr4l{X`atf=e*ot;w8%$ zEGKV4C*Vyo2GG@M%ZA61#bQw-CQN7i(a(|?Xa7LX%x$)_;SLv=_k``=KL_SNHF{+3@V5=hf0BduOD#9t)Mnxo9+rs)#-iKt&g;#NK#_l1yGZU91w(cHMHt7pzKzpOwCd$gI$zM8)mZQ!e2dK9x_I ztGCOv`e-e>-yI;>61~&2dPFn5d}UBQJz?okF+K7r{q)GE#OeLN4d5zu%#gZA;Bd|K zpb#|uTiK6SCO{m;lSUjw+sCo4ze z>1G!7y2@rQ5+ULWKq(iP%5#bSwcPs_G8NQE_%aj*3h~ zt`0~=h0bF5+r5K%eADwc|StYWFiMC2B$7^<#y6-R=I$;koP7~|uBES+$oqWHCYtl~&` zh>EAdEVUa+1c5DSBv%vq2=;0nS(L3SoKctqQ`KMMkAqzjUH%MEA(u7XL?tS zdb)@1o|Tq7j1vb4u@g@0 zZI4e+`;&9M*d$SY z9@D~qiMhWM1L~s58HQ+2D3qLICx8c(w_82TjTBS z?r3mkJlSqzaqhWgJetm0H^5SNbJ&|s<2D?LuO5y!*VgTZXuSyboa+sD`|)!(;^k(% zh}6|sS#BaLSx5g^$u?U_N!`n^w3a;rpNQbKc0=m8+8&Hzf1a)+JRR78;*w$x z0IV&iVFdkKZviY$`~9uBad@G%7ow9W5H)=q00p#^UMD38A8v%O;_2?`xOrmtbP~cN zdQSx*GZ?8%o=7qBkRp6Ch~Tz&hjH_+-C+{ibVG;`X*GnXS_T7$UunS2-kuvbZ{FGp z!8KAz5HiUAa&PsqGo$c68^O!$gcrl9-ek}_J?x+AREt1|U_Z*flm1qB)Z0dd5K1r} z3HUj%Z*x4Fp+c=c$!J|cY9%J>!TTC;#E!4_&C}`N>?mHsUlU7lB>pzndM+G9wZven zH|yhiFmAj&GO4VE#JM}|cTv&u#9(wbdnACRz`u{+YDDZ1&K&9H{+}#z#$CM`s!QS}{54Va%1M=wW7|Hjr%Ilo z{%^W7MuD=2LgWJftJB%9NYn(x&<~iDv4LD1l?d_P{2O!B%UQQqbL&djk8(k;n>MVy 
zS-cc4-hIadx7f2y8G5@#7&+M+JsvNn|DV`0U)Yr;kg*QWL~y4;wB6OJ?fa`~h+c(u ze8eV1(?JvseA>LH|JYEiZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-C58qn1s4FkXr_K0Gg$ zjr>O&@cIxI$Mx=FD<96LS%cHne(UOH-b>V6JO6Yej7`&qK8%N;mOcz2wyE-xnhFj3 zOdtW$#y-3$hY@Ak+^ZXNY^)?5wR}Vkzutgb0{Am-@WYr9YVpGm3TBT6Rlw&Xc)6_* zx!yw3GWck-RcafA?j2hWw`gMG?IR6n+wvwh`_@89LfByCh965q*YNu&BkeX#AW2ce z2#tuf|Ap{m7GRFu^C3H!&3T#HmHm~q`-B1SSk+ z6xdMPwm}N42>EtqWDwOR0Jx(3=k^x7kR%{A=)4Xgms|}(XY!_@9fj+OkA(@;T>#{0>Ycvk{F9Q@}LJSYZWZP%b@%-Bmz_koVUCcNLI< zw7mFX1!M?0F9ERvGLViJuEDCjtH5Rij1@?C{d-wB+y1lmY}~W}HduibXXG^jV@qX2 z%?c~9BIJt|kU_k#0zyP*eD+v@4K>b`&26p?NpGxx3}RYzw89DqF^xzqu>uQ9c36QG zp`|9RzN*&P646I*mjm{tq@-LC%HXfyQ<_pJoVFHB5mGR&cB` zR)Aa7#JI13gqaM>v~N(6(^Pryz-C{;F$b)G$i<8mFg8#`%z0o1$0}h3R0zc+ZDAX$ zHxQ0j#R?cCRol-y?JFR&F@OqISOKA>sUO;&Zy=B%g(E3lwshZR^63TBU{uFwW6 zAT#C~5iX1yMhS%#kckAu3JA%{SOIy^j1`biYHU!_D-k@+f}DIA zD+tl&7nbcVMk}l!=b|cBfFBDnOWk%AE3DvnWvl?VsAjBygc*z#P+Y{eK~0^)3>sLy zgK*pdDw@vfKnMODmfPuf{DYsf#1F!9Re%UJab>yd z0G$$dDwwXg9!!K20-HT+j;0L)5eonir){?UCC?%f@Sht2&7Qus5YUs*s|x|Cp}tOW zMgfs<N|@F{-6POgbD)~g$QVA=oN>6)L1;Ce!+f4 z`vSZq3#;26gAQe_k3WB<4oGsELIj^`!0~G4BQ!&r|WpqxN>cH|kE`yqg`P+@DOwllBSx)2)LuXZn-=XtN(5 zD*Y;{D6oXPSOVYhV6RVteZkuz{zz^m54|b6He=R3JsJ15Hha@qdc1V|$^PK%<1>07 zZy6R?Sa|DAe21y%b>N-C(F;x!;qQ((L{3jw-dIYH*hi;XGfPy>*5T55{7bO7hCT-G zh~PHXe%&TB>5ZNsXBBe;D`e1$$%OZ9>_rn^Sc?__9h2-amFxnHDG|$w^d=MjJYFSd zB?B$|<{&JO(Y+}7T`77D3!QJuT@|1lPikNJ^aBX80guA{?kYw0J{xSJi%fdM-lV(v zcz+Y!YO=ez*+&ev7hb3a4x>Zh?KE^^ zcRRhZV>_|?I8ta*L0m;tPz5zqB|p*c?{v2Y+tchYXY|6;$-&lB>Fp+fE;Zmz22&cq zXm`82GwDy!LH6y}qI5cJpRZd}vTVk~_sY#`X@6$t5GB6iJsEpd{eoZKtRhAu`k407 zueNd^mYM7&{O)k@M89r5F{xFl5NR-6Sb#r_fV)o0N^s**f0}XD&JyYz`P0-a2mr~? 
zilWsH{5L=D<`NHOUS?Y}{&@szl?{_>FJ8QDFdQb{`uC!MZ#g67cHl8@TXl#~}17grna zWus}>84YR@&>bYjEumJ0fMwt?Hjj0Q1AeLr_XXgJrc6@scmWA@ahuHS8fiA4`D;+K z-K|kHM}jNDA2$LkW*)WL1ro4Nh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiJV8cI# z;L9&vD^`Brg0B(_x3m@lIyt*~KR*wjRBSe^jwgLbSaGuvGRBnxScgL=f(=|z1Ez0- zC_yKxe)z2jUTJ08*IDnLi5+N%fI!4Y1w1fC4l46BxS zqu-8JFlT6cZ|yjs55Dv(wicFi6YtHxNw{@Q8)vjc45`?Z>LL9@`pS=;=D!C3*$>$| zNX82RVXG^llEBO=_??K~5T-N)TA`2&Shq>lr*NtyZe0Om{Hm}qOF2UUSF}q-Rn|GEc~_5%#(V_-%62JjYj7^! zdtbjdymfHSa*-^{xBMGB1}U27`{{_kMoUR>erqr?@3CDg`+{v~&Tu3OUfV@ll1}Xw zKaqkH?uIf8(VBz!7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+B38O0X1nn6 zo(Nu|8!TN|$wQo(^~7%8Mmd)Q8~j|vzm$RiJ z^YQYX=<94SJ1;}>+{eq8Y99?M9Z|1^W%cR%01v~1@dKsz0et-)v!jaT@PQ_r32(q< zZ6W6`^i@l6N#>=ovs7b`!o%@{HEFE7s?^y(ir^k^ouSGLr&xANjYl=~lM#QnxRc%T zlcM;a$Fx4*fNnThZII2_x97#=&6Bg!_)6)olh8U!xZ|dR$hXxg?Z|!!ym;-hc>nTW zk^ds$M-sZ4fRbOpDdlZm(eFt%Rk%lWRTin%DXP$K((1!+HsG#s=7xG%j2wp-b5gfg z*sTaCW85zVwk^De2w@4?H%V9}1$TAgV^Qk>ESQHS|Eq$y zsHR3af$v6eBAjdF(E_CiiVp?UY05nJN*j*0l3rC`UKQN`NTUw!I|cW3$hMjcNDx$+ z0=z$HaHMA<%qLIOAm5HkQia)|d=j#ZDApnaItx-n(hKL=(qd|DmC8gZH0>(q2Lk(F z7L#%G!5JmAn(C#CkMkmy6h4Cf8rk+7lc2^rqBFRO-zJXoUj3SBl;xeQyL01d&<_>$IhgD}Q-6eQ~$i%e&~Fy)ZIkr&>61 zaN|kp60FK$)n{}?abr1xkA%RSQ=`iScp^O)A>NaHv42h4ZRbByJ$Hhm6L3mr&Ukis zg9?Bn(TQfVO;_l7r!Dkvy%qZtAykjva+j#yZi{#631h0@C~Upl-> zaVQuR+t~b}9XcM3H6c)6N>#1AT2Nm)F6Bmv(WArr9`%Qg#8W;c9VYjh(5OFqay&r? 
z32u&e`fcu<{(2$*feJFx()s%fS-aA*8e{t1{=m(}eB1G$76I zswT8vI|$HI=oRDAml7*$!aEw^zapoYAU->jyh6R{KMwV8RdkfUoUzWnsi-_K4nEg_ zhwC!PZPK~>3qdRF#D6u%EM)5L)=kF0tx>e}zL$!TR(70nG8ZGY6&vx=2tI=jv#rrI z)0PdXJ*m_D>m#0C_)=Y@e_EqSQh2Ifr6u#tpWfZ^*FtQCSA@U3;ww7k+H5%mwrbOB z7?|sS)fg@qgr_zsplsP)O~!RaJZW{vZtznAJX#lkZ#KYx8=~I6SuChF@ndAje}BG$ z6$4IW8_MDTMD^8cbO(C<3Z+@zI*FzGm>;jpImDJd2hbj=t#D1Zy1%hD>Umy(KWV~F zy&}`@q^!-Ea{iJ*Z4wj>qOjKq-MRNToQb!>?A%iXjhs(=ANlW1n9N(@95`270g^h* z9Mx?bj|;mPQM=fJ)A1W?wF@K+_~{CzycvPRs8zA#4X?#s21+<6Aj;!?&CES=N>%6>!CX`g1q1q-QPLYHSv# zNKk`pUDP?_Q%!hNJ>F6_FSlG_s{qz@=BZwv(C@&TW>F-9&o*TDKc(A0@hMEKrZr>Z zTjy|A5H9MoF2XhBb4_9Ts_;+*;@QEf2=A=q4p<2JaP>F8@O3qQ8Z=&jaBRm#AQlC1}o-IRAgI}od zeiAQIHeBN*cd|EnJnO5L$#;ouxSv;8ur_d-7U0h+96TtO5LXE>1svahTEFQdA0!ny zuLjCO^+t0Y{-Ja0P3(f>T9LvYa7;DQF&N1vulFQemS(ztPjan}O(v1fR#g!)i7U88 zB=%sh+{m;#E$nTR2Dk^|f%txp07pbu)`ztx!$+F3i^PH}j!RS42TNMWnYHVB6{tp@ z)(hnhsB*jTX#8*h<#JZVaGRca<>>?EGL&{A^KjG z@>!MzS*58v)=nKbnTN;=L)Di)646Qfxp!7Ls>CchMIQ7`S7q%~IM~%Z>zw>5e5k@6 zb~D-fQ=mjH_FLF(8DKg#%Ow>inubr6WuDWcSgnt;vgNwma&rk#as^hEj9X>`wP>}f zj)q>JV{^^IBd%m<%+}#&J#!xpxqL?uS7^-gkZrx~)Xcid-HV8A>ptE+*|D~&<_Or6 z$u!{5ic#@+_hbUsid^K^lL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$ zc$r^MCS}`@lDv8{VL;Zd{dzKiu;`SUg)pXy2;GwjlVZ5_?#Yg|%eg&FPbN%S<;>=u z>{$DIp(m3GvPw_Z3DA>Sikzk}Q`KX+#^&3T@uCx5F5mXz*OT!>V3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?AbKjnfCn`E(5jH~iWc;{< z8B=uf?#Xb-b??dYknPoz@vN)dy%7D}da}+udNP>?E@)3Ca4ot(o{Ri?GGPp+n)B?* zWYYBR$%NUeEHqR?lJVf#lSw(4M@EwIVxN(G*U6-88&Z;2PbLh=+O=O#CJ+{#vIvf$ z`z~Q%h9g}c=-rcbYU{}ahE&dM?#Vh_Po~+$Xl?K1USq*~olGXkDm~e1fS$}!Jd?nT6&TTiw+kDg4XfeYG` z30y03kzY?HjKNfTo;{gNn%+H`Fk6*{hDt~>9z1(8DF^e&NHSjRGm>vlCS}`@lDv8{ zVL;Zd{dzKiu;`RUa17m(2?H}6)!LJ-*4C2=45^&i+>@w%JM2fbqY|UQdG$ zd+uG736kl_UTWB(8K5Jx6nJ66rW%WDf7AE+YT3ES3r=*nARQS$M39b5MlwK0#*5?DL=RDw&LHL;CaT$oTOBb!0qI(GiQV5xV2z$0f`dSIK^= z!nHV(U4vP`RrX_c;o7Sk|cM)UW2jh#jI~0u#~qii{9gFuO-mIqeTbk3%OuTFTaZnB}fBIX&o2(dx_|Jm`T9SdQX_ z*5a8nXKsRDi{M4tDaUZCHyQL!5BsM&>5PI@D`Mb(7*ANWS|>p 
za%sbn_-aFPVF|amOWC7%8CBo70$)V;vI{xEP_)B0BX|wV501U;ZwwrA-ggDbO|VwX zigQWBd}OZaAt^C=!Z#XlE6ZGEcH4rZ`Uy{VMJzi#gY3)JrOk~Vf|RF4gEHYe5p1yR zRF}385B#KE#0w8en0Wr92qd>~vr;f9wYyqY>qQnc{v?8%SdJ>|6?H#sje8MAh?Xpb z!FL*PnuAMs1g?%>VO4QteeCl|AA9-6LDa`CW@w+J*3cY`249Nc)hrKoA%k+I4i}n1WyOTE z5X4dfRs4A$zOWOCh%p$*C&6#oGqMp=_PlS7?HnB$5N>eTTpcGwSv_yeV0oz)RYfk`24`AXerd34FwM_wr!C^_1=QrJLp zC;{phbqf2#NVc`ppXZAsiTy+qPO!XKHTAqL%QPC}G&T=oNgk|W{%xGff_P1lG#O-?I4o1;goA)JUv&X%SkAa0QZHEJE-LuC3tYppDTa;e zFak@>0j^;mFI$tuWeeAEmzyGfXh1gjmJ49RF((8GKS^tV4W`j_mrx2|R9_2b`9fMVZIDE~qVWweESOaz!bEz08EzS7A!0;`{Hkn~m3=mEY8$-{nO=bv`t?J?l5~xjd zYLHp>gfQVN?;zn)7Z}~JU3%J^eBD>o7`qgWWGDk<%Q~7CH+IB!$ut!BCvLn*faJh6 z1rF2Up2@ zIZ&w#uA{kj#CAEB10*euo=ei=2vkg3URtWU-!Vhz9kE@WN&nQv(JNAympn{Oup73^ z%~V$Fim^!&+O=uMFT8X6tjo)jZrHAwXAaxFU`fmk+ck3Mj_n#ri^k%D?V1O{sm$OH zfxNDS{W@a10#{rz;|$EiR-h$FGM{qiCQF8n*e)3)%ik!t2pjhu@I`^53$`nekfb!P zRRcm^fz4Q`SJ<@Yi0zVj5ZMb!&0qGWMUXHK2yWPJr!H(46TX1hF2k8Rh-eyJ*^XQn z;3pSEsg^G7$#uha88*eR;ea?Y)LgJ#=5a~5(Cu#UkgUA^)o9ic+hv&(f`p%>ivZvu z1s%U`*e=7btllN{t;d{0?9z3^b~)a@H^PG(w#zXp1_%eNk)!9jM zC^B?TDgsOr!cWl={ zxYQCm?AtXB5dhmIL(C1^B?D&q;8Inf4wnpI*Wt3uCdPfhj@T}l5jH`g8FmPw96gt$ z#Sy4j*}7nvWIRRMGW5od*e)3qOI@gNeWhNcE-!hQICsN#InHIZt{9t0?d!#h9Xr?lpV?H;qicDXebO+Van!FliuG$1gz?b~HI z^-X-;|$DA)5w#)JMy%8SVuw9N(F+eyqYz#E()+RFqBte3%7kk)Mh(MFR z-s!M!mzTE?CVb^xd!TT`c8|NA$>7p1lA$gzwo9g=z(09x*Nab|w%Bdou7NjqY}Y`z z)Dk;jyQU!mG{j_xxna9xz%sVWRk8-uOSV(N%P>pztaPiB+nEd;%K?%WN6#f`aRhWp z%S)v4v}K`Kag&qVnG8IW{;7+jSEMd45gCNxcqRikQ(3Jm#wJqxdhv2$v0XFI-qIaOY}ZIyG!_?Z*F4CDO=WJ_uD})7cUk2V-ee5HAau%e z+_x(*Ev7REd{Ln2g6#?d|5zi~^Ib}J=eY+x?LQ?aW{hZR88@7A=f@8aQ2H~+? 
zhBG0E@MqZa@j?TU$zp{I0Fg5+1~bPq85lOju;G9>GSpnKUFPwM;le|Z@?fD+q$9S= zGA9HHKS>t>!b1u=e%(V%pmgX5VD9HU}@ zaKIWldTwp99^2K`6dtBlJxI7=yPSLi?Azt&)gCC&VKt|!oG8ZK|E4{3mvqDH^z7Vg z;dtCJoSiG7&!f8dCZP09UmxmdX8NRIncLvdfP?iMIgyFTrD4_V#^S&i_ z=dz;V&-+$ckFH+f61+h(Eq{yP5WG^jg8y6b96>e5TovO#Z>t!>_f95ZmN(0!$Hr8; zwnNRnSld*(yGTtosm&fQ%1HFiMwpy{98icwf%FkD+L!_xpli)eva!c>)Gud6w8||v z!lV}Ryy4I}Hb#-zcx!6;7bMG5!OTkEZ={e#9k&fZ-`sCU|sY1;Y zK!!;zwx*3Tz6L0-c68E5>F)LbE+xd40Makwimy;~?0?0lzoN#ct8RQ}A0nopv&Y;k zPln6yp{VjBKD`vfvRK6gk)ua1#ZIl-JJ!C2SUtA9HArMHS1v(xpgDJ!>7YD?1_Rj* z_1W9h!+vivLN~{EC+L=UQh<~eWRS9`p8WLH9XnBdY!BS67-e^73+bcF z{*~`=*-Nn7H}C8W&!;ku_t9^Ym)Sj>R9~ZDw`?ZWv10b|?rO5a&J&l?nWZ4+pF|gh zrzu_a8TO|1T+w9+Ds=TW=yK8sYO&@5_ zc6WyT_U3prn~aCut^Q6QUC=bzJl|a{mu(A0h2j8%=elUca3Dp$M*ZmwUCe$iZoGU! z(*|WXti${E!0TA9uFX{C$ZGg(h%lB?f)W-GZ{#@=i< zkREQ{+A`WL-J0)ulcdx{)iw3H#8cptd*EI#i$z5MS604<;J48+TY__nF zIBY*D=8iJOvnudzv2w>@$l8LDTmyt&mt9RnA5zQKW2WoytG=}Z;#(blO|?V*utN-` z!y+4W^MYq*5bpb#ux%KY<@sts)T`XcwI3i57pH1E1!D5JiwTlvodYpP;8Gy&x3gxZ z+qqDInCRmI1!8ifo6L}nffzsR5{SvLj)9nY{8?qfUV*sNwhhFk?FN2*9CDsCFG?*~ z6W;>_;^GJlnf)3oV)D34ASTZ`2V#!Ev&xFSD&k`oDi9NWNX@wZikQ5AEi+_eAjS{7 z1Y$C*V<2W8e^!~WS0Fxi!2&T>htz`W4aCL!Dl+>u24eEKOCToCItOBoz_ZGVy#n!Z z+dxdwH5VL$2w!TmhNvq;+5DjivbkAi$i_g7A9e}EWLU>Q%sl?AGGVVkeEfn1Vyq6S zQR@Qn*Y?2uWh};SRfF3Er2~qOup{t5v4lo)EBo^{vB8Zw z6Wl=laDFJ zFfM;{0_OcEITeN~iTuPble?Vh;=LxqH1F{wSDK$ELAtO+8GgOr3=zTLb0nZ?usIUw zy4V7g_{x#s3Bl$_P@RBtB&Bw+Ig%czm}5(|V~qA%o3xC|?T7qilJgZETLnZV;j2@J$?-O64qq zdMDs=sSNi5Onc}0{L#hFgFWYZ!`*&-*~b3mz45}vp6&5gf5^TXu~$`O!|yM`;f&-7 zME6vuJ?oFA<4Jea+wM=vs~2xyBqRJ~bK@_XXpBX4-p7HBDyY*&H6(W9rEHw!5v`(2 z-Y~6EyHKk+gwkinzNoQzVwio0J;dKLCFn zUdUS2^zq(KzuOyab*IC@W?@-Z6T@2BcnLMDg$?Qd><9n)BJ5{IkC(7*CyehTN~2G- z6Hltxd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_ zv^6`wgN*iaIM_K5G5^8egsU<`E5AV$FW-sc)NC+2&zaqU#~8yTmFZq|q|#D%bJ&|s z<164${G9AirtE|G09i$e313@;7i1s154}z_o+#q?%4KNZ=Q-qdYq#8c|Ju>krEtDO zcEVl?=b358jiY-t@5dg1A4k2KU}%bvE*e`s5&jP%ILOZ@-ba0ab2y!UBqp9jW^RRp 
z-SO^hXLr`k*q_E7IJ(w2998*;1g#DzUyAb7B>U z8hcr$JIo@}YthL@qmpI-TF}tExxI zJAM&-L163{Wz1B0-XI~GB?7d1cHN6AMrdPLm?cOPMs8C@Ez{7OyR4=Ayy; z<$)mnCu;CI%A8SywEh;^yRKl*Meu}fFgqw|@OsLmQG->F;xYFo<4X3 zWzMKU+V6<$-Jp$zE=nasDR?$GH~d#ps*!YmlyYfQVcx495)=JV#ayb%-_H>6+|p}L zak)jt}%X^r*8awUQiJFM@{t6=c|rD_TgZD%_EQuLgr-V z0S9FAdGiP)oRKHYBVbP_pEr*{oQ6E{{hVC!Rsm*LiRoixM@H^oKPaK5#>yoYi?&{p zAzv4O7BY%_W**dtSuu0F+kI{fwPJR+RHDavA2X@f`VcAR;zEXDiU|C6huJV>yyk-* z#><9`bR6cxk}2lnMu=gJNa)V$5<;_J2o-a3&{^I@;MLgNNnBVXK@^F)n2#GFhBY4< z6=9m2lZ%W<{PwgXhK!$#iu}OL$i^V_fd3{G28=pV0? zPpw2J)34{z*93ii3;Me90zki9OqXufvXz^IS454)r9I7k%kf@7$1WE~_{IC7iT3fK zhu#`R@ji&Qi*L{6-(C*;nWT+=iI#)1-{ko@{mouz3{xsGz@F^G%=ex2`vb5yKh`k) zX&)@6Z^58%6Epz?ETiwx1Xf;BO#Di?qH`!}G#B?QE$`cZ;Ie~<;wzqa)yh@;9SV!5 z$Kzpwj*?qBEF3RK{{l}zzL_dzcG5qC$4Y0KZ5~J`fqux*ZTj~WB#c~$E2z6dA01@p zC1&t^9_+3b&kHO+?1JnS-6H+3~u#mK73>~RP9zxy$*8p97O{X&$p#_^%sEX;p4lLo9%hJ($ z=#%N!)9Dk)Uy+S$?QW*er$&yd_tA7`j81UuQR`pe|8Sc5!}AJZ?uUJyy{Ryp%Q3tL z(l=vD%;_W7p)7Qz^1lX}clXbd$J2n920kzA>0D{?;Z9w z`;U)@Tm4Ddc6d`+c`oD^_2DncV!F(Q-08CW&`yr=_Wh$l>Vc_o38MjRX$ zbDR3HD1A6{1rlxI=ffd1DHJ-8gt{m|ru&{&7Ecbgp5mo*CG2~m-{0w`k(3D?<|5!Q zWYi=wIfDMu8$C{Vp#h>uCTjr8ou!lq6du-KIsJH+9dlPoQ_>daQ;KId(@6|Dea2V8 zKD6_CHb7_VqTFzje$VsG=Y!%?5`Lxtr+Sk?@AMFDaX~RAt6y+dOY5bBe4G+89tw-8 zsm^+XQJPQkVxbW)tUMpBwLhL(PPW~V{S)%6*3#-VtDS3(cAkF@e{*EA**S9T$l2bt zTi2gC)9W9-A^yzg_QG4>Di(ejpPu$7=X$BqV>Z~byVs)D50o|%ph9o$P`rQnugHH9 z+G5#ky^ydb`z!21%#Pj5Y>gjQM?ko=!*WuQIP9=@x<8!OKkV-bS6A@ZEJkedzJxs~ zA7yNmth&26Tj6`Z$<9{hQyf}3;0+B*P7CJwt&T!if=(za?rP)I5X4j zERo#UwVvOF?7Bk}{zJrHBqXz|bo!h#mfVLx8u8Lv`o`8eUnA&vl3XF^%*)R<%m$UI zLU2I4{jrW()_RXCs|rWf2pT>b!7HrnRPZ$`;S`?a%#m*H|H&d}5MUL5O;o+I?!Br~ z$SK%DX38S6>18^WU2L{js(AKvdJzvGhJL`LjC?5}CTkkTH;(e7B@=r!7yMl*taRXY z4Qn3Z?G|AqLy9uID6wVUdIFRacqZa+*p>t|v?f`xedn%rrd@g3k!M}G%Wg+92R2k{ zp@B41n;JSR&a^H3Gar2P=~gIyB4sxcj2B%Sk9fIp4!nG{0k1bWvz1=7tvq*Yyq8V0 z2B#})*Rw<}?caUvgJKErbR&#S(}q5bhoF`|3?bB1DR3+rvAir4J`+fQw6PCw%3(yw zHTPZysBU*MlZRh#z%2p%88`T0%m}skVFO>)9lZmvf<%`N$0vAQU-ci&eN|4AbJ3vPz6YP8 
zx5K;57+lUzz9-chgUcUFoHbq;r3b@7Ug!IFX4s7gN$@}u?3OH*0x~koj~`M%C~A4B1t}mS+wo$9 z6cCC!Ui^>(n-TD`KV7hQDf-ATsc2}!&&$bFhce)U6;eQk)XB~sDX^ioZG#k85vmI* zAcI>W1?0mz0oo%4Hq5@}0gOn26(L`w zfDB?%1=t`3gc#R`aOhabpcyG3pVYXu zcA3)0N!%a^E2Mz3CG;uQk}E1(FAfd12R2AS{^})LjB%R;0wV?a`7oRd=~^xuqyW=Y zyQqe~1Z8vC1DC%CTXs$nv9Ll4$lI{lkplKe0e;Mk6y)Sf8rOQF3wVGBQuv`pDPV&X z0k8ski*>P^z#>T6 zVg-y16kN^&E9hj3i1D5RBUV6#P)yQx4~NyR0{R{{>(X<^3dkJTKFc2~AXGK=L%SZV zfQ-!Yjy9#VZz*qs(s}uifI+fUD$W%;j zxPcW`K!(xD&K@hUp=N~@SP}B=&B!3Cy$M!W0U@FjpgmS#Lya?KbDL{J(tB3{8N}G; z+UzPI#55wc#0o4Z*ep;IxI!Q{a}OdHs}(s*#JAeVvM zHFZk2!3uIN1P`6*u`WGl ztbojc4YdAP0imj?A8Nu1$jB@|epmsasO6;=tbmMc$BPYCKq%^X;To*USb@z50rnM; zVN%h!;R7uOtFXcf$dEeO*<%GZ)U2=qD?+}#85zV2D`|9RzNE5MHhVO7Hv*kA?6D`N$?MKxmuB+N*#0>0n(!4a&mg5wTY0g;Ot zD`0G((5NS905({`@k&?$6@oQHK)Fc`>2CE)WcT1JAYa!z-=C28PFTNo0}P_{AVKf@ z2H6@$j~bxk6kivNN9{I)cl?8&dpay@adqRoxI(AIoeHKat_Kt0gurI+ zj0Qw38bBPg=`U~i&y9d)Ptk>do`haq2uO|X>->X@;?M;6YDl3{#GxjW4=7#^d+-;p zzHy4+4;pYss4#$0h=7)cUU3LWEv(&|1p5{33-FSB&$T_0eFMVL%7*bJSb~3~E44-N zsRkUcW}jAgTjFg0#rJ$r|qQY{2i@eYgt1d zFW-rd8kh}c=XD)&$(#ze^_;ZU#=l{ozl8G_ETEh0tqNO13w#Vi=19e!3w%r?%GTFd zs;_hwgKf~wU$UrdCWXHJ5RnLeO*p|8NntQ*Z})qn?i62m((O+s<4L=G{Ym*ol%%4- z67FJ&VRplVEsj#0N(66<_#?TMJfJ+#pP);*$URx&eYozC!a#sZ?$CKJuK7G9)8@yIkf_xP2cUS3jhAxcR>`pg(!``I3`FMZxiSBfF z6I~dDZUoA1?)sDv@|2989N#t^_hxN!jYe`xas{d+h)4ybnU})RSYfF_I&|I`w`iy2 z<}PxtfumLNVNW>>68)4k)mMq#$B{yl3gSvZ!1TtfI1L1-{w*%bO7~vU*Hl$bi7qwZP6ksNz-V{7 zi>_*$q5`j--HzBkU$>?hRVW=r>SPNGX@6$t5GB51eEDNlzu=cQtB6sxKZz(;umA5vaUs)=&r|IVXOj}xOkw0czaqD(^#w|3B3WBBcyM$?r z1OG3At-3|P6y&uPO3oeAzi!kgfPno}ZGxjkSCu~PFC*Bj%WBG9Ty4B_Zleeit+Hh` z(k21j8a*6rp6{Z2=BI6|LclU`Xqq(KJk})+_^Bq`7l138GD*SX1tiqPZ8EcKq}hDt zuR$I9QRFMZ72%H?ffX~4TI~V}nDOaqeHf|`KQ3&0aCB?x0C~+=gHt78X&UQFYxv}E zH(##1+Wf>P6Qjc zD#cr&Qt(?5ywb|Fv%$Knbr=2RYMw!aY%MJ3Cf=KWlW^;rHqK~?7*eqr^^_hDVyF2z0kS?o--m1+B;$pE zu+^1NNnmCb{7%Gg2vZsYtx!mbo`tNsN^J@5U>m7^w>n32$)hskdh1OrSIXiO&?*ME zdT-WJUPp49{mf5c-6mC^!l{zN0`?f=SA~sP${7l{qFpM&SYk!NRga5iC;L~nOKDq! 
zbMfB$`n}<;gL9UPWLduD_ahmks6u=xAMojjzeY<*aDHnr&OI7hEBk_NXwGn?n1I)I zk(Q)WyTwnW;Do!O%tEwOFDk{s7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+ zB37}cVi)k<6TwS#gWWb54)YLaW<9Z+cOhr{0QPec|5grGXCeAB50Z@+nb~#IhuS5F zl1kBb@$o%U5XgOYLKLp%AVz&Ar^?cDpKBq>La&N`0sdJ7PKF#C%L%nXZlRt2%2tEiXNyI$ zIUda><6$b4{s`?l-aOCjK9(SP?&Iasc{HeWM7>ovNkGIZH<%Lr$yQRjX z8v4nIzgyhNZuv=3{Ew}XG@zVh_VD=zbi>JNgKWmWJufD2o}8V=S4w}Kgw|2QU3eWt zzQlN&txjoc_DkT!YnR3Qm;Z|V7YRQCN`3*Sl(%_BzbDyL;U3jhStM|$_?r#5E1bEZ zUKS(A;l-TP?G?ndlpv0FBP;w;VB5lbh!B>LeUpS$QgBxsF&Rf6oKZrnsg7z-5u6I?aoGlbtN{;&)2rA5=R5K4^Y621*FduS$?!B$ z+x4C%oDMCHa-+<&$w8|=Y^4tel#HlQmG2eiwM9GRhu{tIsW2U~sQoYi6s4+a95_HD zXmZFyOH!+@Xu)!Z>f{l4D1JjdI_a*ej01i?f(O05DqHl!a7G>j$Q!tLDb{_T&!!;x($2e970M`pMVSB=2C}J+d)twMx2>?$pRa*CR!0C`d|a@4QYfY2g5$P>=FHc z;vX6qc8SsWX9k8Sb`iA3cM?@{vjJLwm}W1R(h#928{oYWJP<@`DXi0$I|c{E<}U2+ z*S!*uUYl@tI4fm5JB$;u1{DBD;zgXk-l+0w>#f+I2%&oPmOHoHfK_j|#XHc+7(sFm z&quLB;a*_`S)riQl6KGatx(W$u|EA>f+vXT#yktTtx&uz1mZ-egucOVRRoSFmK%wE zj^y}_pn^a_2Cqx_qe!f91FT61PBnAy>Ze_`xUL{wF!YDX=$z7T(3=(Li{;xar96@ZcI3jotdr|+2Ng-0{qvVi zLJK+o#7i|Nbxs8FD$WNpcqNs37?79FGIviTP}-~q6l$ib03a`&Zswvz!i;#|Q+DJ* zKwdft5fcmt^3owl4;%pG6=@=n_roDTUP@O59tH(@>1aeU0f0kFQ&cE*wpJ}*E*&@L z#X9ALL4E1)D#f8-P;6uKhj!?AG}eSbeJNG7@@heS>9~{|Wz<109X{&cl;bl$ZylUD z)1RO%p?yh*$-V6|>d&4WPtZYvo8z5+n>&~`KNVGnBowAEzz^_&D8MGOQgy;`gUl`m zOJY`P`5~kfs1xR;V+Q_C6Si4ePW#CebC%|JRTEmT9R%np6if>F)~!K(M+5v<*C@U;+I;T7R8ulR~ixi(u)fvwu~ieWAqAUO2Oz7_djHHHfY;i*ju*t*Ac zC1YuI7y$kGW&`}UA?od$#e!-RKSqZ9_vb5E5$aYEC$f&<|0k-iUZXqE>sM$IR;@Wl z`81T~- zN_jH^hf%K_j=LRg42$hYGcdAa2ZTLrMLGf(yUgnkF!bU_e4+mPM=lx}y#r!cXa){KpBox@o{xTw#% z2-lF$HHGP`!b8%roNVB${?^fn2K&*%%xJnZp7y)xReurudc%I#wTAJsT#-vG;ol~< zS0K(!14RVmB+^SIV!FGvEW`!)nF?5#r4SW)S$5f204GS^5F%^Hi~v5_gtM%@>sAIU z$)+5InY=5xD=vhX>|5JPtjKcf#rlPbA{YW?;? 
zCBD0>6_5%`%A(g6f#ywAoo|H4;z#OJC)%^6e*0oQd;8)SD!iY>GmehRLYd=uJH4?M zC%N>ytgl)o-&u$Id4&aQ1E<=8KdW%?pj<*+CBPJLeE(_vrjLA(vKlB0)f>%q_=nD| zH?a$jYtb3<*75Pt%33*`=|a=Y+o z{BQu}a#qD~o1RhP=>z35ly)NXaOUI)TO-MJJ&(@r-&8oBRzAzJAgeTW$FeFRnTNRGls@&9e&m` z_u-K1-k9Yf+j`rnnRS)B7oxXZkzDug$&R&EHAldnOs0W?*UF23{ymw%wdevtdNN@Q zrkeBY$z;;>?#YDNsw^~APbTBRvnP{sFprERt zkX3rJPJo`wQsjjZmxO6rW0NdYE_nB3yy!%i%k!XLPsR^{P2RI7laWO475nyNJXw|3 zgiC>C_xCL9Wem(G1D08_4h`VjlgYR?BtWm8j2|!Oe05H$3*D2+nW?=e!y(tbC(A>& zS5L;Xu5$MxV$ZE7>&&AklWD+Vd5nt3d!0<+T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g z2}#C-XHO>OU>+Gs#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+osxQgD|AmL49sv;Yfsjx zttS&0QaQ7^C+l!M8GVnk)K7RdXW-kD$pl%YCtD5BlUa(q+?y^Gc3l#yu}$;t$#~I; zE*GRHC;T=RLZlF3L0=*W1oDxV3L0LyKRXGbPufXROpQ3;>c%B;+< zBa?A$NPk`(89yFrV&fka8DmHGQx&eok?b1G0*k8dXn@(PAVQWxip!1-66aAuoYwHCfnLkG@>=0Nfv+@O`;R5=-oJ&-?sd_~T7W z6-KyC^R+~W`6%uzRotWSaQvXR;y8P;?YTbgnR&{U3f-A#eO#ck>+)Bk%gBV;mDnuz z8O*N4_xQbbAZc2eUh670`;AXe`;&9M*uepY7Mb-AmF-kV>CA8o)J zSZ=E3a@N!oiOnwqu>;`m{eVTOX5n8pU5_EdB@H`EmFhy}GT-lR-JkVV z(V?SH_9yMZXfPY}hTU_$;V!!K^p^2xIztO5d+^T(Hefl57g~#F&YZcabtBEs$DtD+ zE#+rEuyPleoF4S1b{8%DS_CiBPC15Cy~&_=de}eJX*~z_r3GwnFzQd@2AV0lWqF!i z-`ZHqe%P9wM|Zh4jw)|QTIm>g6n>wHDFk>Ir$S)g5*h9qr!!6G~ia2xytOe1xNK0p6rTPc6tWcm#wRC z2@g@q^Q(nAg**%2iC}|er@FL-c;F{(Q!yDrc<@INNN(X~rC?BMceSk6i!5mTNdz~s z997mU>VDW7S5lyQn6FhNs1_x^(}3GN85RSFuK;CN*RoS|urft9ukQFk9O-{Yo}n5! 
zk`O+8WgdbrQxJmRi$HP@xkJ`R7!AG@!K+yw>_P_R$`me~87Kvd=a8~tX$?z=>nLmEiVh65i7>vpvjVZxcy&pI_8)RuHL#03olUv_FY7{0hbVC$N) zR>M^_iHzTwnfu&fF7fuEdf4hd*q zjAVJ04TqAF4+R1(8qv=1({69touPxO#}m?MFrL-a@YQH>Fh(;gDjF-x>j=U~|FvFC z3yW1~$46`m3p!=^d?dSR!I$G~0f6vXUI#$4_M0}}Stw9x`0=Jgn@B~Ys`M6b8w`iC zbz)zk31*T=4w&~H0x!%^X#R45!Dy@hR5xGVXnNU_rs_^lcwdJF%R96ac zR5mJaD(XH9e^}Bbd%4%2xiqyr%VMIc-eB?c9GY65fFxN}MYE`|1vIOu>b#_muI;Rj zKnhF>Y0FpYCg%vmIYI*^M;liP8%Pc%KwXu>YRpWm-)hq%{9z>9+Ud{p#gW8*q6sHh zUaXpW-j-z=jd2>Ahp{9N)-eAzPGzQeUQ?=r{~5u}-du6XjHw2VM;yp{EuVl{?zd!_ z6#l#ccX%@`rZayrCUy8nn0TycxJ*`CUQLGYMh@tBDGUfn&0qF`#AFZ%JmUh@Ans`& zT25)CJJCQj4(lWNm~8TjG`-zAeANZ4VL4L=5d(B6?#}Swwp56h;F_U(XP(j1DTmT!6IUz{+NjivNG$Kd=jOuH_EWhacQoVbT z1c$FVHq12a32VSyjiC{$tiJHDYjb$`mSdaDFe(NJr-qH8=hh}O1j<&W#dJO7TRCo| zn?{@(WR^W4O!&&X_CVo=?b6fUdfF*xEwh7`tJ+WE=(li5o8xAi0R5z+ojD z!JPqMGC4~u3Xw- zG8@Zw*bUnygJ2UBb~-u)QI4KV(&7kIOj=%2mZvR)uE7o4<(c$PT^zk4b$Q9d#JL-` z%gt0)>x!|7g!US+T{0K@6y}EQl5sL0lk!{_wo684IaIzQ0dCkX8AHjPJGN^iEgFjp zwrd{b!lp7eY**llOJ;#q70D2G`H43{Q*d#^cFBy0DGKS#0bdj-x?sBk2}w$$s{&r} z+9`iYdO$~PS7cL2YW}h})qG*no+Gx~xd7NM9+USg4T|E3?J}HkK}5t+;tk6qA) zoP80^0XJ-yVN--|91ur_nhUneJgy5D9#WGB3yt;20)-p4%Q7bf2|q~}0l-5r8WA{R zy9~dw+K<$?9CV?XMJwhJYkUgmCZ>4&{jL^70nKgs;4HE1*GmcEfh*?TQy1+a)t$b9x=vE*S@d zPo-tA3ybZN$vI-XHbXpa|kVx^cvIc_#f+7e~+J9Dz7@ z!*;ou%4%IPHc4t$+ph&CSws_#m#wMVbu-W6_^%#Fb8~5py-0_3M4N6l;*Z? 
zS7cL2YW}hhleSFB(GlA{W`pfo;~jzF8$3BjY?tAT3nGCq8BD%h;)<9IAC5HGmm{{z zuqlQO2gH$~=7Q}qk4wS@T?%r+z;bzqfPNhJ?Xt`XLBdbc^+7gx%8uAB!>_FNBlT@9 zK5y?eZrCoz8y6l*HHNc8%i3vo%Xh?fIYz|*;nXHG(5zdV%n-PkgM{0@U0&WonDCW% zQAxPlkuKPzca`+2zjb!p?M#LmHa(G4QsETvd*aYt;I z41!J2AKT^Vxg;%)fFx;^Um`7x%mq#$WJ-gFh5r z7dF*7p2;9^#U-;+-zCc@yvdlDmmobp!PxOk27zg@2Xnv|1&S`%u0TSP(&Rvkg~)?0 zSCV-;?%O4glQ1A8HGkO`4(4N7#>&y{OornZGq#H-=ZNhxoN+-!;_)t>GA3Uxah;(c zM{Jj2Qw$pph$BPI1>0pFmxK#1Y&Vy;JmhE~^KkoiS>}Ww;U{Sw+)@M&cO!Mgb{T%9 z)lq(}RA6j3*E83*YSinvZ5CrgVP?mEA58y6+r3XlTm&s|2jQ8_gXj} zcMNCeN`|je{YUT@70=y}449RTTm+F682)UP;)A61iK;RG_9pyL5M>MCTcwjsR1!^U zvO_1dERqxvg)e$57v9!@JA-IKC`*E9A|RuN9wtiHmLzxQvZCS7`&Lz-X0TYj%>2J*K06IV+-7Zn+UAwGbDH)IClld*hK5OrDpf0=m|$ z(sgxm9=X_K&6cE4@?^&d$3*a5pIlogRj7Fa$S|qJ*0fQ^SK3v3wJ1$@w+C=3A+`jN zei1i(g`#m}_=-<|MUBt8y3tIf&X|JsFt}Hq43}I~DOEHAKq53hI@ZI-9rzKSUW#E^ ztYU)5(W94Qr&ij+bdklDw+4yq<;o?94(1bI2C^IKv$v^-{oZ7R?t<@5&<*mW04eV} zgJ*v6)jW!zI7AY9@WOH+&xShPpY>MB5L_{!pd~qw^30htHyMWLkRi%kWEZvDj0~Ib zVg(odo#|jW9%%;o^wk|ZQGIL=+^ra8cV`Rfqs#u4?{L{$uG=^7>Q%sl?A zGGVVk+-cheVuUsFo1OjoIOIHOUX)sJy%lkB1cuChjTJF@+$9i`XPpBvN8njy#abfgcPU)uxsm$4YTRSj+vlny99 z!j8ZL#S$9Ht?b7u?KUHmNKN{-5LBL3mOUs-#0EF!R1^R__TDZlwR?TN9Kq-JfaF4U z2SdkztSnYv&B|9}<|&0Fz-BEEN5M=WH+fbW_?uV-*e{H52EGwsh0k~IT(u%Sf_LwO zq|AC|+uOUtxQSNUtrl?gy9G`@rWnJx{LKlN_n+ic7^)=l6TeLEa;A&-nh4Xp$CF%X zex3yB!V+cp^?ox%1cT3!fTqFbNTBOt3sB-KM}j8=nhO#$dduZt0W4!!9U%n+!=^sa(7i5mARc> zx~SYqE=A=5+9`IfH<`Ap@5l-s@x)yQ$)%|5qL}WbsGMzZtv@QGmlVEF80iLe{F#?L9mFZf~^Joel?^g=Jk$3~OcMCDg1IHl+WvAN=c! 
zu%8({Uc$DWFus#0jXu#%JgH*yNj012U#wBty!GowCf~?yKC!lWSOWHlM)K*6jQaGTO`GVCO)@{0Dy%uF4Fp{033Hd?$)iv%%~?Yiuk>98QS-G z4!PahE%)BPcC>XVobQmGu$RJlX4-M%=w8jSWC!5KQLiQ#nj)l&#>Tt^svV+LB^AIQ zL~xLwPrQ%%0OxQz|42+ciOk#z2fO3l+0O2)o3THQJ8*QZaX6~-4+&ZwP`(u9sY?JR zE2SMlG$hPKt$J`sbsZo(wkt(rFUxd?S!BBWis|xl-U8NE6`j6eeYV{3M}@?^SBz6r z<=E{|P{XRkO++p`3NF=JSl4ps`c5ExL163{Wz1B0{pb8k2x83(UQWGG|{8itN2g%bv@QIg+0h45~GV-`x{6 zcr9hpWTK>$6$leXb%v{t(=qiL{P?yAd;J=bm zjimddluM%u^Iq+cnCOodOuC34lU1--%?{F|@o~9D*eez-j7lUeW>R9%t(4IxdKYsb zGlb7YfGur;JI!uyILy~9@|G6!pB%&uE|Es^9RBB{;xAT(CpyB<$Aq7c3qQX~`1xAl z=j()@uNQv40p4{5EDlD4SsDd zZufhm?)1&Oy-9zo+n-FvlXkWYzIJrw?>-l<$Uf9NJ?y7D^aklS)A(Cg#Q)<8_?-w$ zrFk9#){e#+OcLh^A_-jK*nT`cY;Wyo=N6VNzSiWZvnPK}&@$?HySqCYoEcBH*N$Ev z!P60#dZ{d0BYreMGMn=-H+CP+dAZ)iOO`KKPTqn}z?)_apsUkbhR2b`Vo@U|OlSPj z&ypBt|3J>nYi(!49WE|W&2O5G4b3(cjfJu0pCnwFjE#Ie=V{H%lc$PETChI?GWtFCtmVMn&Ya)+(Y8*s6#=z%t~h zB3uF3sEE`GmMY3WZmXjF<19zjR78@Zjf%);tyM%HuvHO#fMv*0MYsU5Q4uLsELD_$ z+*U>T$61aF6@4QDO)TOXYMPLat3#{g0ivvmPvzYGV70MWCM9Y^OC*{!!c9I;QXR|j zvs{vQJ(Zrx`Q*+!=4+$Uw74uxRk`vqSkB78?sz9o;IL}*^~!hdhJ8hq_?5fkFWr5q zN~HU!dD&m868ppKtZY=iU#b#^1LSRO>TzIG$dkIvCclx-l8~v1%$Z@LdMa5)%~)BDE_;3{>@kh(|UaLx3f5H$T;*^gHyKpe%BO3wc4{pP~Pzq~Ah z_coyVEdr#DNk!CMMMApGYhnls%;SkSom^0oGmY38%DM)Em1D49CaRxqz-w44cE*5P zO>9e*PB&tk8T2?2kr$0zJok98A!}#B&C#f6G8=Mm5rzN}k0%v(M`PzAvmPf$L!QCd zrIVxaB#4-*JDNbp+0g_#GEu$10o>w~m80==GmE;TvYCrSh`0h!$^|AlR*;>gBr=ue zBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz=W;%9a=zHf(C(!Y6K!zo0ICm2j zMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvcu}Y-m9;-x3GLgB(DzbKoD=CL4 z!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z0>&x|+quUojs&qNhnUCHaTgU& zM`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{-s~7@AR1UFgmiPshgrO|j}@ z4X89fZWV+Ft$v6_I80!UiVhK47-G5P-VkJFGE4i722=_dDz-C&yDP*ri+Vm3xm<`3 z1)7~qj2~)1rC`E|k(xN_h3<5oB&9+uFa)Vwbf*H$N+vch=cNp1w&(6|3>_l6FvPr_ Pmz|kJe82GJ&o%x(Y2dEVUa+1c5DSBrEdA#r(FnS(KmSoKctqQ`K6(MafFqm|IER&O=EGrhZw zdb)@1o|Tq7j2#Csv13l0<6~o=lh_U?Bu;n)4j7v#*f9<^B*w9^Q0xFUubcq3iIbCK 
z6UV80>sH;Wy7jtMb$eDiE6y^nzJJyK{{OG)R$V=Z|NF%s{DGx?b8~CI^|kjseRFcj zsd2YIyzceqdc)~z`^eA#=>K^1TT%AGV|%}K+rGPQKbC!N>2qJ$3vY$1wnu~h#`(^< z?s(D~otpH<=epDWXc+ImXVBf~ogED}d*kK(uxE36e!CYp4oA!12M0T&o$2<@v@Kcg`9-iN`#>!FE9^kbj@{h|Gi;g^dacKM<6&>mx^vi{_Pc}rTYKZyxI5e&ZFP2r z{nMlIRtt-B&n=_jWV(D4EOa&o-N__w!J+ut!DwT3&2EV07sH-&-N8;TzG6LIY{v6Q zU5%y1CbE(>^p90{b2%xgdl42^vq#`l5xm}RNF7&O{bB6S)0Kp$1M9FD#dFK^|LTUH zd5YyFT7G_smw9Nc&V}Nm`(#~wyw;A74>#a$e|_AW8Qp!@=Hg4ZACpj4%ro?ol41@3 ztS+Wu1pQlE2AH4pdYf_M>bd2;5S>7QsOjSXD4?bE+9^T!Xd{FbPj*hl&Eq?#k`Nxz zeKH7{{*Xv3@{yLxZ9W}Da9cZrxOvykAPH@{Aw-C@8bpK!1BYK}z|G#C8#iy>+zi1r zQb`apf$YyDyw6AQGCSeLaI!n@cTWv^C)?E`&}Fb6W#4gcvoq{&p+X2H7>@+}JlMA} z8ctE6)*ENEE+@4T6ZPN&4LD@SSL^1}@FW|3ZK99H|u5j1jJq@!#WvUPy&`JP_qn5Qk=PV4l$7xgn zACKS_Hi~WuzGfwy!qc2N(#`!pTjY$px>Hn_#0&UqqUx2CDj_G;-4wNTlkE|Tnx{}y zqZO@nYrV!3>83;gG4y>VWvn9?MQ^>Wtj)m*6UO4yI0cej&vuH9+85YHdG zHgmajl7 zK4cTi>1-68e%j@y|I$!xYUn5}F53ve1k=KMO}E^{KmFlHEt~jYycBJGcwQ_U`Hwf? zjUg)pjxKAcUbwE^v?`9eWm>#V6edmp*OQ;je-O&j_!9)eo>Fof8q%12O2H0*PM z1V|hE@TMF_lxcHsvd00fLU zM%I{Ci%tbMw(Y3|4B&s2fei5bX0Qh7sDuz$gvcBtFrgr<1uT1l15`@z53S-2gA5PQtR1}SJ)MG9P!DjCE80I*S2i$Yi-1!Pc5q<}#{luPqYWjC1zva)=i1Kksl z!U^Gc@k0vq(+r3dkb!i1@k0u1M#wg=5T9!LDK-e+V1*P|aYmN@jSZCzwJjT@z>1J> zXGR9`LJ9~Go$=Wt1vb<;Q#LobHYB-i0ABIf)Xvx-1r{^5L<%e@wbIQ1VGJADqk(-A z9^Bv-!i^PDKt9a1A(F*KFKw6F8d>wtCgI2=AuFVS@Fun-#$t{0 zpxaR3qYNvgfU+g@Db|uR=`WCl+aLw)tgGh~#32P-{iW*~UHU3KxCT-H8>9eJF}bM6 z{7Me$j5}B%1-VLu%>u?I7(Zr43Ucx#Y)IjWI-Tph&|ThENI}kp;DMGxst+l^(}F*? 
zKnmI|0Vyy}ex+rH6>#xM(&iW8#RERD!V20BSOJlXamPf5v4MgmPtd>$D`;nm$oCW& zu>vZDVv@FUx7t;3q$*ax#B^;x@6>*g*%&|ttNxnM;?k3PR{_IUSge4|!v`xMG_<_< zVFhFe`7{G!1!N!{FI~|H| zP_x1ctO)sH1!NE}tbh>F3D6!Zu%X78vboK*A?b}3kU>nXgw?JBLQEr4ORT_xk{woH zMW}A9fK0*)DH6mW&rqqhe1}h-ERmKX)gJ!INd{SeBX(_8*ne-B?e38_UO{}m2 z%9hZn7_kEKU{N%BacJVj1}ivXzzSSiK1@>s0r0?=rW>`w3h;28i*f~ohJI>&;!6b{ z9G5&<1Qo2Xf?Oq4u>$;<87s)im$3r68z{J(1y*pR z5>`NkV7;Aya$|)R9Ic8KxFj7ihyem%BfA!du)+$+pql!j<@p8z8A9G)2Yds845a17 z4=W%;$ax8f6_9~+yl@RxWvswvgx$mn$o#5f1>{Mc?Ch}u8){ZqffXTNtbh#Sg%uDY zIsw{a1vb<;Q#NA-HYB~V0y0Pttbh>Hh}04*u%Kjz6<84pW{;*$%H|COGGlckVi-l% zjCiH$Qad687OeIa5Z)?d1>`|9RzNC!&t9G9Ha2`k7|QWYz}kD0N8oO~H8 z2+`*kmhCP^E36>rqAFH^9}6){-QEN%tl(&6tN^#DW~_jO8H^R609(r$1vc*>9Cg47 zh+NEA0b>J&MrVN)9Ib>EP$3kPw1sU_Lpr8@iR^BCa^oAi=h5+k6;B*oqbCtu4@cv+ z;UogdNE*~2J)+VcH9*HHz9ATo40PbnVfmGVcmAWFwZsp?a#er`HF0H$B%o8`P6pEz z*Mo_0LSVC(pE~F-Om<%ZYx1VOzY+YGMnJQt=t4kGLa#0aq{jAj{$f|1V))gNLZyg9 zO(q{uyd3u6FJ66T5y2lc;EqsX0HY8AEe*Zm5Rh6}yEO^+E7}*}C0UqjdrJL>1RRJe zMJZW>|UDas{QMIEKqjcR*^IA1$x9OOiXIpN5$V^S#{NT%b zDXIvlMwA}9Y-@p;ngnmIB9+9?9uy9&hKTge>nYmwoWH|W>|1c$DuOGTPtQqffR6~i zX`jEUzEy#n?!?y6@^(0tSi!nw-<>JF08NzJB~Z4$&Qg8r;%SYxK{r2;p|Y72O#2}s z5&W8Pf-MqEf7sgUb%&kFTXwQzlzZdxXxuuEf4a4Q`gCvH8*cRC%SyjWDhe#&E|$PI zJQS?i>J#1`@keqidFW2iwHedSsqv`0xzU|W)8nOEPxSg{&Q9rpyk%HmZtiWbhfB(#Syv}CBG|0 zzw-s1Z^~U2pd3$XU-|S|L2?>Y2yMA6?FKvw_qnSS+55D=fi5!X4!Yyc#@XHmy47T7 zW21*ICYe7rx_cWwBZNF9qbEkU4MyE*i(I3ToP4a7d9k*xew#?{vgv9cw`iy2<}Pxt z0e8x?^5Z3|;=`WuIV!Zm3avvN0vwmEB%IwilRIo}T9MyMgK*)wYTz(B1l~?V$9J~U zD?7FlyN@GW`Apvo%4)dQ99Azd@{SdgzmCcI4Yn^4Y-rRlm;-| z+3IYMdlPh!ee3looeo;(>(-RaHF)@5=~jUHGdqVU@fGjM*sJOn{PJcMF{<__QBMQZ zN59(2fmmj;m+(7-{^Pw1SxxK3BSv zWRGWBSi)aMuv*zLsrKUe+xmk+;;nx#3YmlOi1kk?iym1}s*j_Kbv>Jvb~eyTRXsWH0-&FRZR4+no8!A4zHQ|{tw z#SV3ML*)i4zxo+AmXC}9+)DBm4sAymjd+DLWcG@`t4{1 zbBeb2UeGw;uh?2x&P}{G|0dzqHEo>H5;3G=GwLZ_1hCWmvjC9&h^>QUybut!x)Lf0 z%&daniTDj+N<*L(3MtXEkX2WyEx{f5HcP#<`n%ORnn@n@sWQ2+aa-03k zPhs6ARiDDCQd0{m0yTV9*qEi9p@1verJ^e99Mrt4$3?S~{Tth*w9Wpxc<;Tv?%>w` 
zIm<<|EZ_2P>=>kIp6_QP{u(VM!P%|BFmPox6dzsB`9<5%oZ(0myta$9B%RtVej)`Y z+zn+GqBZMl6aH4Y$~-gfZuZe?=7bB_mQhp^f8ovWy}D7-@jB-xox^z%t5{QEC!o|M zyf=btbc3Y}D|v`Bv!2+^yO49anNNDY9_6R4*La!<<{)+~jC~L}BPN)qc>tNL+Rg)YNqXx{9dUpdJ z2`Q=R&NkZRcV;}=K^r`J+xcFnlDU;G>LUnKlULRS+|@(Vbnyv-~6J;|mD_o%MQB7r-_-)z8L;mi&7vKToI zFXp6fuh4?d#D>%j&o2eGExd;aVF}qcNmwNXcXd_hFXVOr7EWNE8i$+k^HE5%JJvfB zH_`u9L0pQNshW!KMsPfwYvj=ar3i`-1=MNEJoic)j<%9sH7~Cy)~DkBhZ=Qo-!8bX zL$-ls@WG(Lk)DY#n>P%@%I3p3iJ9r8o)KzuSxhb(GY6+FWLP;@j^NM5WBk)lCKs~zBfEhm*!Gqpjl`Z;VI3te%z zjellfh+-E(YkVhAB{%J(1qk$B>EULfT1D9a?~CC6AW}9T)4<-z9j0sBX-WwWiBb1-v~3;zXx}zQJ!*1db<` z8;O06L>WQnm{&YS84*yNNJRm% z37sm=CKTo7YX|F>deDNQKTJmFlzxNWtUzBZ-)1T0ktDDqlVBBD=Uw>ipdv}LfBv#b zXh8>nc&X;3&WRviYF~=o>;^v#1M<>Y=I)6EN}Kh7Ld{ea0OY09&0Mrdn9(xtq^c$^ z2*^t(A!35zKwdfo>45`)ydq5m@_sY~$V=&}z{8**FCC3YCIE0qX^ILhe6$vjmyVnB zVjoC@Y&)dGs}zTVL9va^AKIbg(O44#^`%rx%2Nb^LaDW&!E`>9%k-tgNByBA@sv+V zhsnJrH0({E7>&_Ef*YglUW+^DI6oCt*FY9h5+Q~UMFBRMm8uhl8)SAlSfV|IRG62J z8Tk86*s4nd()_M!LhH4I06m3XF)n>6v7{!vvjP4qa*7G!vopyn)EoZeP(M~hNBPSc zYwTK!ddJrn8t`yk2DwE#cYh&hg`N1X=9q;{t7PlG>)+KVT6*70#YihVPC1#0kye-A z=g?ucHJWDHvLUr6b((*D#JLM!tWEOIYcxp;Pt~imWVZQ}B#p=s#e%Pg*b1)*e|cqB zbjr2aatdtKrq?hq*Z-<9TrdbvZBjtlva^zm>WX;M>JXtnZlDi!3;0$8{I?#ofm{?6~#>ThK;jAEB)Ms6UYslxC!t_<)!Re|$%SLJq?ML5__M{Fc z+oMUZqqyOZ$tl0yu-|p9VZ1C?-Y%0XwYplRk;AkIt!ypT?1_a0FkSG{+6 zE#14!Kbs0Tf}g2?g;@$wk(XtcjRkOm5IkR?MuL5b^+oyWzz@zcQ0rbLI4#SbA(OXN;DDm`xav4fHky$u%a)_;w zW;Khzf5K!@*=qCOCO2ognlmT1>s#4jw&&WPLT(F z(^XkJ6%KYa&pIc+0w1ZchuuuJ{uC&Yi~SaMTLzep&9XCpwnM?x(uay`7*1UT%fonxB^6SZjF_=ovvnP{D)4L}VW~;K$ zP(7K92hW~N$^ny+XjluKXk@%tcYxsAlS$b&q$IDNOc;>0YrmdMAS^m%5immcWWvA< zN4553M_T3F9;PP~7*aX2xhFf)VtX=f5iRWriWM_!doM2(&3X4^GC@}9$=U&WGE0#c zMtouPWW4A^m&>=k`1NG`5ZL5BdomeG^j@)VPsWo~c}=(!SayHU!d}L}Y%*Y(73+xL z-IK|o=nQYEHaXe z7yFFl+mlJzHl!r4o=g~!wQIkgOdu>eWf3q!_hiDr3`e#0WbN8|GJzqLGn;#|HrJC; zRKqqqNg6QzS6O4{-IK`#S*0gi3DA>SiriG}@Z!n#HuAsXw@P}ilkuVxT`ovZ#t(t3 zKF^*^MlwK8#*gb{2OMZfwfA5}*C#q6gxOFmqJkrM0 
zt2(NY&^?))8B>_@UMIsL*S#mpL$+5>#@B#sYf^=juk^wq0o~+7e!X?1q9hr;)CjZg0K6InSkBix| z$0U7sF7l(R)E2HIHThC;Ng=hfeHA8}?W5$)=MRTi6;z!_PC& zeLk;xDP@}6?L}7d8q)il_J_z>%A#DzGYKfL9V8EV2}G+&>wHicW0$_6bU)k|ANPGj zNfLD61AZ_3@usB;BV6jKB|6lqI9fKGq560f9*!UM)(2-VwmsJ;Ju^?a=%PClt&a^C|!>5b2Ir~T2;{stJCSU6Kr`I)9_ zX`-V%EZ+vB@dtjS0S~a;RL$kAsVNehUj||az~B1;i&D+Pzizr7LxxKlc9tsDh01B{ zSh++V^vY>0K~aa)QkFw!Dt1)-y>0qz!oT!;e~+^PmAbf$=JWb4H(Jj1N-_(T1@3{R zjFOZ`xf!hmJnc8@Kwy{+CLC__YXLtetWUC%fZ*_tc7N0lF6xjcMHdi65v@qD7d+`QS{YTb#mPyekb z6QO^j3CTbxj2Dqxy_A8=?we@3YQdrST0^>GXR*xP;2y<`sEo%n=g}qZbMUPQUc>T( zV=wy~1BaaVRboowsvdln^5o?tPr@o?8ot?pTUq8Rv)d9J)lYb`D`MH{8Dw9!uEHfe zL@5s#CL5Kc$L~b2&azWo+Cn_=leVdt3?V%D;|L_TaI;b{D7CXvRx1e_9!k~60lsoI zs2ct>g4eMeRn{x&e%KmU=BkxuBr9<}WF^KD_)Y_E^JG{I9KHgSU0usg(ZR}6DGC*C z2$xWl*(M|V-;rmiMvf$e4_}!Fk%o!y--|$U54lOTX;<3PR~$FcBH|ES8^6M;h{&4V z7r-9eJYTwga50MKGbAu8A9yEC;9zI8Gu_^q;%;gZFT%oV_Hp=f1g~a!unQTKD=u7c zRfc9zSyXTqf*2D}TafqRgvBp4T(TWyQ4A11;!46=kTx(lOH^KFX!Hr+h+xgbh{SN< zBjF+xIP%QjHWH3yMp}?W+ zWvIC{8O-D5YBYfDSN45L2!d}qwHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8@C zXQw;qOwqjpqcLeTUQKI-6==nWY-$BM6c0HxvwESiw77=ghV&onM@`M<;08Ps$!=Qk z6;7|7O%eT62NNg_F5-ggMR zFhjRWcv;}30R7=+@5xTSHqz9z{^=bRYw+hTEiJDPE;W_XiO}O_qq43LnWiYKW_^SN zf8o;9@+^yqs(ORP({pHQc>6w;Qj)Qy%G#3=!x zfs&(*D}@athZ3NEL8q`kjAUCo{dvAHlGsl+;W*2SRa4K~vP`2fPGhq$mgK=2=HJ@g zRQ5k3xY?U4E}30$xvR7$3xC;wJG_||)0w{*lRErE(hC*6hfi{AR5wI$jC` zLQ?aW{fyGu(=JdA;-2=Q<&;Lc6Ne>?obYl8u;!~SU=7O|7eq=Gu?V2@)v%@&mQ}*; zqJnR@z%?wJV%Vq-Be2vQ;2QSvvNaX4!Cgp-$Y4;Cu$Tnjb^&ZS=7b>OC+Q%9(TE@g zFsiQwv;3m(OZDyr5*)tn*f7(uC#(T`*k-Mf1-|XrCNqqR0m7+aW9YfH$qa$ARRwYI z5Dt~LiB1hN%bpM>eB~X2FAdVUmcH}ofbG)L-sJ1PAsb^d)GlMYWEu+m6E|KYK)O`I zWB|zqZvLBeqKjX8PbN zS#JM;s{8RR%n!#jHk$k23><2wo3-Zj_;qk zIC}Lq!DJ{$Y?nNqaW1QMFX$BJhV7bp=CEBZ)+iPem%Rdtd=(Siuw5f}?%1x8v}i0Y z*sggHjO`jCK~XSzsivID+^}7Nt7ls>bHjE8rp0vTfG-LZU9eq&ge0YfDS|>RdV(9a zE3zphHGkO$axjA%gB!Nnt_$16gm)3!WjN!4NU12@b!->U)Dhcd*c8Kt1LDX~bHR3* z$0gxHR{^^~YIrn7{j0+|-LPGjIUz{+NxBFS9#YWp>xS(z{L1QG0$q5_ImE(`8@9{w 
z#)St-5A4BVxM1Um?Q)EY0m1=mjrMD|q!*-c=QMv1ckTQ9fBxF&n0Pb z1SFNMS0hY_D+*ad-zQ0!j@T}l2bQ`};rdFwNL?-{6PcRP#oYGo67m`6vRYS+O{Dhq z;)Q$5-Q(30H*D9;Gl%VZaVm#x`8`w$yx0-jHFD>U?HWmo#^QqQng`k4ROW{53S4o? zOwxCG*<7jflI17f1Pzhaao?`MwAh0=;EMu97i?D`AxUXEu*OR<@=nYodpB%XWK&3L z{<5D@T64p8kJN?jV#2$K?J}HkL4>P_HO+mfUAeg+O0h((YS1SCO1S5tUcQ03U;xNn!2w-6?LIT92F&!qrK(a%>D|7{YF?K{j?CF{-!6F^o1j0o%h7X5S{#9jNy|rmZg8(7w#zf= zpSn1DMe6dBhY133!*;ou%4%IPHj&!bip|n7=ESIQGTtI$DDU>-!8`+7aq78LnBmKec>SrV;phZx63gq1_%eN zk)!9Ldjswaq6wia38D$ZUdbq+MAI{2BzNbs zqT$c`R#}g(Uh&sE2g-Ryp#NL(96>e5TovVsmUg_*`pKg z5lO;FNC$jZBTP;}4k*N;K>7&CtSPXi*P5MRV~^>mU(SkXm0NCvNiF1im8+zEl2$?L zE+$H^HLG-8ot#H5_LNE$0TL0)6wT~pJ(j1?;JZG#wos~2^8}D#Qj4uAqKrZ4$_;hC z(nsm;_5dy=#FhZkFXD5h!G8b$fuWLSQe|8AaeBRrP!&JwlHaH5nn@;H&>dt1#b%y*~^tn5FN}WzVu}` z)Msx~4|?745MANk8KaBeNdZ!Rp9-G-#aHtvf?^#Xy6~cMAkT(6)th!#)I*#;ecJLC zbo~%*^$_+Kt#80f6kPPSC;h=_s2Sw5*S76M^@%-jOfkyN_9oIthy5$x;j;Hyw{G6v z9-L2Q9PgvwCNHylIH|ry!EV`1s$<3MND(3>B&pW zbjlW(pp!n(n(k~5daaGoa5^3h zI-9-i9=gD3xN*L-vK+&Kv@!=*-7@xOyN>j5^X8_}Zt0c~ z=SwMNmak6xYvm$j4nDmH?(wo%R0NRK44KuwoFy)?TKc#{)TGa53k!+E_LE}nC{sMA z0`Dd(cN~VSEf~qwGW1dtcG<8rpqm#w zH-m7`&xCEmaNtrP*QV5{HL*EBATCbTbPB}eaTgOL&pHQUj=;r0+-qgcOs74QKum-$ zwORcdt+b}_55(k1H<=+D12KNsB@mNg9Ro4*_;bpHy#jH&WgCe7t68re4mn$zM*{@n z;s^|x{TeG`^0-SNCeJztVvfLb%8I=z;v=?!SXWKB6v;#%F0na4MNHnmmKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*C4#c#pb3np6SoTtL)s(;WKoiCLDl+>u24eEKOCToCItOBo zz;nuqy#n#k-3r7+A5zOU6v+VsG1=TKGh|~R#t*v$Vlu2_AZ8waPMNS*AUn8PKI)Z2RfaF4U2SdkztSnYv%gR?{<|&0Fz-BEEN5M=W zH+fDO_?uY;*e{H52EJ~&!Y8vqb)c1OSY}qGJvRx#FYJS)%z9?qTRVfeiB{UJ7I5{u z1x`Gs7{j>y%?X(IpX6j1swDCgzfA6Ori=HQ2-CdVlU!+jo&@Q_5@q;}eltV_gU^wG zrorY&(ClIhP~s~`f+qxBX0q zQA-P*jX`%Zi7$uC;^$?DHDv^{gJt3C^YFs#$9vIRN~5tNelJ~u_I;j1Znt{NJ@>61 zUcMO4x5-Y}i{U&o?YMDxujaM+1MuUhR}&0P5z<9tt0%($K?Dc+`NaFE4{#2r^N+;D zlgP}iaIiDlnQre)I~n_vxDAI_8&^kF{vkoD1Im}9Jaq}cWTmtth(^(*s?)b*$%r{* z$`cO&gvMTy=?=5Vbomw2<)yp@tge(heP-JJYrpar;!Uf+$W%FY+Z5EWDsdB$%eIzF z=a>HE%&Bc;4ZbKac7!r!syynUmQM#`j7gL&&9 
zvUp>`qKgJGRRx2d8oY@zXVf6=cSQDXl1Q=O*hQ&iCrJ zz8vQJ!~Qg9=LomN!lN*f-9(c~|I9FXmF+`h*%v?dO3h;h%9$TYm0V~1G*8|5`MjVY z^becpqt90zf$YP<{F+A~8HCKq%mWU{@|k&1BWA_S?U?)A7;44rZmC3%^*&}& zulFHR%*BNa!xRzt?GCeH$au{MJ&czP8Rx!4N9u ziNLF|xs$lCMuI33buk||LJVs@GAhC}HzyYvk@)RtM+_N185Q|~nURYfafFIFxyXoQ zAZA7`>SxFl^Kl~-$c~Cs$=cCUWATOWdD%^8KmGE?T>9Vp{?0Q8_Wi{B@BXc?z3=Ip zzyDW%cM*Q%%J_>{z$!G+iCFOhL{Bb7C(^Iy(boiheJlF9^g=+toKKf-RX^!o#_H$T=e{b?V}r*FZaZxb{D1T3TP&;*vQDJFg;T;9GcYBcBf zEG+KZf8dgXm&KPq|Ei^{U_X08@cgOKXpo>2>Xxn+POYPVfhQr~OqDV_?w!VCr8CVo z52TYoKji2({reIUMlQr9)ES|V_OtU6Gx$3u)Fah|qMwc;_gfJpO1e# zl6HI<96*D0PIWgP-yV;)>mUDKxN?0w>TYgyC)0EW=`n>jkIzg}bD11Q<7eNjq!U5E z8K=L=zDa+xbS0I4(m32)U4Z#^qqz@m1bq2Z@;o4-+mkKjETNNe@ota>yp?0=62O1K zt^n(>=k;&d=?+S6vj@4&)v)wEbimasAZ}k;c+G+Rm+Xc3-AJR4!TfD~)BwgXzuDiK zEI}U~oA_iWbv0JS$g0WY5GRZ!y#&}NG8g5zdkyTp9UVuB4qjBvQuJ-?BKrv|2k+M>zyHwrvWbwd}tcP zS62@Jv=_6{P_zSNHR<+3xCQp#Gw5#g&W;9~y>Z!gcvD%r0`_1($6u29beRje(%$F~|8yueTXD`@?C{xO#5sQuJx`i+1swt2hH)U4(`9{N)Fu z#?oa4Ke^q%6EXi<*n4lUJGixfjtC(AY7zOd{NUmAdCV6wh}=n#;K-5EOA9BmzpBgy z#lid&*rPbh9YL`Ui3GE-jUs9P$*cFGWO0DqT7`ad1wvGWZ82BxO)i7I_;ZtXzDkH< zI$1A-L4PuRdFp1bczJXXebs|8|`&ywBDe%F^w;QeQ49eW`Eir4X=h& z1F4IoUwf$?s9fzH*mFE(n)6F!pn3RTOe1s39~&)R1oYPuiQf*F9Y^tO+}pghfBJN9 zjE-;ZDGfvAA*oZLYU23LDdnSL#KCbfx2YeC(uXrwAkij%0bGVAg+d3CPzMFbbl=m` z{E7bNle~1Ugnf_qdfS~ek}{#gTm)PV88wMa4xztvhi3^dG(Z%|WEEhsy^!*N!ow;o zrXSC;W9~|6O4{UnO7ZMQI*9?N&-g0Xhjw01`{-<4lp9Xa?|HuYd{BH!!p{WYWOv-} zo*JMnE-1!i^$X5wX}xrik5fX%Lt!yB)oHgsO!G-zEHvV|r5B*J_Oq$wWZNCtKOxUr zURb$qrG4Gu_6yG8Zw`$&+J}xDI@7&=^TyMsyS>9V#h?4aUU(~9#lkP6QtK*>|E12H>xH?uW@L-IcKz< zF*XG zDLl<}ewx;04gRx5&LF@l{+g(IW!-yKrI3?uaVvn>Q`yx#^mTRaZ{6%~mThmw(D#{? 
zkuN30WKF~P#!-H>WMZ%8LTy(HD;;=U!xkV|gLhbjkqjxy@S?<)`C@|BqJ28zZ`hUu zG_)oeqn(-e_=UE4^r2dG6MDFPkPl)$|q(?aEmj))mkA?i&UlfTtQ^Y??OoVLSx2^kE3G zO_i5dsoMI_1ri`_?8BRK7*TS~y_YD}9V&jA&4KQ8)SOk2b*RU2!62b<9zoS7^rs!%+l0#|V z1de{Y7}h>!#iG{J89W4czp@J)A~;Y(Jz zMF0ehI7Zr@>JC!L&;i6^-a4kT;qifL7y$n)k1fFOo5311lM+H;5h8PV#00#u7EtKS z7bWAq8X(Y?tMa6cDN{4QC8ez{Pw;GqPM> zNq2@5$n;Wx+F1IM4{Ld;1t}mSv$|2E9yUk;p{V1<4=J!20WbUKSf?9OK!!<0Gb5yc z45^cyJyKvpZOaBJup)#yGnHFW$l0Y4B7=hsQb0be6QDg(U_*^FWpk5jL(&^5AcJU@ zf$fn3LQEr4OQgVpQY+mI5GGtAdo*-FMauQEOBONxG8_8=E@xp3)D(o6xa#e#Nzy>KGZ^LFs3fLnB_%Sn5kdrTITDI&&u3XE6*6+$t|pjZKY51Vyq9ag~f)Y^U?oz9xSYI((a zyhz_3@B$UcWUcyZ@?lN=(Aq_;fJ`qOFSTF=WSV^9QZ#*FgB1{pI=yfWRxG?AVBA$; zGXlm67-O!q1%NoC8^}QB$AJu1y9&sYI@#G{1vb>IumUSWs5kS%1jryN#K8(HAVhQm zw8si;sBxxjZgXu&dSeA-5MvWyv#Wp*(}>g(E3lwshZR^63TBT6!~txu0y1N+5vdfN z0s#wTBIQ;_1}uQht^z``GFCtyG-CzilNuY;b|@}b0U5JKtbh#B5;_%Q8B88DIe3_e zY=sr%M-Z`PCWjSh-V<=?v+!_y)uL;`NkKnJmZ7sBQZ z1bQAB>(V-`pauwl%z=&US{%Zvza~^Q^#h9)1l%$}MrQd@uf^N!D2E5V@GK z0>%akje3FxR#?H&N>~9ELNQ6}eFeynj;UWFyBl8t`G)TK-k7|1!uquvU=X2W3zq$F zDP+ndJ!*iCQ+z`(9{sl&yz?LZ>{I^iLs+f~5P=b^615A*Y zL^vU^+4CibzH*9|nIi~@(>DF3#~~8%Um5|;o}vo@Jqf+K5Re+%*ZBt*)vlPYh7>AA z9BMN8fa2w_2Y>Mz`~&`=0e6H70~mz}Xldvbhk(?O-5R7pB1GgLA>b?87vLrNo@;x` z4Tt}ftPqE&Qh0^WG~j48^AVaM%^?92870ANRof%^yPdpMGMFErD-qk(q$zYw)ub8F z+)bVZza7DAELqbZq#)Vy5amZBdR`;WTfVv}i~z0sX^5bF^K4;vg5YnOQ6(V6 zvM_9gbt)c^{k^SZ`i7c)!pqE6_GpXMh!ELPlx=jY5OplYh5@IsQzV*Fmkh#@p zj+IMOxk`+VN%ojZcG1{`h~;>Cu}{LC$E)P5WV3~F9b~TiL06t>HkdWi^G&&{0+i!P z?JJ+YUXTqtK|TuixvO+CMHj|wbS4|!L3iBQINRHJyffL^Ko-gmQd^FuUwz7HmIp=;J^8CHlT_errGk*;#T}lg!e60~!F|(-Ec1gfK72+p_Z4ZuaO&uVw8EbH= zBrHv1U1<%U{BQ%-{bLBe{L-~z<@YW4D$(|hL}Y{G?#}+#LLht%c>>#C!8^5^i17#u+UULn=0< zddQ-1`qr|Y=AQ+C>_==JB;$pEu+^1NNnl1-f!~Sv4Pi<{pcTq&FiFNID#8=LTb-ks z{=$eM+AyiMyAvNBF9+F-ti^0avt3MHox0 zD7flz(d=aZ#&#)fvwtq$dvC8hxV3-Ia*-^{xBMGB1}U23@@FId8Z9NkXU7`M7i~jx zh9kuUyta$9B%RtVej)`Y+zn+GqBYysCj6~(m3d~|-Rz^)%n28;C4jE&V$Yl5dv&9v 
z<8{tYI*0QjmL*#`;Jr73YjlI%)*lS=5NBpRv72`xXZuBxpN;spabUMUFVK06@_S91`fK9iGWX}Qm}kYu4(MKcHgq5&sDj*aDn+90=3 zQj;7KqXx{9dUpdJ2`Q=R&NkZRcV;}=+3sxiw)4GC$#DtL>TU{H9m#z)5bdnMFEwSW zLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~gCzE1no^D-pQeY{*cj|P>FsMo@>da%&h7@+mf z_+fZ3zJD;bk0VThHQ_9=CqTiEjs&Eg3 zsw@(?Q~b>a+!fB;P%n#-h=m^TIyCui%S1eVB5lbh!B>LeUpS$QgBxUD_0Ggi^nXNI7Zd!;HYlHjEF+4w$bim*6p?hpdA78e zT3s>9O1lIN9}4V$Sxm;!2WOPfYO15!Q{X*?Xt`_yKh}VU!s%6Pf%Bbs_xktQv}+*Q z{X}@02-tXU6HbK|N4ZgE+T@^BAGXqm14>3zXkkX1v_pOf9*9qd>5xS&D^qO{0E&*L zY8*H~BWQBSLrXFnU7e=7dITPdAE-xH8ZhJMB6!f-tFlEu3}@sqfV_c=mtx)b`Fu7+ z-Y8Emt=q6i&LE^D^$ED}c`kJ*eKQD3#E3IfFIix7*YbxOa8F1hJkcNY&}EP4{}cbv zz_3e<#y>MKM6rvgY6G_|5m}KvilKl(tz6kCQBDECOX-OTI*(9QU?Om}e zGkwE1{g>Xe7e;36R0}5#Zahg{f>k-J`e>&JU11!-$3kGvsnKNuJf5D55bw#p*uN%S z%w2eyM!WMLss30vD`h-8yg>!Pk$4f?Z@kvH`KLmt9=+u*QN7(3??5MG1j#)-AH@oV zd+rIcLP4h`?Vjsfp`hbpSN3-ao*=3l9Tp}?oM^(^Lm*CcO6VK>Rz=`=V!4sn=SYs< z2r38^WbnF#KZ?W(H^7<%B`5U*)tN+BXXorH56vL<6u~D#lo51}dBszd5dp=CR1_ea z(5YtbP35r67yH(4HlYPWf0&HUDg6e$S%JP-zRgm~BS~OKE(ByPlHU$0k~I70FPnrG zbO4B#YEJ5$EX1ogJIvsxVL)Cw%iKMYKxwlcP^g)z0)V`9x|xfjgh|>n!ATYUT^Nv; zPC~>4!-2eX2+{)w0C`242;}`}2#}Z3Re^^=L0&ob)2Dl5 zv?a7B=`gvsU535s6QeOYNN{7c-D`2@9OtK^7Uo?AV)#%LV3S#?I$^j$W|xB{+CxaI z%=zy(VXH0;Nb|d@39Z)-0`wGm#Sn{e+d}uw2KcYYDJF=|&Lpo;Z}^Wx{aY0sNNlQh^H66*jge9mJ$4UjV4LqsoGLYrnF?X`IE}d5ygV9hu8|Q z2!DCSS9Hp?*>VbO)uvYrb5*uxzSV~RRb#kd5T4qkfUSE}S2C7XhXK%^Z#BSw8=~I6 zSuChF@ndAje}BG$6$4IWYy9wkqWbDJx&ys_g*xOew2%30UCtr4>^Xq;NNt5{y4C$n zwNcOW0{m$cw(Av{b|+dA2cC;}p-p~Ay%sh&yZZA?F z`pcYR`0)n3$<_;OMI#1;${E6TAXe;esX4xtqc{9n(>}}EvZMm87*K!a29|WKWm}ES z;uMKzNzV986W&~px0KDxEmznofOVZ2sFxyIKB3=%H_g7w2tMDC-T#zs|HP*-v6|M5 zjc=X9SwXm{&$8rv+(zcu|d)1%&4S#Fs7zXBc!1RVcCa3&*!+zJbhVimo zkxMM(cl_C2fjBb_lqE7(;9;D!(z_HR<8`h-QvnOJ6rv(8%Pt!W-~`D7A+m2WC-ID< zBeEvF&e2wSV=Yc{>33OQwMf28Y{UJm!a|`0OEhu~{-VOcgK`ORl>k$~@%^Xun?CYE z%4#Thy%P3scBkFW>Ct$rJB=4mV|T~>w`gjtHTVhV)|=P`$F=ATd24yUW2%vk!ALfF zqbK3AG}HZil52HrGKqAys)~?FT){0Ou?KtQMyAziVQ(8Wz&!}}$M<;zI3l{TKI~J~ 
z$C|Q>#DXi1OH*_hd&}Y){6uV*H93-)p4nL-uW&cTXl0WR;$*9iS((6nSB&oG@(` z)FdmGbKX4}FFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i!ll5n`+FAlG6rUo0n4n| zXF%VcOvbe#0ebah{CGL%zC9UFRCL55Y=rK~_;Cp{wfAH=OU=|rk z#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+opN`2GGS5-x86NjySAQ8n6%27%{^KBS<#cp z1X-mgTM5vUS&F>eyPEbk%33j+JH2}{UUZ_%1?kE7A#l~_*^|jg2I$FnvMR3$mjcU; zjAu_KV_-HJu*`~m2K4R8WLz5(pjS`Ek4M^=8L2LGPbO!k_MQxfT=$+V57}Nl8PB@P z-D#|~H)!9ziz~C}$z&QRc-@7bOyF9Pi~QEfgfWksmKmN5&Hs9kI|z=#GpZ zmoQ`O$bPEAwK$SpgIT~;_G5P8+N&Gm*;d(1A$D8Wfj^7j1e<;pZ9XKA%^;lrl~3_99z;UkTpdv_C}7QWoVxo=HG~ z?I3x`OCVZJ7B<$UFCN_w_r=G3Up$h;68qlk1AZ_3@usB;BV6jKB|6MTac8LF9)*YF z2fY=?*^6z@^-0gnQ!cvb&P40u0-ar#zY<+WhTWAI;7ze6-*9AG8{xfvuN_F5R;Jgw zip_qbQ zY(S+hE~ELpzRQi4bG?$(s513g88{x$m>anDl`;w+jNsaGggMolc304$qfhk4t^Tk- z?RN*AbKSuXy7Tmw(Qq=AT$FJ7^y$|vzn13bqtK2Im+~_oSh)*KPW8JJyNecnErJ(o zryRq{?zrDQHRzperz9uYZLN*fIV<{U3`b9ldz+nMcdHjS;yG`=o(KEVD!SVr_Qoke zK!Kg>F>zpL#eVZ-(mykd7x33a&oQ6X%fvVxrVQ?&LsgJ=9`6s&aHn&5b9x0FM26Bw z2Z8iDx!lolR;tUp-XFn|R+X)B51pV{Tv?0D73=Y06fdG$8t2b> zbYJ@1@=Y`owct>EtsxV!K{nE_|~Ax3bJtX166cs-N&=SH!Z@GswPdT@tS=yu?Ela@RK**LNaVXW6MP zZ6O}`Njr!nOg#T_1d?00St%Hl+F2>9^#ThTe;UE-SdJ>|6?H#sjeEg|jqfzzHcy7d zz~L)E+10h|6dkPW!U`V$JMs+G$dQEb;Vbi*1d!j0KynYcNwsPF+0s`WFCZ1irRxV# z#WA0ucevWPfmR!b;M({VR@Fwi;Z+~WzXo59;MFV-b|HguWeOM043uh&=aCv?VHHb= zt1omceyQP-?I??4fbbDl64rvWfx%hy@|r>?;crB+=3zu)IPj5h5el67*KzuJr^bWY zlCFl>)iCfUPVEN67Z(VeeKj>`5hHWfqZmTqYfeoD!=xAr9LipXnoEC|4ZtO@mjpPXwB320x8WO-u!L-!9cc7>?<_EOcKcf^S(pig&7J~UJB43ZuXw+`7d0WTApPwQB`lSczO;^ zElo3MNo3Fz9+>IkI3q>#3JrCua1AI^+BUJ)87Ioh~V*g$e90qPfY3j4!I zwzbor=L;i={bUo4v%FX}^}H?1G#cYHHVb1(9;{*hEu6|sA*QBO2mdpIo4vW>k{Jhq znAqBt%m;tjfIGaI7Sox(7?V2uqqgLvXzUCchwnxX=y)j%2uaOf_A^RrPrE=hhb7(nWyqkb;h11YlHO3ugI6->u^$ZVEN2}{@%>4WW( zL9hu5I~^T@C`ZpFX>kN3NlTt^EL5U=f51mKj@T~Gq<`w-=oP8Uhcly#xnaB9Ol7sM z7@J6F=QdA=S)L?wVMDw=g}Gt7WSq>$q&(M!?UIpM4%JD_4cjGSD7kaTc8#P(V{yTD z&4cW2Ds#hj1+JcL$;=Jg6_^%#Fb8~5py-0_3M3>cjjjrK#p_Us(GlAf*%XqRzwBp} z*4(h&whgu$7_!A<@}XVVak^o<3}@;fq8V0J4ZDChIk%-+ScH_p4cled6vKuC;>b{Q 
z!FHL)CE)_K7s^`JL$dPtq0y`(w#zan1PMP$7XiXU3Oat>uw8~ZwaE;D3pq%*VY|G%g)reO@7e=}8@5YtSFDEZ zGSHN^d0nodgCGSpnKUFLB~xR86bM)PSy5)=)_ z^K-;@S>}Ww;V0=LKzJCUMvN(d;jnL);a6H6Llj2ZoufBXq6V2{!haj{4=Sh*r7f8kf{UT%n-uoFZB0L<%O)`z--oYgXyHIysMA z?2(|fp0g}9L+BUz4Z--vqKGg1G_r*6`sCU|sY1;YK!!;zwx*32zG{&dKsxE8ba#6I zml9%20O=QT#aAdg_P^rOUs2<;u5L6_sWYaa_tV@fPlih_s#pih-3UMMLq5F}!?IY# z1d*dhFU3x+w1w#ci!E;p64}d@OAsAsF8zsu#UAL(Zm7@RrXKXV;~}~^zB5L*ypsZ? zd|D(t{fn>WbE)d3;4ZwV9LTewPW7hU74;COPoI8Wh#}hQA+n3wxqzYKVgp{H;G(xZ z=?_Lj%^;t>wr$79C-%TG#V9-5n@ArW_OE=0%ieO`x_Ntha6XlBypMjHyv*+5r1}~K zyJa(}j$N{kcUF=mcAmJD&MXBn{{*@yJWc7U&#*V8Coe72DO-4=HBRkc+lj#>l2hdw zoEdjF`@P|G(qjK6Q{`RV>0LX6PWnJ=y0bm#wKhh>>3B5gZ1%Q$==P`K#`(@lIke6d zQNm%BDA&bu3OG8Gz-i4{_qd%fj6*RVclgiw=`dzMP$ayDN`n{ z^wGrU(n=S*XR@BkBv+n&W-EE2+1_l|ksfZ|-1M?r=i4S_R;#XU*WlB8;2tlFMMVHv z&5&8`%UR+QtEG=SL{0i^wy=;mY(FXHjxxn_D)4Tya>rrF+Jcc>1B70eT}?zE6@xE6 z)c99@YX`))I{cbyhx}oO7)pmlHt6OB&&?p*^D|-FFkCTWhAua9?FR_N#i^Q3ftWn* zVuIvZ=RnL6xEP3gt*n{pw0A2I6MgJbASOq;$qd;Ti1EWNftU>I7>Jq2pHn966^Pp{ z+dy2}BJJ15A?Hc+qSS)xt%!>wFl6>?tcc0uE`gXl>l}zV0?#Qc_Ns`F*al)*HKE@* zv8JskT&ifIL-4PN$@|wbLpBCt{IE+PCc`=gV&?JZlnHwU;v>5jh_O0$ArKeutH|uv z7>LQ^E`gXl>l}zV0?#Qc_6o#DZ3D5cns5oKL?2Q!HbnRUftYNTmKm}!5aWkk0x=oZ zF%UD4Kc`ICD-a*uwLpy3A+=y#Mf~+Wa9<2nNlOyhv^8O*#nXb*&PfW1G2JMeJv|rjhUwu zk^q~vJRAixf!yRdW#DgS6=1(G!WsB_fE7O9y>qfgQys)3_=SCtlv&SgduwM9H_=MF z)dH@5x4?aim@N;qjLi|*4I(rgzKP>vshmYn?*v>f zmEj(MN%vfjzn1R#u;*NNu+xh#S>M07H=bMHvo+f64cJ#B_AaZ6#owQYt22_v5#5vR z*0eXAjK-Z|cdIucuU@=ko{aF<&Go-(qA}*ti6IBptDsKn)sWcrm$Gq^N3@DAdEK-| zkvEpc&*eSsx^eN2w5n!`xC1p)B;I1Ch$|B_MbZYEDdO(VOp#b3Z&GSp=m7j}coAz= zle69JUZ*?U>`Vszjl#07C5E-MehoFNxpnFP><9nGJnUyij~B3QCyehTN~2G-6Hltx zd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_usJ=y zjg0nEIM_ZAG5^8eg{v|{E5AV$FW!mb)U-c6&zaqU#~8y@1!You??tDdEOa&o-N_`r z94?EWmz~Cx5y%dfg|E-U3$q{ZMXwW$#)|m8bP3w`c@DYV>Mi%&w|aQ_VmRL>J7F(| z^USp4#^Jr1_hS#hkE32qFf>I-7mcl+2>%BW9OUN{@1s7zIh@Wv5))4%Gq=LQ&S+=4 
zy)*4(>`&r09A0f)9aZ^<1g#DzUykzBB>FR^w+%bLeaw{qsx%EYhT{#Sv)qt*HaX~(x}3`SG!D1^oI+MTy%)ZD)_0W zLv%DgF1HAK#iE5#iKN9$N(` zAQ^APnbu*;eveLJOXhV^2GOYa>ZK(m|Z2NkC7c2xr2Sbgqj*F zmsl*?dP#~5( zRLR=WQe*LjOD}`PELC{djtu-s<7@E$ZCNQD@Kg9HWKO(N=7_F@mQeFm+2= zJVyLzpkg+wVQ%a`obz&{iI*%Ru$;UVopCqG72AGcLe{&AM0 zLPg(u5eQQT=oSUc*wcGX@+)uq{=3 zGo5W_n8w(PMlPPaJ=lZrm7UbY)EERW0W2wlj$H~!pVP`3cOr<%=MCRmx zh%&WWCkJE+5K&ckK#q#D19DVkB64*=Dx%D$oE(QCLB!-u$HxKL`GgY{MJw{k$$>?V zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABB+#WRZeiK?Ch4j+;oUl z%rvqaWe@~jy(;JCA`_8YtYYTo8ml-G#G;%WnT;_%4#?68Cn^ftxyLGwgomhjI_{$4 z>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABJiwptYQcdQ8~mamP#%rhgii@k%`DH zRxwmvV--h&h{+*V@pOC~u%1{&@oV>3#gXt36;H=qR6HG-Rk_6~j;?F05{M9SIm9ZF zl6$NYDal0U7OTiQg>$T82oO;@#447Gd#qxq$VB88s~D=Tv5F%>#N-gGcsf1~Xo^*z zXh5a;aSJUxX!Qdu!eIh)RCI{Y!Vt?P_dt-D$t>+R8c-=mK( zXC@Kf(kd(rMw1EJZ+953gIAW7wAJelJCnEUbjQ8TPH#LOja&JW=+Zy90xr*DT)JT| zJv}eqx%chMl^Z}M3M-}1D$woz_$FfvW{&8Cs1AULa>Q~9pex~VjLR_;)I96BI^P+ z#L3FCiQ`*)*RI-Cwd=L3YWIw^My%nydjD1b`~SbHU3K*W{_humB z_SWQ*6O-Oxbp0F7_D8d|&M*JpTX#JEttk88y}Q44$DXe~b}swe%IChY8{Q6AZI6e8 z&2!zey~(saJ~8c2&h}=5@hIMV|FE~&KRq69^(U>D!mjD*-gZB}eCE?L5-`?tvrv3Jl{mH05Y~MW^%m%&T;O+fMd(sdwAo5Z3SEY!wd@i2R0MCZ8&b#BnZYQ|dE!L>qSDBV&{$jW zVf;$M_`VHTj^c&Z;y=CdXPybgNAGFEhlg&YQ@n-MNz_tE20q+?`~3CsKxT9gVw;OE z;eJd)S#fJOM8{CXX!@uVnai}_-|EDTt2wKUS}!lz&OU&(-BIrhDs@oq@Cfpk!k*3XXod>4{v@MyIjOLiAO#<2z(G5{+P6-pgHxk;34cv2 z#gX{iTGis>JP4tQde;6ts7|(c@VCPUu9K9 zrbD-0Oe(3%Huj@RDy;|CTZ?I*0sk(7SJ+ISb4H1OInS-+U6&H?!iDQP@A?|~z|S_! 
z29>Eoa6l^oB#v5E_?(81G<+%j%~j6u@d#dJqv*EaYgWQ3Jk51}n$~3<{)0u%xTiNm zbxFL0zb2|)IjItI3brge<@Ho|Gt>x9x5p@A_E4-`;D2=@{Z-1)515p(fjk$L2=VUx z8*|jlS+`en!QYjz7bS&WH|<<|vv?_9y!Wn$ZnFgl*5O?iVdPkEbUI#4|39&1zL=n` z0^r#Q?ly?FyIQqk|8{YH{JPOsOhK>%(Wg7vQU|M)D zzRJz-vmbu^8I~V)^BatpqKyyFi)ADK@dmspgvD{ayV%NyvuR#I>hfk??7fLIHd|hz zW^n*`rV+-bX+s~zLr_Z}h7fA16b(6%X5=MJ_*@_X(#Af#DTfiIzPZQ7O1lyt1BKsg zz-MeY<*(&uHLidg>hwH%%5`vF4pl!>W z*z8*iB?)1J+E&6#tW}YOOw4JwX#z=#5=Ll5tR)vFvYPw^J{iGHW-G}0O?H)tYyBNi zDI}kirlzOo6TGI;gnw`DtMcJE7v-EqLqD~?05}}sxa7$qQUJeT#^7>dqFb6Uxcsri zS>swy^e_XtA8M2W|Jn?@5g`d4Xep$^|2+88aqwlU+#&!1MjRt+Oshqwf>Yb}lm`Qp z_`n~S!5XBa5<*}RB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqZZ26fm(- z+s!*w{bV)A z>NdpCv&&i$FE1-KBQjXQ3MnAGi7kn-m?ICGkpl8bjSVW6DJ{gRT&nbuuN6{2*%JB` zYst;Y!flX(PS(|P3gVCgrml>1Yo#t5UAlO14WwX&6ksZ97uB?MGWT?89&<)Y&gp~{ zYM?-YJn7V+5%Ew zocwCr4lCf|6W{q~+W-poJ>Ubd!3sJKSOJlXamPd+KC+D?^Ti4}nIiH%1xBoZ3c(s8 zpbE0uRdBc}R^XB#$sh&@fW?5dID{2eKnCT~Q`lVvWC(eG9dK6x8A!`ZffaChSGAPu zrAR3_atqk(DzM>&Yp^QsDzF&=V+EX`Ol2dpqo4Sg8^}QB$AJu1SOIxbX9xCJfeke) ztiXzpFIGSX@xlrS5uE_-u>u=voGF{zTpN-pR^YYZO$ITxxi-5B2r-RFEwKU%N_JR* z6`^4EXowqNgB6e&bB%~(CDJQsmpTkt`_Cp}-7b5)Hg2=4fbdosD$f*jSZ%y ztg_$m603YQ(_|Gky9y{dzH)c&YYK0Zx;W!ud zn03h;#eUy<@)97zYF9z7lB!q%e$0#&1SQgTp26CEox%iS3trH?xH>L03FU?g%uojzzT?5%vb?q1BFICHyBv$D>z&U zE1*IsCTXh^E3Dv1Rjhz%ueANV)4l>S8w03dg%uE5n);#5yn(>*rF>h!k0)t={Y*Gs z3ar4T^daJs;~oL`6_9~+df^(Z%2p&= z0)vpO-at4~16F`(&J8QT!*MR^(gNnQE_vhJv4UJBRj~s6m>Da`$=8n+5A*YL^vU^*%RMu zw)_Q%Fe#@O79#GX!vEL^X!aCc2FrHDgKCLd6|9QNQZ zUVUc~!5=o@u25kBqYwct4ZY$JkQ%aEgEUBJiTpzgd`AZI1+*@VA9hLnMJG zA~xHbwlYNUnFbuGW}Ph#LpgzSnbKP0=F~>Uf30tp7VF8ihT=?TSai;IM?t9 z-?YzPRo|+>O?P5zXn8xFN~~brvhR*8ArO?UueVg+hIm?|ZP3jRWT=@<#WHO$#kK&(hADledpY%tY{rEuXS4l;ICEUdl_=bmq zHCuhcJ0t!`ZY2-BDY`ae);%#9_qH~B(^-1Fbo;6P;MD1v{6JnsCn8y|Us!m@>)}1Z z(F;x!;qQ)Mwn#KvSC`JpUxLN8^n~jJy47T7bF+^w zCRx09eBU;FMhJOIMvsl}7>;|hHn~P4IsQZ~^HMk(UzRBZn6CD5i*`zG?jiRYaHl*g zKVGsbKI|!XaPokPl`{>tN zIS|WC_7Z+~IC!#uA?xXnBH*r*vJ%{Q)SqUYwX=j;H-F{&gpJ{v$_D<6A9r(!ht6Jx 
zYR12aV6C!YQticycMOKZ#9RMf6fzl8S8H7cFc@w1pDv0kMOcNOY`~3`c|Y18_BYWf z_4uo0CCGX;j!iz%Ao0*xAt#ko-qC=5WvS4frknROZD}z|{+MmWy>OlWe@C!Yw+NVm zytYEAT*F&-O#h-$p8x{(Q?&_>?rW*ir~Op~n{`=Dxr?if_p;G6_++UH9H>b^SC168 zgjy8>mVv|AJk})+_^Bp55P&P1GD*SX1tiqPZ8EcKq}hDtufZZGRdKgJX#`fxJZiP` zBw$&K^^VuGZmn$3;FH3(2S>N24v^Q3H8@ogmL{$%Z84Ic z%I{n7Rif=NMiCVYpp&z!_w)1cNyTQv>Uh$3g%vj&A!A%AfOR-@B0}S&$m@Fa>EA~1 zYAe&u2J5a~xJ5tW#16DWKp^6y0v?zmhn0k+#FKPDVBu-jIQpGv1#^bB_g>IA;IG+Q zSk6tnJO3u()-`RM(GoGFVxuBgDJ)$Cu+#jU09jcf!0)G47Xlx#b&!k~0>V~TLM4Hj zRq(qJzadO%2(&^WC3+UJ>MFG*Yo=^Hro$wEuR2F_$)hskvaK@FDh9WDZ`M*?M{=9} z%uiw6CRLxpsZy08DO^x4{k|@2%u>!!z!mLMQI&NLYTni3qS?v*x$RQg*5GWs`+Cb2qbe(>z=!1ZeeDNAh3|M7yi-D^1yIko#=0NH)i#*ufMPCqwew$IF*$9}g-WQLlw%_38TnkHW+8L#6iteElA?qsp1^p(dOR zZ@^`3A?GjjRZDP5=B2SyRAZ0Bqw&KvX{@`d)Y(6X;687ip~?%VSawSdTcc=tN=eZK z|1je37I(5+eo_?wlZraF6P$EK;pgq^-NEa{BEC+!M~+ zP%n#-h=m%^wKU65POW^R|4A>-a~}2gzTFntdfGeI+a6fA1ocib4R;(RWgNt z9ECKyd;6#2Ci=fBh>L1!loR+~1V_WUMjkCtilF#VK%J({bFZ}FXe;Se^YY3V7=E-- z2lt(V`#NM>O$LgXSXjUZg9b->Cc=F3L=E!oP|>Ko2T(opu2Bhs5;5Y;)JqoF+_n7S2HYRg2#*bheRSC)`v1f~G%)NE zqw&uS3{mVNG$5$Tc(hyL7%KH<1GGYc-YZ4#lD~9e$7Nnt)4uku*p->S z;hX+T@7W6@Gj^(l69+e*q%Of~&P|c{E zf=!j3|48-R364&{DV;gv+2IW;0FFc_n#nd@x#yj>kfH5Xb@)^W)uXrEC91dE;$1q! 
zpkShkAq2@iJRcX#3I&~(w0o{^g@TTYUD@9yc!H>IbXb@m=Fxk>gk@LNFzfr1QPm+(iCSm6d(lc40Jen91+I-Kb0NRJ|YB19QM=a^SK zMHvxLoJd6hvI(6k&L*@*RC7}2L=dmy$S{LfQmKaldFd>3_e27v&3ZtgW~vGR^3v&ME{YPS z!bufv9RvY+=_Eu&(0*TP;dH=L;YJ79px`)th0MH zm2bYl7aH(rT?ToEbngB_&8U(GQK*|xW!u?+rIjiROZy;O{}vg4GKxfm%ekZlDC zpF@Y))@Yh(%ZAjR)M@_p5$7&^u@>ofYBWg-Pt~imWWM>+yE}d^#8!Ak_{%H4qEoKT zmQ!G>Hob;{x&C{N;etVUYLf!Wmfh84Tvx=CR)_2cKQ|Z%zSRK#ZHRjNX0f2!#E+37 z|NZ$2Rtz|ibp-#PQGN9q-GN@eLTQy`4>h2D%%|&e4zXp=0kp?zD_qm9?r*7$dY%{H zPn)n^ugJ7JDQk14oWEpHn*>FJDC~7YckZ2rlkrxVoqLL)k@IQqBmcDtlX)wg1LtZh zKvIV}AL<@ab#K9m_${@n8}Vqb>YuJq%9{~5jC$p8+?{A+SiG0{A(?v=L72wgt=_EH zJvpA7>7k>%QAED$q1#%TyQip&!p9r%7F#c{6^$4WDrX4WfmpG>t>*Yvj^6NRP5Ufs z%aRJXVnF@58(7k_mTfgQi&G@1LAEaFobj0^ytN*0DVvvDuCP@A>pJsPuTPF};7u0< z;qwjI{ZHxkPkag!t7*;H_|`d`6@-iWtc!3B`CL<&zA8M-<*L7RbjrbAv^jM&-5yW- z-ClNe5BIAR=vNW^X2X8hwTAJsT#-vG;ol~&R4WIaxq@bwUx7F`4HOZ~a0hK!h{6?$ zsSp?7XDVP}mO@nIW!Yt80h}OtbBL@VGXnT@6Hc-E*KL@tB%5*+X7aA&uDB3lvTtoG zu_DW@r)h!y1Uwc$QmH_<931P*wlrUdPc>mMCslF@)%xv)N_-!!=HF$9kjFSzonGzOxSZ^9l<^w6{bf*Wu4A96TtO5LXE> z1svahTEFQdA0!nyuZDux%8lka{C(%vo7e@%wIYQ*L(kBInz*tS2i1lfD{~NHc=>R5+@{EILIV^i5Y~?Nm6} z)jaE*{3?8;!X9=r+4@tUL@xGQ*lih*b!_1?V3`&B42X_mwLZ$qmg{oM%_Ts|6d9n0c=lvc4w#H& z3v(2P89H^zm&bZ0i7uCKd-3bZ_#v># zd-h~9lIXo+-=2&otMZy~DX{GRo`t=PfeS1HmU&LE$MEXOWLz5(pjS`EkC!v#+mrD` zMMtEuvK6`~=uti$zWRbRB0tn4h5o07=8 zCzA=XN>8>LpeM5wd10v47sbk=Tku{d<3%UBT#%lO9|Bi>o;{h2WPqNGC#&+Ba4E3d zpLq6UG6t9oNc}f7*vOBI*|J_=@b1a@(N$^-w@${7N7|SXI?--IGb#uCpf-24wBpuO|}-i%wbSC3N2<49sw(>jS-e zvenvpGJzqLGn;#|)w%R!ANAb3C=(>plfA;QLo+}}W+`w}sl#_&=>ntd1vKgi-W?e) zIMLyPbY%PxK{_%S$p9T0PgdnK;Syl*j!eb?lmBY#$oO&R-I4L5tJD^*Bjd*l)RFN- zMMtC#AAFUJAD1v=3RCb?6|TjR>>A7huCgDq3)f!V7|*uKW~@;Ksvr2X2#&GY#{Tu! 
z>@^t6Ud;OD0Xx~#={qcw!QW}vU%@AvPF`$bYY+`T&p;3Qyy~TtX>y+z+4B2J@T*Px zL*y)FQ7+_}1Qgf~l1ID*qSa(!V?Fxf(L?ZHeAM^FBS|c=_FMRX-wS`dX{o{pmwIZ6 z4)ammDXO@~;nDbEZ^d!;V%u|l(lhgvi!Qn|(fYVRXV>MgM3<3q*p=8UHx06dFY18s zKEKxvBuy*RYhA@=zwwD_e{!}r8;nQxH^9im!kLQ7&oos_6WvZISs>ImS3lN(H?!PS z&E>4ADH5As24V-m-}?cJQq96YYq}mohD#cDmMYbS%4zIaxkMiJ%4sY?QHQo^rb%6| z6*~8^)5G7xKk<8ikFx=ly10zy^ZG6~TF%UBQlrZ1>Vc(}n~ zjy~0&vFwKq zE5Kj)gATD^y3DakS~Xq z!k)Bn?hQu$Nk$M|1&qc(*Ty#1761iyuE)fIofZ47)9K*UC|<%}6FtX#RxcCdbeM9x zgU-N0N_%oJI+Y=O)F^$~z5@0m#SYMkll^YaE&>o$mv{YY1S?uqwkLgbf@X1LkzNc% zl=EdnHDR^8bmr?WPd)P^-tLl5nR6Fk!D-Fj)fqMJLd0(8F8QZb?m~z?|J;Sl+|FI{ zPpjNAm>a>DrB^Sr9?vHVT(0FE$gNwwGwr)^_8Gh#Wg_%%JS7?EW}1Q8a4^2kkbzjj z{9Wr&yo@SyT%9kXtJW8IBUNu(%N~JmMesV79~^tx-xxULypOpqJ~E|lS<>?%T9G^3-Q2D z+C^OOkc5fnuSOubg`1UvL8;xyq@K#vR+a5qpWca0zNNc-!Y2n0Rp~9>F&GYI>%_i76U-!$95C-Y1YVe-K)#pIF9R5iw)#(ZXXnxd zi$hHf7`2|3v+y04mX_BCmzpFiGF}{&%1;5_%D7-%ONYNK$% z)ba!*$x2>ks!;|kN!=Qgk0JqG+gTlf6qpp!mao){eD1@U(ZvuNC^_1=QrJLpC;{rK z6vl*hZJO~5?=K_0@f8~}LgCX+kjZuMN0Dr6r$5hJ67$lUJU{V~M3ETbC!27T<;AM0 z=WSW0(HN((c^FIba1HZspWjsW-y*oxn=3Avc^O#v%q78f;4d0*mp9X5I`bD}Qip#i zx~KD&(!LitpyQ=5AS5+^+0QAhJ?jG1Ans`&T25)CJ8@X1$_WPnYrgIR*07v$L4?W2 zk*4j-_L!n}gWJsHA&64=DdD!?aDi)BHpQ?}9Y$cOIlwjS<7I17go|omOW{oJx`1!H z05%+RLXhy2bO@ewmYf0@)z^Yqe$n@(diMee4$nC@%rxxj)_@JRIoI&;ZO1m5VN?td zP7NDF&#g^n2$Zc#g0BW~4|(^k(J!Dj(WyaZ*%QKque^guLdUZkwo6ZYldt=#8e^Ag zo(y%Kv0X9^1^$T}FA^ZV`1EOu-LPE)Z+TPfumoTrESiZ6wrd&!H^im}#S8l-Q)rd2 zUq@`05X@|it7N?lt5lDA1G%zx*bUp|SPqc1IC?Hgiz6UOT45|ko}*0GT1JQ+v0a`? 
z|J23ND^i!2)f$A6CT$0}j+?2h))iwD3GGZT86Mjuvohp5M{Ji+&%%n7XAawCI90xe z;)Gz6c}QMm$`&Zyuw6n`$(=j4Ya}fiiwm}E9wY#^O9m;YD&rc88@5XZ%(cQLv$^CW zFAH_Vb_J%zbmo9B3KU(iU4g`fpVHj0U6D;8srk!(PHD{z+wIha?P9{`5!+=rtw#%?7h7AYAk)h^-?J|!`!bQ0XEnLGxvhtExqgh96mt{@} z5`K~nA_X14ZrCovudMbX^{t2ILhRCY!*)5|xbPs^4CTS0(d+rm3pypbVY?ioVt{bK z8aaAyZ88gfmR1acW4n05)z_6Av0Y9+;wFAydDk8&+^}7GyJG#=E*Z+u@3LN{ZMq%z z?UHF^O-eb_@z^d`a45DXIW-+*7OJ+`4cjGD6}-7)y9UCgme^t6u4#w>4KW$Qp&@qL zw@U`h^ueVn&ae!qm#ms~LD|S`TBXb;!-l~P+a-fw6BOQJcL<^!J(r}#5h(k65tqG0 zDlcR$6f2xCRXRFpFeDuOOa?O4dB%3hG!*#foUmOoi?G-(p{n4`9osb!F15rC*sf`a z01Yu2Vs6+j8SuiucFAmN#dgUcp|D+!o=ei=2v{er>ZoiH;9c5hGD}BnmpqQ8?w`6i zdZzX@5a(|DcDb3#YF#llk=oZsOTki@+nEezp1rYMGK3;O@r}1)(bMrv1~PPJ%QB6+ zW4lJuqOrJOyXHa8Zz^-cb_K4uzDokGczKryue4-qkGu&Q;^Ot>?v}H<;j@a%I8*JAFzQKewkS*_5I%vub z+hsW8f`~OxEr~WBf+)or5o315b{RIsu;G9>GSpnKUFPwM;le|*l_6tCY?ozD2oiph zE&_yy6m$;cwr`i=S6UtA*GhTJ`TSwK9B*8BF#4%=+>&jTau*i3VY?ioVt{bK8aaAy zZL%KQ)zuX4(o<9vI;U~Nb~*V3*tg5kt36Pl!)lIKIZ=$eBu;ziF6oBX=^?u}z>&CP zIAm8spKporO+e}Kl~GG_I={YRjNmUTp1UELPgXYSBZ#D{i1;XCn5Qg~cncEV)r21k zqHJMfWu=o$R1!^UvctlKyalL;DHggE_gTKP0e1({giw|Q(L_jz8Ds-!ZAo%>E-M=T zyl<8D=;{@&{x?jhb1rpr>eg8xe5TnzG`cc~7lBZ{R*f>*k>L(RTe+f=%{ zNKH1W%^tNISN!!S?}`le4)1A%$qC2-g;*3wAM=bx=vuR5Z0s=|^~+fit#ZqaFsX%n z@9mVdPtqz#UFAlAt~IN4U7egqF7|K%Rx`G&s__*`DQXEQO9bEZ$+d-2g_FlfkYD4;2l9Cd%1E6qJv;DL>H)F zAiJSHdz*UL?@dPN=J@Ue-SSQfkn;Of@a!+XmWL)(YwkUGX*rN*L!Ic)daLRoPM$nz zc?-IJh>m&)`-|2$;pGZ0`rFgNa6Hls^4aS;cB1;kF1S}Q%Fgx{(npv5E8pR=mtePV z-QFIaOJyAIq2DI2w0k(IzDB`r*-WZq#q8tV)ntX8CoZKkOF_&(g)RzDQ@ZLi>`m#( zE6Q}r7M^I0Q~TFWVlav1M0p0MCcUjee>9u6*}utDc~5V4&(5%$KG2@+Y!CbG&GBe9 z84tT#{p~)wz-hF3uDe=}Zwo~Pae%>dU9@7@mx52D{&a>eW?xOHpGXt%T^ zaK3U=W}@nrBD*?)$5R*J)4Sk)FN;M*09nnDS?#qfaf#K^#~q?3eKuQINF27G6mv(J z;sq6Ww^+I3Fl24PNUoNlS7v~E+)QSKMYwj^qDFKGzwTQ*AimY%*Hk;?4?DzAIxMn5 zH!pZ$2I0P+3EPI@(ki-N`H*W>$A);SP!1YS^9>{SsTK3{>D=;J&EV)FjA%#e+N z7(eV1h{>>yftY#x1!clsf%x$ACJ=KB{u#SP4dHe-s!d`*+h;<;Q2(iO5 zMiv0K&PHbLot;)Co6JWwp5k=)LHywYwnGKtir?+8KV1!dWT 
zvP5ifV@^f2fya?HLVIrng)i;`$%X6=hK>PQS**T}m9NIkQwm9d%~~Fgf|)>W@`5t( zx3UVbUl`#Ge8X~uPlw9+?gNt5H|n?^!7uEAq|AC|+h=x$aTBeyTP@(~cMBYQLNSJM z`I{3k??1`$FjPt8Cw`gS<4hOtH4&zHpC`G}{5%QLg(b@HoBU>o2nL@c0ZoI=kwDkQ z7NEpejs#B#Hb;W$1e_x&wS&!(G%v**i*;Jx;HBCr_pJzCrU)RyE^^7om@QI?Ce(%F zLcSGK!JDLXfnVCA+!=^sa(7i5mARc>x~SYqE=J`6+9`InH<`Ap@5l-s@x)yQ$;GJb zqL}W*sGMzZZK(ndMGriqb$UVi_i3Xtid0KDJ|~K}oChoAZqh8=NkZg#3fAzzsi%e&)+ja_HPTm2#XYQ%0;IEO!2gsU@>M-koQo%XCh znvN&kQSVHDN?yHq*CH9=ubLZw*+gS3qVqoXZB#*>HmV`98?RvFB#&qnUGj!$jUsO> zji1YV+70949cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBMBb#-xX?cMVR$KPRnybG z?S8j6+Uib+gU!OSt|NxEvT+SHtA!2e|Lh0<#v<%xMvs@UZ6}QHBub-Cv=dLN*nCpW z=J^+ER5ow@x{=8@a+^=AZQeL?zSqf2k=VSMBDQ%mMPl=2irD7O6sgT8u(UNhw~dVU zGT7hQ7cu|A--N3&Lo2^Q6ffV6;?!&~JI9&bz9$&NR0U;Hp6*6REiH97hrQ`Ez8nt3 zmu81GWdyQ=W#PF+cuDr-1L$?4@k9~7S1v*OKF=bzTf6Q42iFd@E{5}MvJ>`VIL}Nw zZXDXJd98IH`~>RN1VdAVbkW%AiSU0I!G3-|@gC{}oWtq-BQfzLGIKlZ?~Zq7+dH#v z#{M+!z@fFq)lrpyNYLtl@|7r0T>>y!DeVZNDL2634pdMa!KE$@B0y;DC7JFpi%gea zFYthL@qm8bvnQFCr6#tdR&Ju35*@4 zjF~EryecoTc95*R#U%^)eQMBN?nVlbzwU{Iqf8FPOu zF!vhDoP9kgviBOr)buVQd3TR-TbB)TB&(M+cmrk9WTK>$6r+Mq`{jglSU2Zt%JzoOyjlv|NO|L!0 zkGZ z{LhEPU#tpGbcCM|3qKzbetwPc^9{n!Hwr)BB>a3cy!Ucg9E=9DoSnnm5(|&QNOluV zr-M_Y<6)0zZBvo>~@zXqYOZz1DUF63*;Lz~O>?-pm7WHS)ywb8^L71(;nWrjL;w8M%Y~poE$lE0w_ahMNFrkIZ#A%-;~p}VU~2+e{aRLsdiXL%EWS7UQ0abb-FQ6%bOK5m2<)_i1C zglTS0E;1tV+tZF1GJY~D@&hv?7dzqz6?1Zt5y?Qzj9k>ukSXTlMktUS6{(W7qm{<; zORoEWzw??;U)xwn|9k)c@x^_6e&p~E|I%~sfA-cN{Po{kh9A2!{?Zk&22FG#R=fn! 
z(<{-j^y@kFH9=qBj=rwE1kf)R)1{lWY~^PE8A8VNY zv37P-`meF@;0xQ=P6TcEJ?;MC4&Ba|y%X{|jyJY`?`0^KBwQ?2gWslri zJTV>*6Lgf^%GJX0a`Z3oG~}D9Qf4RplX$FjrrG9!bQ0)?9NngWUqQmig}8z`BlOWh zc3xrzf5(J+q?%Cl(@_NfC+yz9uQsrwrQD|(1arTH2Sz_waDNc?qDl0+C&!aBy;;08 zolVfen#mHX;O~0qwzd7R4}XXL|F*jxLgVkv$G;OvJ3avW&|uvYz0D`LC*$q<$A18> z+?b4eTbsS2I=c(%-CHN#&n34mH=7V6oF^ z?tz;CU;dO_3Pg0fvZb6Ablx`J4YGu{a;#he_%GNMU;}o&;cYv;VaaWFA-B02R=$r8 zxON4^oy!WZ*|+zS-4MSQY4izLykmeGz!(;{24|)#FhIv9KHW`Sja4zSYBD**31dkw z2lk1~MLF(X2fOb?$5Ema5>>MleH**Tp8W@qcfj>P7hluq3`S_dCKak;Iy6o`t?Nm1oBs8BU?M0>GP?Pqw0M$-5#S89DCII7x+J%VE*u;LYRADPiJ>3%;s_o zuY>f>m=bgP$aNqKU8(%9hvvQgQ{?e9;H7~NO@sLA+CG5JayA-@c7UuV-ChW{!QT6a zz0LmV@o=j@DccTjDl1pOF6`&{OR|_Qb0K%SskV|?bp(IEA}RK(fGQh`a#adF!3 zZ^g~QXqGgtURb#deH#6uQ~c&C&Oq0eVX3ot`TnS}a-iTRxBGV^=5K-B5A=J(+XrWf z0Mf4(ksr$s9!;Oed?AC#odgMv98|xubS(R;%3M$!%s+uWinH7?6zh;kFiYDgk`A7} zdN)cI``E2j=r>m&L`B#ZbM@}z0PMz}n|AV5LKM@(OZaVSjTLUjlp3riZP;Y%m^O4XFlF7fHYNQ#(+(+WoNWXv#F_m&ic# z@V}f!=9E7+TD%nKuO$+{6Am0j@oduHx_xl+WPgH=Z|y4$L**fFZW>9M&|xkD zu7-@7L?#E(UwWg{gclkhie$0|u-sWnc|hS|4VKf7XW21#r8FgNaXzJZb~ByCkke;; z73@JfuV({vwl2yI$LRMw-+VqOJ|*F23UItP8T3vJ(H0jJW3u`MXSK9m+Rw)+A>*O2 zn40RWHyEY)Brg^k@xscB(OUcI)N-=zj_jY1XSJ4AuV3w4f2i}~v-q2Xlg-Y-!v|0G zZrHl%YUS@0l!$#U+BXQVa??it%t$*0x7p|`0v004RcgQcn2TGN0}MeqhziEdn-8E6hl z|ED+p%rk*_iBx;mLh;din&_r{Ha_CxmfHFFa0Bl1*9TuA@dZ=DUBKd|@2qm*V~TnS z2k&qIz?B#(!pQPj(^HzbkV6FFqm2+&JVp68O~OeCkLW!egv?+>q~*0MMe#Znd^(8W zP>RKGb%r+G5F$ic4I)B=fy1vgfIF<)$+O}VQVPK}Qb`apf$YyDyw68qT8__garLD@ zhB4CRbGF_>C74hADcv+C+>e;wozMWywnDCGzzDtd?u1+%`~mS!HW$P@Uy))P~~MVbn3U3O#F zdVUwO>vl}|cM*S)kj$>q>2uCl@(2QH#7k@G8(Zsq!C=_wBD$(GFF)Hb8&swW!2#{| z$2w|Ru@A-|6Tw#sOLZ2`$0K-^m7NN{W+fc|`RNQF{)0u%AiygAny7kZ-FsD~kdr17 z7Klyn(6Q{Ov(2W(vnR6qcgQR4DMLSCQbxX%5R)|x;~Pi$(UOV1nhUjEDXethbq#A~ z;awJCBtwcayeP3{zL=n`*x}iTzhPSv(9oJ>#rB;E(b^`Pzp|e8`Du@z_4zIv>d0i% zP;F}HtT@xQ@Xvnu@n>2-)8s?Hsv8N$OVP$NI#PlMt#YrA7{0}eVfc6h-ehoQE4^r2 zdG6MDFPm;j`*UBF(+-{2zq%}3PwNDDrV+-bX+s~zLr_Z}h7j9Sd5Ku%NS_NNK-$=c 
zH{~#*HydzU0Dr~}ei$=CEq)k6!R*l_#4ko*+?Gx<8`oP1&2<(gD#a98 z_wQvdVz&byYk=wU$0Fbhy@s_=k`Oj1d6A$fsl0?z6$QZI9;JN~NK!~nXhf_f7l=cX zg?}=Fo6KRN{?^~+O(MLf7XH1tuR`i5wf|hDWoTr@=H7j_Ezu9rFY;|8IqCqvV8-Bb ze)2u3))-v=SmLblz%YQj7~x-=VK*Wq!2?aOTmG1?l`nkLgM%+yO)X z6PTckLI~DS1?9#HDWLn$S@gF0LcbRPK<2tc(}?8+Q<1^J1}PvP)(OxaDX^i&nXxa0Vu|Wz5F^xzqkpc@! z?Q}DM5h<`Dl4k;-00~ZW*K&ESj6p#;#Es3$1BM+L90`f_X4TcFa ziY~{{N=s0RH0>La0?L-qr&voa5YqMEU%h0DF>aH7{jP!f11qE;=R)v6OCbf}OUJ=iEsz3oNqL;Vn`K;QM@^?4R=~xlg4eK{3cv;{pto2T zy9q3Uq%Bs!*Z^8!> zo7-F)lHOPW8N}G;+F%8Qm`0?QSb+s4JFLKp5VA*uOBN~DW!0HX0&K7X@?oJ7VP4Hl z-v_v0umUn&E3ANgxH48i9yDVGR^F8+VSOIQP&HD;Sm}#fd zDb#u%u!6%5`wEC$jBi(vTMv0IJwXF2tl)4Ztbhu^8X}-Nv%(7Kd1S0h>#za_snz!L zb-aN4j^sDq{sU zBVeo`59Lj#1#L1+s%AHQfPDkPYF_~vQYSlmtiXnv6;@zH$QLUhgLq*DgosXn_E>=p zHO`dHSb+^mZ>)d}VrnJ81}h-MG$OUc3M?quVFgx%>c$GlB&@Ik@?m`=a=~B)WV%*Z z0r_xctbjad#tO(MH8x1gBEMKKU%WJdPN;eoUZl3#S3ubkIu#>UKpr$Xco?Br?JLNS z;$_RsII;1<=46_7A<{;`51 z4p;$^iy13mY@o2zd0+)cDq#gw2*o6=U-UqRbg%j)vitBAkZrcpgC#+xV00a4n zgXxP15&GBizon3g!CFKD&~b`y4925&o58#P-p@S~mNR7^ADF8GL@2+UfVjG4URF!v@?HDhyy0BA}(AR~!OT3v0I~!G2A{Z#bn6NP>+-@R8U$B5~vbQR14UIj9fxKSU&g-w;l)MN$}y+GqN`QFn?jJn8l)lkuco zzW$_qBT7l^o$*B}QxLWAN?> z?qKcLZ88XDdXn6gzzwXBK`SN`-m|eAO?Y7~T1byc_Lxd`(b$xT`r$!(SbEb=~YMY`0ubthF*gjXcrexU+EdVeYQiI&6KeKa)5??XC{IRNE@XMQ3#AsF$ z>Z4z4i_^T!xmv!$$AP#Kv3himSc|X&Z)>`C`*;d>O*XjRv1Y32B zfGNmpE0oHGK9_sdp7Adl^$8$gKUJIHXwg-rPy4F~HtVvQau-({?`5O0><&?rfNqT* z4mQtq(LM9iwpAfu88|de8g3rz5(oTL6CMb_6-}9>;PCf$z;*)`H^KJ(Y04*e+d zmEel-Cyl_0nMbX5o&@YuA%0TW_TcE&)B*CEu?D9~!qPO>mDcdd-)g{ye+}6u>$hIuUH(e(2M` zjo{T*rkxGeUA=INe#D6#XorA6#76}@Fhve43CR-?w8qizM4L5cXnSw%I3R<+C;>ky z*uJm;zh-M;IXCg{{F{VZ*R*j)OT>_hO@%BJK@Tkg*lGTG0LXsC)AK^{Dv^4A3bknNu!cdj!_E!uRV&NyqD+n|2T7MXa=9v0Zq1Uj*0a2D@W0 z9Ofa;%z9!sZz|80eE|Eph<_^wtFsV&nFq5tH^HSqGaOX9uDe@*_2 zgda&#YywJt0jHF=c}2e`*;L^k)m2#}aHsg&4Y((qxuISbBgf&zoYd_Vs_12hR>Q9Z zwk^De2w@4?H%V9}1$TAgV^M1#EFHsh*X>RF$5BYLySINTZleFIg19KvP0}WOFM^}t zTqBPbC`C|wD49O;<|^T`u6 z$hTuQ8#Q^nyPW&0F9u@ArCFdd~|h^>gq9gBz|)}y3&9dKOe!v 
z-d>e0`e8UDj{)QjT)Y%pepSa3Qh?8f$Q$M9rF9$j$T@_Rq&@)`K5y5k1VM=yac1fz z3vBLM{%`~C4{3zQ2E#tO>=FHc;vX6qc8SsWX9k8Sb`iA3cMMf>vjJLwm}W1R(rQ7~ z)4V@|hk{5gg>~9e$Cbalo1P1qoejzC6w~g-OwY~_BQtiYg%bxio}@0pY7Tsk;>L2Z z1DCH6;bS2%=hWyj0Uk}yMTmE0U+iC#cH8-nRDW+cD`h-8yg>!Pk$4fOEB7j|w%(8a zsSv71Z@EiUZ@0xe(8(ALOllp-rhNRd_dvye# z2vJ7RIp!5lQAPw5CsI*>Y(l4+xi|4eaaF=Plb{7df0&HUDg6e$S%JP-zRgm~BS~OK zE(ByPlHUm`k~I70FPnrGbO4B#YEJ5$2;x;bo}J@{0eR^xbN567rOkRkp=PQI0P@o5 zW-eMJOwmbIO&1jB*6bO_P|2LO3Rnh50mXb6y((p7W3 z6jGFr8q|beL_2l&VT`C>Rvm*!-a#Iv$NRAy8jRwWK@>>C}Sy(s3y_N{k-8 zbojV`Q;yI4ynS%;WPgITg!UyJCik|>s6TsZJV6HuZjQJ6ZSG*&{8ZE;g9`@;=&%R) zP!wR3S*bc`d|s^``$g)W22HkzG`&_~!09e4zo4)@6`qNayY^1g)?W|J59` zkZBONZZiH=jiROZy;O{}vg4GKxfrRf*l6s8&o%1HR>(BdmJO*rsnh)HBhFp;Vk=wt zPK_o>;i=kEOQy7BzWLLx2Uhpc@LY(k@QU!4SA0dMT$?SYz*cQ~4Fhxi_Zq_mgYeWQ z1#I2px{|T9Iz;FX&1-Hj5PYiv{@W1s_RV5JwTT}iL;m~o6|4w#tB4a>NAUj{)mN|4 z9q9Edv@kns&e1;R({(wA*s|vU+GDj9uIX0yx70?Rpfi8kgzb7orrk+dn=|G7C4<@| zC>lgzuM@g+?=+l@x5Dh)Qv{8iPiyoK{%aE^^Hw+q&ec|cqz-dF)IFl=-hvbHTWVD| z;?Z8!KV6}eHzRNu^~&M6JJH6lcrWuqGWRG__?zPPA~pIJ!N(i$7F#c{6^$4WDrX4W zfmpG>t>*Yvj^6NRP5Ufs%aRJXVnF@58(7k_mTfgQi&G>D8@`}(#%G%F)_T09Y+i1; z!d3yS>&!vDnh;c|-Y0)J@C~1D$nJkiw}0YOm{?6~=;K@Ga8?j5>a#AwHRN+mVfw1@ zkaR338~Ccfb#$VEaMfP~zuB1gdmE08+q>24CbUtE}>e#y--6QSv##)@@((kgqYMFeO*oOOgg@r;1 zmYCc+{CS0g2jvptDgmZ|cVd>eM=UP%CR~(n$C)JQP3Z5ebQmC5ro4Q+AP9aK&+H%KBhQ3pulPU9SS^I@y5g zr3;V8j|R{SXE_W>hcPj4L6ImZa z^a=m6!tu27S(XJ^rKvldRSC&FL|z0JP9QdoounyVfcI25s>CchMIQ7`S7q%~IM~%Z z>zw>5e5Ar2b~D-fQ=mjH_FLF(8IW~s;S*b#73r>Xl$9;l<(8XEfYt}+n#Uv7 zS78IYnW*T9hDNEVjJ=M}HP4GM3qR|b`*6s0Z_M(LZN2T(%$myGi-B)pKm`cyHCzDCjyC)N7tFq8gJ(-LL&z?-m!8|gOj2HWi zSo=hMtI%N?sLic3Czzj#V_GE|KV3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?A^VK=2&gaS!Sz{t+#?%9P z_hdNay7y#x$oA^Vc-B?!UPSD<^<%y3j|Pu8ieCleS_IkUMZ>pU-dGMON&^kl06dNNCqm)lv>zDL>e7&awX8xo*bPsWcI zs3+rzijGJfKKMEqKQ3XW_MQxfT=$+V57}Nl8PB@P-3!s%7LeY17gy)elgTu2UiU5v zT#GIcWSvYHgQ@g9dor0cy?ZiYwkit^m5^jSc=lvc4w#H&>r#aihE82FroDSIDcgpW z^TqTo{4A7DBWK}*B 
zE&-O?7|)JO#sHK5YU{}OaWPx->&W=gRcZ^jbCDm9G%<5hUFcOZIWxwN?58SRiz8W_ z%L1;lAF~VBUfmeaw#sITqQbUxgg=Ym7@KYEUw_TsZn5metZyE$lYPOb!|K@IY1m)E zC!0=QP+@COWS|FqUiDJSG`Y`(LjF z9)btsqrNX5Nn(k8?)iY<3xB+6slo`CdTNOd^HJO>s<_AD(fDC+#c}q+^i{e(4?gLc zdCEl>-I-{8T%fb-@>inE$OPV%*yNjv?3FQipWkZ-lBSjEwXR~b-}uC|KRMf*4aOt; z8(?H&;Y>y4XGMsqv<0z=&ijz9d%=%2;LR*IRdYFOYKp|>mx0&;@b`YeqExf+&zi2s zkl~Vsoux{3p>i5KRxXi;y>c2$P}HGqnrRk>89Mi|)5G7xKk<8ikFx=ly10zy^ZG6~ zTF%UBQlrZ1>Vc(*(I;5Q<8nRd!C z9PdpAy%WR!@s1VAG(}BM_a^y>tt#7-J~~0OxUxtuh9b)OYELy`wYzkxxyw_}{K$ECmwd{c zyJ*e!d^wZGU5ME2+$I0C%3TPt=byWfncKNb{^|PN<;&8mmsyYJGXgHx@($$It=^gT z-8lOU-i|U6`Zu1E48+~69>vS3gvMp`BD!3Cp>;FOL~S@2UuVcf?6|ylvjMlW%vEN$EjX&5@MKrS zvePrjzHD9E+~^@)d0I3X*LNe>VA-iIZ6O}`NxO&_9+EKe{M85~w{WvkFetUVT2|`? z7Bv1eg4eSgRn{x&e%KmU=Bky#n5+Qy(6Jax;JXdD!;@h#aQF&Pc6BW~MF%Sj;vY_t zLK3#%e?*?48aa{>K73^!L_nqh^7|1;?jbj+Hf=xK`ikSlr02bCV?XM77cSM6i;VTimmgT`NWKgcSaKTj>nn7hz!I@JrCbTXk^~KU!CQA5n z!zJ5M7R3PJBd#Q@1-0Z(Z4kY(M|af$d?SK&4r01!UQ>i}rhe$(a~7Zniu6HSLUk%~rD z=`G$d7!GCY#J)lk%p{QhmgM0Y=HEWQsqDW+aH}_0Tr%?t3u4}B z)sY(EFB)){H`8J|^A}@Mhkq#G7(@J(V60&RKYTB8K*vjAKuBu-vJbZ z(>}DE(nxorL0}wKH}Wyr(#91eTfuT*E$IwkECPdq`GZ5^F$V__hmR!!aiW2|r2ejK2sT?h;A?jOuH_EWhac zQoVbD1c&Dw8)h2zgf(DyF_${q($b7|$dkk#__kx4%rGhj2&aaPq36~nGX%<3b>RdF z)FwJL$Siw8nDCW%h+v=#HjdaXJ?%}t?mNfEn9PKAG<{?2hV7DZ6!<4@yhwoL3JwJh zE7=I{3;>hKfg83<9#`;|H^mN100zRMnYduPrXgx+h{>SF3f&FcCFC=GaFuKzWR?t2 z&R^agvLm)jW`s>p*y-pHL^*mcNsA*;F=@Gg7GylP4%-piC4*wA`=>6Bp2<0i)aD~E zM{JjysjSu&W0NGbYtu{}%adf%j@T}FJkz$6=en?6GBTT_)vva=VY_4uC3o)Fu9388 zEH2otd64s)%G|JBfh#VVaaEE9geY#=_X!(zna>g16_^%#Fb8~5py-0_3M4N4l;(!* zifjr=&0qF&N^5S|ZpQ}O<@``IDj7icJF(+%5Y z*c8Kt1LDX~bHR3*$0gySG6)+S=*cDRhV8P<2|>b7(nVeCZarGa#tqwL_?6Xuq`s}i z=dFDZ+^}7ax93NAaKm;vM#TW()Fw07rCXcK5Re23-3#*`?om1Rq-~-Dw#&;~2ot{Y zu02q=?c1fdE1q|3m&}9>AKIqdao;W(2g4wxWiKAv4Y+TYjLY(;Zj0TpT{4D(H+O8; zK)BQrJM7yv4N*%&Oa?X8`HuT`3HeMPT&j|kpDrCP8NjZ?K%yOtsVs6+j8AHjPJGN^iEgFjpwrd{b{H8KDY**llOJ*F# zWC(DQtuIb*+>rU$Wj;r2mkdHoQAlSF_@Y431=|%!NKzVoxydIUyRA)1U?0bQyCR!H 
zQuCL6n6za|j^Kvv9=5@D1H(6XOg^-0AXGHA}#o6rNINWWtRBkx#+vOM)1B3(C$kB6aljn==;t4B?3{C8D#CAFPhzGRz%DeVJ z;fC!VaXXWthQ^r8gmqedW9+tXmyCnKr}<&KWL%a%bzAI)?UFGRyt!k$2EwJ5*a6!$ z4N*%&Oa?Vp=x+OV3HeMPT&i;Ex(KyWeyQg*R<|=5$Rx1|3eB)X5asB(BrT4Bq_XvD zgb8s)jZBm({vrY$CPq1(Cwsuw66Hbz!?? zWVZsM|g1Cx63gq1_-Byje%y} z+GK{ng&ZW@_U-cW7Q%$DylW2>C@>zca-tY_8=UsgU1>LruK-Gq{)}4qQ2up#cJ2*u zB<>i_&Xo+$R~7nFG=jgZxHnreU{kmTw5qrsCfd&Fsa4X3V-oci@dZk zNMDREDBax|z@>!P5E#tdMSowv5E;IM~_~Lomy!NleU+dd$@sj1c~hB$|Z;n<`Z8AvK#8Nx2cEy-eiPs zj_*#;E$^fNDZfz+&;H_T`ShwdL=t-N(sCfrhC0!o^;XqGoIH8*^&y7nsE5cdYUcum zii=Hnxq^%S_H-~DkH{c4)FcOMKK0q_I`D~IaIa#No$W28k1qRHzQbj2xo+ROy*)ga z$~fLbzfE3g_i$2uje^~>nN-Iv*~h!9$qGA9TuNt_f|!2_T@;?Cbk%3ro6?h4l5p5E-9onbe9pgr5!9`@UtCfR(-XhZs{&Z=PN>G zCaSKf*Cn0;pWX%cds!?h0?2BH%xbS?iA$`OKJE}T>9g6wLgKLfq?kL(6fda2yT!^K zhaqbVMsl?bz0`zVc9|04SB$ju`a8a1UVvZstsM~G>hNo-9rA}AVkjLJ*`S*jyfA}s z-_L|?!*FS z+N>V=m(~>iftVcWCNpGXAjS{71Y$C*V<2W8e?ghBS0L`RZ38haCLEeJef>c7e`>o?AKTklgC{GF?rTG5OV}xP*&_!5g(pQASS|>n(=uF#N_>JnIRhkF@D%3 z5R+jY12OaX3(ADO0`cMb1Y#_EsRhqH5Et*O$n4h`h{@wFftWn&9EdprFDNVa3dBcj z1F;Mns~$oNB-sVwAUX^HO_0sfGD9{7V*Id6ASS~)24d#%7nBKm1>z&tfjBfYfz?qp zHp6qf;K4E$W4EfoZGzGP#Ru6Dc%WE9Be|XZc%|KDWD==K-w}e!3(B$wWr^6}#+-^Q z!DF3ud+(2y8o56HNASg6Ai0p;!O$@vD~r|FvGUcJc}gJ(uvyE)Q7{w8O^U~?ouPrx~nQajijN%K<7v1o>TgO_TL*|#EinIeD)yT~OUW41^onouXfe8Vv& z-}$X#_m}o4cLt)E++Ed1Wp1aJE-H7Di&1%ic8Zv-BLy2KUjpTGm=LU-Q%71tUsELC*4u+On*vV zy?ECm8R4&*8-LkEV=ST*L-uV{L7g_LA+Z~;VB;i@Xcb-ZhG~r=Z!C?U%X``lR4APKSfd z!m^l`dUk6rN7&c8j#%Kz#x>Ob7B-~+gKsRtUS{-o3EOtU_)elU`b0bNq>9Za)oh-B zu|{R{)~_3xd?UB{#METC-b|6&d;&{bvvb?XXfK2P zoqZAWAN)1Ah!2oe z1@Gq;;U(F}9zd@XjVFruy>bcK_jwk%-P&#UKe%?NbupZ8lbx^^!+B=fapRCS{K8M5 zUQIAGMMxKot)2+~hY{@O=M(RtKEOGg&OZ_pPa-q7!~X7gXSTgF>t^gv;|?5JYg`>w z`G@kHI-qU5vAAd*&n><76qj3sy<*YA zs6^6YCM5=$QpQDPgs?(P29-%f1z(5&TiOJ7n%&-Tn6FvnEiL9h*^e7sB8}uy{^vvD zFII&oI>OI~g`bZIKfgx!`3B+V8-<^55`MlJ-g`ML4n~7n&dyZK(m|Z2NkC7c2xr6Lv~G9vNY(~cN2eljZZ12ZERJK_ixb8?Xp$w17ET-48yDdyuwD3Bc$sgkv$mB#W* 
zR{pOm;PNb2_D&4@=`OoL`pq={_T};acRBoS1g7#jcjL7~u?APfah`C4s}$Rhrw8V( z9qQbs&b}OV_VmsPS_&PX>F$gMC&!aBYlm)%;F$0pSwW{I=QH&}>uD zSQuOWNy3%M*vQ9op4QAfd8&wnb{iFu&k|;gDxweAs)#nzDk7h?RuO%`Rz>symLW$K;Vf>WB9iYdRg{0+Rz>;8S&piyh$J=} z6_L+ctB5{et0MXU%aEgra1mjnB2sQxswn@st%~xGvm6yF`ep>0Sj08OG$9>PhgQi0 zL|GM|%DMZ&YGbiXO4NoHsx)bYn|z+6I+o*Sxg_sgDm|0)$(>nj!qQkSsVY}q3Cmd- z*d1@j30$q(K)v$adtpyeC4TMR_$&8btP<&7XkPXgtHk~=JMS8m?-#4Y;Q)DCn|fS& zkFl|kCv}-kej}eHAyX5XGs8snRNj!;<~RBP8$u+3(x#BVSS6zM45Nx@(Pyh7`al2` zU91vqR78tDTNTj<0;uR>mDrz*P?E`Or;Al0+HzYeL`e$vuM*+sr7sgQt86Gyar^C* z%W#HI<<7*_+htk7Z(9{H4hdgN2$^!}d)a5}&Y zse1$t*Gv!lK-0gS{di>p#8Et{7%gZ8oe*>!D1VHMTR7BlXB&0jMCWf%U zJf3*d$ps}jmx!IAtZN`xIR@-yqWb9uypE+}XAHR2#I{uFgd(mJ$j#BHXfhjea1n+85sxPocSmFABC{SRM?;>$*rk)B@g#_tsymuM$Jx;YIxLIIc7R`V(5G4OefIsaX^M8X*hQi6-6uZ%E=+da3nlL#nW*Y6;DTI zRc^70qw5;01R_LS4zWt4l&*BB1Bvcu}Y-m z9;-x3GLgB(Dzb?09IF@tL{tv3ilyQnt5_;B5xK=GhN^2_#gQOla&ka6#`riOODCMD zD1Pl8t2h!KqT=bei;AZs6O~)6;^?}@DuD<0Zp;$6Ah>|KW-I-2d#dHML0}gj*1Qu zS{P!vdvZ6uDf8j|7^XOpG6CK&4>9iIJK(>V@uh zo+PD0EHDJATy)0+%t|IUFXyEUXSV0=ZwwtGx-i7NotK@NM0_i2urwS`r|7MWLvTI3 hx~!x#{obfMecMiN(%c^^Kvdv7D7_ zZPe2}boZ>ZsHn6S?RyxEc5F7RekU8`&HeltLN~4zx2aDw6qU>`^(>W@3Xfh zmz)}R`@`#Ad#*Q}uC{Oa{Eu%v{;eqc;Jtglb?3f!{sm;8Tl(x5_QG4?s_oIBzj3~E zt~;KzMyDpd@wx7_KN`mS?;mtGdS^$2&E9xy zcDH+z_)55Z^-#Qj@vq5$QQTN;F25M|WFLrSe}x^0*|EDD1>X#8YnvN783wKjXhNw4)}Z#?V`T6Yio(|&i*e`{~t8h3}A zqpi-)uzz|q-fCfS?zw$5oJ^N*f`!h;pgWnwEjS!sI~Z-OuGtN-{1Vu6t~=Q2#aFDy zi_Lf*sjIQH*hE&chW@dVZ7wGzbuYrgYW4_xGJ@CI4XNX5t3Ql$o;X8$L22Z9Xspip zFn%Rr{J=UaM)BP8{J*&2XP*hhNB3#Mhlg&YQ@n-MNwlnx41A~o_xbDNfz0S0#5NaS z!u^4p#?(rOS98VnqM zwE?$zdv4skZF4gO*GMHn$RPVOFdjm<@VN+HVJEy8PIkxr?x{iVWPAA#>_-W8+}rF7 zyIZKXLFvFFZeI%fHb%oKs=a#SjMn9({9ywbm*5Ptf%iAyupM8m+a{C#nPI$uzb2OA zNc?RsmEz8%*FojSll|eDxN%JsLZ{1Dz(G`^^f$ZH9xgL8$|i~Z`v|VJl-Rh}-HaFS zMy1!ZKRs_Nu^_yexcGPk*PFOF)$dNYq6(FZqY0@$ZlJZ{VYoJarB!X1Qe1u^DUdE( zKZpvXw9H#uo=@8cK4Gh?!ZrAh5xm@H`kXU5&$#4wmlD6jRqC4c#3ca%ey(9Qs7w`t z16m0nan!Qb=bQy8_c)!RKNi6&Z4})Se9cNYg{L`lq?`MHw#XUxbf>5;i5KwKMAa)N 
zRYFdxyD4hxCfg$vHBX_aMk`wD)_RR6(oKl~V(14<%2-D(j!J}hZ~l!r>gBB4tGQ6y zm9QU0?`|jUT)WeFA)deYbr0Qc3m>e(J1oM;iSF=hJfHr5V#~bs1Slu)Yy@{3MB7=Z z+P>dRL-Y!?;=?u}nhv69;M3+k{l`W;eFKj|wW*<_gL2tM04A6g-ixnt^ZV?F9)E`A zN8S7eZwO&=T<AY*gCE9>P>UaiP%wKm z3Gs^&Tw^OluD9?~ZK=Ny`uZ&BREkitKI5YeXxZ{6Hv85>NkZ75w)yaqw7dZ^Sp`b_ zCXl2kVT4A+T5_}&T=wG7#LFilxY29{S-;7y5^=4+1B8X-lhO>*{QoC&UzHEXxo8+H zjxbX4S!YZDe$kA<<)eyjX~N+0#}a3aYdz7!4CH>OYo`C58FnK=5?Fo4GgGLQj&-wf6u9hDFQix8P(1SS-OwSa<4UzFhQ zTg4j&Con-7g%FBK+N#0|DQM3EDIkN)6e%EsS|SD9T8N?g$WD6ey1skNmV#bz8fd!>j zx)~r$xYTY3zNz=Zu7N=|peTuc@W)voDkb-vB)pH8+MhY;^nIQ#uIL<}6s!`L@Nl-SI z?LD2G0g;Pw$3$)} z*~Ssn9(ipfpdtYqte~AKBHvSB#0sboYT8wBv?^A>L`Ut)zf*lhW@7*qtomz0i%U;o zcNLH!b`&26p?N$*_+WDrv;VTBbCVj7WJVg(kI?63kW zLc#3O0NYq$1!TrtBO+O{bZJis?Hb?%u-R2W#;=SOkO$3J0r{lH2Gc53*>8A>Rlb^O zvI-llfU+fYDn_h;JV+~3ziQ0Hiw#zAv<9pI)0`PrfQRE;l&OXzZDT0$WtSE)=Qs>j zkgKFBR)8NfV+A?+3lS^8Q?tSfaxMf9G`8FLF|A2ZB--5&tgwQkm9YZcq9#TxlY|-E zMI#fh<-G$Ntl+2vRzT!p#tIl4C?cvSXkdjE9Ib>EP$3kPw8dzw_7xnfiWRt|RWgVH z0$`VUwK#+oRzL>T)DJD@4FoRghul{{<{@vwp9#l{A6B5BW-uR2yhp47?C9xJe+W`z}45%TTL$RJ)=0U@FjpgmS#Lya?KGge?j(i`|9RzNQgRs&XmY0eBQz{7Da8VCZ& z88JctY=x}FCam@qOJw)qlN(>(J&%qTta#!e(^zYp`sFoxw!!sqEN&alHjqq7123d!S*FJ+ zzCIX_40PZ>@$xGN@B9ZpZ@JeXELR1HP!m^{NCG+~?qo1saXpv_Cj>TouBqqqwZ7Jj z63cjNm%3r8o3n^5JIMihF0maK<58h%l^d}K~ zz5%Zb6$UU05zx}mD-HpvA-gpNh=BI{RqeKRNpj`d9tkob9Ib3(MbbF=Ta(y7!KWK= zteW`<&5&lfOd_Ksn5`BSoWE4$t&)X;gsm0ug$UZ!q$zYw)ub6x1_o~7w@jG^N2(ODs?!vrYDXzX>AIihwQA08(=j>EW~y}^z!0U4ns^SXFYluJBDgU> z3mzZJ7^R0U+gf0zCc&GlNG0*J2Q6^uD=4`~t8s!TJ?HO875f$(w~F92`y-e`UOhzs z-?YzPRo|+>O?P5zXn8xFO00-jmVI~TM#Tn;>^%w@1cI{lb(ZQ|7f)-n4Z8V(43*8K zVA>B6iQw0T6Ks)S`oq>%uRH8a-m;UOx!fC%N8{FU{L>x%)2Dmm-f*KAA1eJSsVK07 zyI2C>@KCU3t50})#2?A6#r1{sh<$YQHIpuy%U74q<6nUJRrE1=6;Ju$j-FzQZQg-s<#SYMwy&TSsXK*Hm#rk6-8hpwY;9VR-%5kv-nkU># zAvFLU0&l0G<2zgFl^t7&-N%talM3Q0JYV5==^Ff#z20_bv%fXT4uVGKot)@zKAqlf zl3r@SoeZWlfZ@(oXM5b6po8pNuSMx}&^lkYrkIeeJdaoEWC*%P{h6IZl=zDGWb9S- z3x0XCiWrR=O#A4YtsIDDCVL6LGw46rt6NX9s)P3=rTX9xBH*r*vJ%{A*qdaWwX%d- 
zH-GMB%zyXeZYJ^2*~{K`D||PC)yjrRwHMFd*&hrNZ~c2w$Yfl&;AH^);b!k?T0p7R z2H>X}a6@I@kM{<>4RlI9{%TnXvR;j2lTS2AJTz9wNs(;eZ4KyEmJ01@I(a|Sk`|-n zkJ(n-3)kuYZv>lli-0M}Yb%t>g}zW7schh1H|i5Wz<#PW!D*_-!hys-?XM!(sLN`~ zU0iLvGufo63Z0TRyOA~t=(3XHmQbrgz%p728J|`b^3pyGRfwMuwmmqyHFbczW~{-flCU(5b)_{z z{G$z6_m3g?@=Mo>mEX7EtHdG;S_=W4oL#-2pM_5<_7GOblfEOYxY-C9<4OUn!=V!q z3|vtR>d~kF5W%agOgkH_vr>1_&qO-S>I7PF%P+R3>l%PKu>?dtG?P3k zGcMb;YqW~Nt=^lpl-H5mW&Shq>lr-W2#k1>8t*qEi9p@1verJ^e99Mrt4$3?S~ z{VUt0w9Wpxc<%$f?%=>k|ge}N!TKG)FU!$cY_}o~7`I2pD&Tu3O zUfV@ll1}XwKaqkH?uIf8(VFdR6aGfI$~-gfZuZe?=7bB_9)b1E@cp_`((yXyC!HgC z5v%B!*)Xo`w&6Vyyi7M(y0DUmI5X>s-MkAq+Xt|pkNCH8usRFTmwAxv-pb6blRnfc zIh2*Sq@qTP#P5-UK<={>qHr|_G3qnnUiJtlNprmW^DXHnsHrnNPW<1(K`!{;q`Cg~wxCChRRY&q*4MaOD@GDK(YLNSEu}C&X z!|8Z5NQKfv>$Ss;^YP-{=cvCv|%TF)d|JqlK;ER|4A>-a~}2gzTFntdfGedLg_6uy6v;9qry#n%zH* zLYm#Zy)$tW{a+QtrI@j5o#A^C91rIjd9*+&g5pB~b(%8Iz0!uGt)y4gmseCO@FR^n zxNjHS*CE?#GB5_m4+IU4^h|`=1%;q)rD!1y(;cdw0Xh6w`3N6fNlXl3Dz#HR}VLD_{%gUG;1c0KWsTv0k z&sKZ{Xsk*zyJ*b{#>p z^#?u^B5#zZm)33ABWDm&lKKR!a>61O@F^t-O2mjWQ!iOybJy~R8gPF|BRtU`^w4FG z=>HS{(7>=ujK)7RFhsG7r~(37`#XUuylEdTOrY0EQ4>n)e0XmJ4+W803hT6_jw^q8 zH+^xp+RMAogn$!LnXywXoH)4gBy|avYHyiyOwGw+QQTOJ;G-chXDYzs>A48;p6rYL zYtqHsg_mh`uLJ;6J$Hhm6L3mr&UkhhCu9vO0FFc_n#tDw<$aBte=>yX(Od4^asyVq z-4^fC83t~K5G42Td|WUq6m(kB?zz4d3OX*=}}jl@1ja{OjcL7*Um*CqT>Bv!Zq)+8u7sUN5gAi6rzf=9##BlviTGJ?)AuXu_w zBA_^tiUMR4I#rxaNaZKg4%RRApanyJn2gRT{RX{RfxcM2%~HxENnl5&hGLy};kSc| zB+dT$%O;@(9RT8`nv*&w3-Ky;vm5*rf`GhqmbrT(fzoC@pinba1ps;JbTbz%5@sYg zsiMCN1M<>Ih?rnFke3cYdf)&cuSgTYQ6C8b@>04g@GvOIOGhJ;2>={YnxaCB5LXMx zOUKQ5u@9s{P+vN{N^vL{6x-PRp&dFNjWr=qUrJT2yjsv*Ixgi#1rMe#9X{?49f_xW zN;*vLHKAc|`qXHQ4ielLZTDK-Imh{_sD*h~ffzm*1=wU(s!kYgklE#6iS`iEDs%q3 zP1veS1JeAiYC`L^g8)5+UNOXC+#1w(Ho$*HPBB4zb|!g+dc%Jl>ffs9$S$g=_kGD; z{F_^YFErrMx(srQbngB_&8U(GQK*|yyxmKnjntx>e}zL$!TR(70nG7}@MF2Bz< z>dRKhG}D$1sXeLF{OcpmUHD=vTlljYO_IV>wWXF!X~}H!Cv|5>6brr{Vk^8N{N)v2 z(J9wv%PFu`n_k1fT>q=aaKRuvwMhY6_o%L9EUgX$pg-Sgfd4i`y?wJ-P;KJJ$dLd3 
zd<826C$bIIMcm<5jNyMr_0?;12YUSqwMH1I9N+^DI9r!rTUkTBq9DwOhO1P-HKIUIKv+87q^XMRX#9z_a& zQ`}ypKK(a>k2T;;wq9T>8ZjVL&JeZ(v0{Hq&GD@qz2Q%r_F2}JB^7YRfci5xu%v4( z+iGkUr$`hwd_m`oPdDMs^>|C!yxekytpZrrnWcJtLcarVnthiMe6At8|0&(>h)-c+ zHLV#N-#Ul0f^bovbrG&1pKA)!SA_?stNzgamuRc$aI!s`^g5@|MLfj~Nw#Iw8anuZ zA(Gwx2fxv<-*v5FyewDb5)1hqf3Hw02c5Zs_{pz8oS6oS2xhpbwJb5=3dK|tbMUhj zurNy@D)O@IvatY8ki0QO){q$ie5wg&SpDlZOjnXkISMm*S8`We2r=2Wwv||s<<_&b z?tTIuiyx^}cUumQ^<^DC;FC@0&q$SALbZN-p%UMnl?q6OC1ufTi$L>cs?ImT6Y*p9 zsT1wlG6XgFCl%gL;u&YdHBNFTy2GvljNN2062${qc+#(WtuvczmTAdd5wm}2j!|+i2phtisqATmeK2?3RDZ5B4xZ=1p zWqq)ug`8Qto{<9kRDm6MJbpBQ0y)cJIMOtFYk~mSvYfYn5~iIx@IqN zw@pgrUSy8{72g%Yzo~FMt$dbcK~`z%jra6ax!7-Ew`G9o*sT4RzV8%1RhD^9k7Bhx%F34O za?8ynK*<$YRWfdw3Dlz1syeC>pJQ{)!XvI^Xw25&=R9*C4!L|s5Lal-@{n!4?bOV= z%H4~IZR6Vr0Lz03A0sMXsDh{ z#)D^1Cgp(1NHna4PBb!JX4jKR**2skubxa8khN>So=hMtI%N?sLic3Czzj#V_GCv} z<=h^oCleS_IkUMZJKADnE7lRgyC;)zZAgG# zJsCe<&be<-#uF7C(bOXqVI_3W#?Ol|>)n&#kn7%)?#YDNsw^~ALXz>|*^@~*U^0@eOBIG0x_Of^ z4ZiDSQnn2#$*U(524wBpuO|}-i%wbSC3H_F49sv;Yfsj$ttS&0QaQ7^Cu=_^dNP?H ztMp_m0eUh^k@GESws*0x>*{S9_^y-jq7z*%NKeKOfvY;to=iqEKu^Y#Re4Rg6j*L# zJbN-30~c5ZEc5K$lgYR?BtWm8j31A*DfOz30?MI#E;%!%9?-id!y(tbC(A>&S5L;X zu5$MxJlhtK-s@y5v*^iW8Yp<(g`P~{T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g2}#C- zXHO>OfXPTStc7m1WW3m;guXqQlx;&w^6JTi0a?5D>&XPdqEi+DBXmzD49sv;YfrXP zTTdo1q;h6+PqxDKWYmLfvlHKeWQ$__ud>B9_=xA;MVTO(p6um@9hw0;GE0G*N*!J} zsk~$x7O5k6cVxWaM28E~k?}(W>BwXx19W6OS(VR(OMvC>#IvlIF)*9_S7yaN|M~7* zlyPlHe_kCKKVG1Yj3+8OB6axX&>a~+E@8$Lrr@V5T#F;wHJAllWj|&YuD!Z3o^6$@ z7h<=~ckrhXoM5w!{p+vUYcQ6*nDxyAcCs({bm+u>ykUO@pKLmLv4yQcH2gdRJ?QhQ zmr|z5eO_eC?<>Lkn)ZjtS<0du$(RHb*bb6Myab}vWMN}n`r^?;@L+u0_r)VgEV0i$ z@ArG*k2ft<7~wX}*Ag9Oqqs9vagW2J@x$JVaR%U_8u zBg5`Wv`jk9a?_xHQ|vu{uN_F5R;Jgwip_qbQ;U+CKVVU+S@@St*JH?VNyE-krMgf#jU6kO z$irSajU_1RkXp)e=#<8eiodr_X-)X&e(&#bHlR`$m(hG)-{nTjxn4C>lgwYq-+em#PhXr~;*$?mw{JvHc^Y^RxNa<)6}ZFYv; ztzO)S=e#F>DeOyY;%$v>@f z7eeg0=PqRCcJ7jYI$R)7xIBDWdi65v@qD7d+_u@>YTb>qPyekb6QO^j3CTb= z(G1jr!|}C-48#&%<}PKA;zd-M~9Pl>b>uBl38H2 
zm?la$o73}Yx;YZrbR1&-a~B zKWvS=djTQ6cbcz-Wf2R$(||iY85RSFuK;CN*RoS|urfCK73^! zB$S$h=I=!yxrf}O+O#We=_`&KXvJ|Du8m)5RdHm^?h9cLZk{h&Ke!mh^BEE-sWsFm z@vp&GBDk65!7gM_uGHZ|GpMYRcNRh^P+Uq%n}yX(BJkygOSYpdiUGn$TuE39(z>{_ zMCCPwMxXGF2-ZA|NDK!)5-vi4GyghHKj+kVP+QW~5YF=r5eELqsoh}sdM*TlKXz&| z7$(I~;86B5)Lfbj=J9eh8o>4|`@SRu!8e`S3zjvZKJb%s0I}#o0@@cNSzcwsp`_$P zfk2B!v@+PU)17pt=w5-*m^2zU(^_E#TJd3Wt|4q9{l@~#P0i-u z27ED+-L&A#akc;ugwOIK0Ai2hPYEW&Pc|LeL@Hucr8j?Pe=v}(6Z;BHFq1@bz`XAe zcwvUN;9eGZ89;xy*?YQ^!%R(0>%W$kv+!pwEiJDPE;W_PMWL_EMrB<=hlJq|OICt> zgam)?($w-Si;1dwgT>QxXli)^l4K=MHR>*m3R_t0DZ;y#)X}w_)e%U6Ng-|dN<9o` zMi)b9pyX)dN?`-Zp#-R_QWyg&Y17QIk2(#x4*npLZSC~u`NBwIKh=ceEH73~J#Wi0 zjm9{Q&B9ochijOB3#T$uh^Z;n!T*flHgB%DWai}|Gs%ST-3GkQn`tqf`HL~B!#^aw zP@oX*lq=zTkpntj3Ijq?^Ot=fnHby{o^^q05cjkXEvGcnoj5E*<%E|*fHhxp0c%*! zxF8}aUp@FMMHLZ_(#91eTfuT*E$Iwx+@lJmf8p3>vXYf)sq) z1+d|m6M}@Fq&48fS_q+~07mt-V3uF>eW~8PK!U^99UEpE_JlRya1NbXUVtqlxgOx# zj%_l-s2Cue8a9TWTbs-fC|i|c?YEkP2XPwICOS39EPFzj@RfH6zI4GRedo~u+oh+y z$=7{Vjj>BLPlht|yKK_cHrbmkgL|<++y3+^}7NX|V@$z!wFI zF4(R>;=)g9ZrHBKrjXS9Wj~{|=7#OI>%w+1;a$Xb8P2#Ma^ZnDc&6Zn?J{hNVZ#A& zWT?4dyUgQ~a8Z6KwQvm=xEGIB9Sw5Cc3I|xAmJzJAX3nvUyj%=!>_bD%3n|S&|HXx zA2)25Ws)9FnY}Y`z)Dk=F+cgalpdlth zI5fm=`*z8InLfBw<%0haYIfKxyGG&yA3N^bC6mM^=#TAk^jwk_M?jLayjBYcaYZ3p z9U*qacF8=j)csQzN3Td-URG-m2Dt6phjN`st zfoZV^bHEn`iZ0l$K;ptrX>QoA$fl6g{AE9*wC0BG9<2-8#e{be+hsW8f`|(il`~Tc z0nVFrCDF+d+hy1k!-fOm$WU{^cA3W|;UYA)n`@==R$fPJmt{@}5`K~{0)&TchR1dp zer2^Esc${zynEO##~T+O0%5zkZH9{&j@T~8s2Cs|uttuaTbr!Mb}M_ccKegNai(pe z!@gZkKH>o_zVfa;P`F{c$7XjX0~sndLP;|UJ=BC(2<#;9o z8Ct=cJGN^eTxy9OuwBy-0UBa5#LTc=GGL|;E>-PDJ4|L{hYdSoyJQe-g8tYpN6#f` zaRelltvqR2AQVB*WFWJ2#CFN!Sn5KB>yWxQdPVB;3Wo-fI%2!rOl7sM7@J7#>%|Lq z?6JqOLT=cundjMHyJQ}UOy{_7mr%`YS*B5UY}ZIyG!_?Z*F4DXrZP8dSK#WomdxDt z?FvkbJ(vT&C{T34b_EiWl%@k~yuzMSu5>(;L1a@%YW}jHQCf4uc8}R$yIc{e0g;wn zT4_834G0WwXEHFHaX};yc#}^%Ab8`pZyxJBSXyv+hrb?gbTX6!NW}CkwIsZ zLYVNCcMwVFct(fSoUC%97f)P#()YikWy$IMdMF@*zpRMgOXgFC+$05tKi8!? 
zL$b3Z;-jXC`FAwohk__u0N*ODh^Qo*)MST>KwyxScKyg(x$yP|+#N&{Lc9s0i6ZED zGirFHK1$Dok=&ijiiSV$TV*}Edc~g$4wUnZK>w%WIf81AxhjV1*er}&JUhx?NLB$W zU4E)`ZHJnDv9_sncafTGQky;28&llk9K5R$CMO^V6k<^zeatc%p=-@fu(8K<)Gud6 zw8|~lxTF^Hyx3IMK1r(}b;TC}y4I}Hb#-zc)v-r2wq$=>_-lTJ-FFdu&nMRwN)>9J z05VK!u{A{;FsMbj_cW=%v`Hm9{X2^O>rsZX_w5;B7%7d%1E6qJwa< z(u8j4%dXtc-liV(y5k|b!o4#_7r&DNq>f_4uTijDHk0buCHr`1C0SzUiA(9sQV{b`p^L)Pl&<;= zdsBMy@-m&Wg(q6$)c&=d7)&BLRi43_ad)%d8%`%J_HQy(-qW4lvoq+V545H`+k;+f zV>FzOM}y90Z@Y&sa2jr$@2nKz!eQAb7yNPz2hu`r*qcnzE$rvw#x-*q1O{KHUV`7} z;Qf2x^(O751iH`{fjhubzc zjdokMpTd-#Z{d^~vN{c}!Ke1X{azM}iU6{jA+y?RSmF|^rH?yAP5NxMu#h-xKPl#p zGR5;M@NTkl$6?6Yf{|P;L$A!*^bqP=xg4D<%-&@u6wx94ns4oZ_*REsQ|*vH><~lg zu*e48yx{p6gnND_93l)mR1~>3r4>cJfw(wT($6ZX2JnI~YIRY00aj%s%GoAKx zDG(DK1_;FDNH>`w8v`+Z*d-8?VI2c8^Z4`1guMcByR}<^7^`C!0&#H!hRlAA6)}0- zB@mNmodYpP;CW@mUKR0C+d!;q>RhT@q7SJV8%pf}ftb91Ei+_eAjS{71Y$C*V<2W8 ze_oleS0FyRYk?T6Lu$eGR>Z~oDl+>u24eEKOCToCItOBo!1KzAy#n#E-3r7+A5t@} zKM<46(lSFf24eiMOCTo0ItF6q@#mEZdj;ZSyB3JCI;0k?3&daF0}qz57`s&sZWELa zC_c=Nzyrk+8p$2($1CkNBa=u?`pytko>!LLFH6J*H|A7i2_DmT-28XYR>z?w+al)R zi+ezFA-jX2t6}78S@~+rJf)BX*sSH@D3}T4CeJGae>1B9`-KtCz-t;{5g zr4i5FYVeEuAStt++4k1XAa0_ScB=(k{ceF1PbkJPE`M_Z=KUu*8HOr}{G^6T?s2Az z_nHXPyw8(dX?~sr>B16a_ziwDLD@TGS1e+s4bpp_z!$XSl(?;YK@oxD_4N#$~Qnf~vn zrMeiDF9zjdwmi@>Hb-PPh|qBOCXS1xauz|o6L7gyhWh~~-E%$VwRC%xVfzKJ=UjKN z(~B=z-@mvwo?G9uHQMYA;P>a@>Wt)ZME7L7HSG;2qj6{0-Re!qs~60>?0YqD*Bw}X z$2=M3ubS(B*+k>aqk};XtXF}j)~i9b>n~>mDi3`X?ew~7F(dClji1Y#-*w|EAL(h$ z6mhF+rbyh%Oc9qbW{RX=G*iTFpqU~uS@@gqV%DnA=?2@qPItK3nGE_Hg=Jk!>}6^F zWz<^c)}{YrU-i+NA$(&V_A{f$3)r?3#&;5>(I?u8Csj;2sbm;`h=eXy4~K#B~U{=3LCjw-u z9J_4_YFJgSiO6MJfjOwPAbB~8k&Ej)f$$}Pv7?kRQ{|Bt+a=bHDp*sCSn{BNBS;4c~ zFwz4l*Gd|^o-%1NQBul^EMD)Y!TjX`c_i`F;0=^HqXudHEwXn*a1D~70%`C@%A`?) 
zdFvpuc%zm@7q`R%DHzl!wd8|0QRa*qr2US_-c4HeTzrrWrC`wHga3_`Y9!qsp<1;()DXDDV$s%1GUV$6&_YI$&&-1wF)Li7GL!2OTPP}PhHcPOaFWC-}>T#eZTbRfnWZ{ zd!N1S2Y>B%7vaaQjK6dRtU?o=h!rnD^z>46BK>+EeNE8Ux1z60F9P(-`E==KHCwqk zczM*AU)a;!w;1mQbnJ3*gkQWLnrI&%dgyIY6z_v*tN8X@{_SP3pGn&2muNXC`%Rvo z)8Fic#vr8<1MJB@%zWQYzdrzb^J5LupZ39g`W6iOHbE0Wz%u#{O4{_!7xE7!-P?&d~!GEHZY9#eSR_{=0Vm&p+{e)i2u zIuZ1nar&F=oAftJS5o;WjU&y~1(R#HTx{tFbCZR!t^{IAJX5rNBOsxhTioYhdqP=r~GrXrgMCqHkjt*>~^| z@(#ET=;CWSo&FFl*rY;LOb51M0l!?9j@Ct=OuwE=pFsYKY-Dq1BYi$Ka#Xz!C)*=* zsAHE}{~Z5^Q_LS;PzZBB>}&5$h1pz;;Wd!H8B=0TAGr=?p(~aDbA(GHN+q}vPOcG!Rapu5pKI~r{E#%0^#O=al{*n|BXe@W)kWiI4S zmpp)Wa*Vbf9QIQWOhuf1EESl<9Ooy!-e%nF52s1v>ba%M(5KNa+Qo0K;tX_k5f<9> zmmiE8ONR=6a=U*gV*VD``#`TdxTAlL2q67x5&5zF;L-GX%oj3<+)0q&$U*f>3n#L_ zs>}t&!Tb~0qd3bQL9q^r1hcS>B5D8WtM{U0ae&=gg?@7dLR5rpF<0+R4#8ghxk)?U zc7S3!*?<6p{$zSh>SnLJCOQ(O(ilL#yTxTR6bq}3b}JgKH|T9l<4a&4+Vrs5pY}(? zt0C1u>LTgaUTOy_SGynf98a0%{1O>x9{v~8$ei-WMvE5%{k25mcfq0KD4vabn|Jh2 zpYDy(@vS|jVW>PLbt+U%9N#&md{m4$I4IY&elb_;ROAj=bO(5#iu0vOaM-H z$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl7E@E5cKgFLpX9|tBc5A&AzEucn_5n`-I4th z@~q{BmFrg8*BxoU@Erc;@OYzr_~_v?-Rn1RJbk*`J91O}*)Qycx58B{{4zQ<>5b2I zQ>Djjux0mJM9V)=+DL#3z12hU{>8s0|3zp8tGWDQ!kX-_umdqWb~m#%|6wETu#q_I zpnIw}nAAV)?+I5|@YpOyZ1TQ@Jt-e$Y?Q1zJ2+e6d%wxfR_-Dxdb@iQ^&D&HpUZ8! 
zIe5UTyQ$eT+kj6-@LE=hZd`5kH3y~tiyMCSnLxZmsy%C=_~<@Obi?dAR*ovUj~+35 ziwCptp$6RNuMfUL;tQsPyMV<_-&y6r8d+pKSR4RXVyFls%V$jwL8XJ^LJkpxk2FG9 z@dV}HGzljmJfiz_5HkHCkyhj*E!8VQp9&&4lw$E)ouN%Pgb0yVgNRVQMwx8`f?sU_ zcUZTRXT>R`6oPA{k|1OP*`G;xpNqh>9G|B)^`$_DG1BF8w%$S|n2$%9?l;6n@%{#A zwiR+k14ihrcPHfH;17s*vbiAMA>UJEp%NBIY+Mt;zmGt(+mw@-a0{5F0R>^#z!-cy zg6mCbJX^qGOU1XMTRP}lPm+tc*77TkUH44#30t$%1pZ^hUnC^6t91IDGZF+NURX`v zyIQjzPUe};Jc@n^Ki4oDRHh2S0qyq3I%>HvLT5(h9;eZ&Z2!>5B6y{hoeI8YC7b|E zYS@AQY>_hvu!_GXs$N<5UR5dN6l|f4!XmQi{W_Mdb+#$Gc=l9!H&4pY515pZFD1le zO~d%cQGT>!Vy|Y@)$j`(csawKHF5?Tyu%`lWJpnl7bUjL7YeYIHar{gH*8A+8d{UA z*uFC@qPEHAudJtie%j+_eZI?vIx^WbRGS()E6%hn{Ief={F!B+Y4W*1)r|z>rD)^J zSKJ|9ZkYiuA8WuH49;w&7i}xg-5T#@)4YPzSfZGH3Gj8z1m=S96!w^FDXkfoc zxjsUMFGgV8mQFGo*IO{DRtGiW=p}{WqHo$cGV-fI$ zUc*`_NeCO1Gy-At>$tlgrF|1fQbHE@Q4X`Wi6m!$rCPszi$<97@WWaWfVdvCTT5X8>E2lKWEX~ z>I?l|005Z-8`rfcgcVXisJ1kmF-QSdmMNMM$s)NkoIs`*j+a`H0y0fjH_A-}V1pD8 ziaNdcAq6%gR9f~Aj1-Xhr82FC6p$x%va?4DY^ZJ7AO%*0e31e&h;rYI4N^de=mcnw z6xdMXOxfJz+K}`{3dkU)R@n+EAjC8xwL}UmD7Dhf07j(1icm0nH1$z7NCBDgt|J9x zA_0*CLQ-r=jKv&z(2Nw2PikyX+n=~#Dr8JWhhnk{8>D~?(GvO;YsnQwqZfxJUTlzp z{MAdg7_;3Z;L=Iu=fiMSovvxLK?*QUwTo)#r`8u>$4>qpY}q+QfWitXAaBEFM+(>@ z1^6*DQjn7`X}mB>55)%AGGK!gvZDVv<3z0{R{{ z>(VpE3dkJTe$*c;AXGK=LyN%*q$}}my9&t2EI;c0+GbY)p{V7hzzVnwSqb-W+j>Bz zXN47z59@g08mw4&LBP1Hz-ENq#0tp#s$&J@NuBKMu>u=vR#<@*A>ZDN4B~|q5F$DO z+G7Pa)HqW%x4AYXQ>?&Hz8BfTrAZ)@u)+$+hczO##0o4Z*bV+FWH zHSa4RVJ5>e3to2tD>&+~uYkzK_;v-k^)R)8g3DQ81xG7k1yl&dB!glF^gJ@wrFB>V z6CJhvd>wBfkjYxD#gh+f>W7-J0x~k6sT4hXI9>{@K#F^~!2s2GvDsHZDC+dWHCUCg z0-F&qRuJr^0x~4m{`2}b6Dg8BDA@*LS%5T*;hb5 ztP`L;R$xPoGi5VYU_;V-UjZ4!gg97X1%#MJq?TBL1tmMIz>1JBRzL!H3h;1z2G%uQHdq0s$+;*~4M*BVWC`x)(iZ1zl_G-^R*0Jo@Stbl}>-Ny=!Iba1u zE@rHNv4KLPv%m_DRl*9W5FE};K!$X$`X#da@D-4+@1F0C$$KZPU+Vw^`H6$+iwF_= zx9opQArphOhyb(YGxzd3Mh^t#>#T7aw?qo1saXpv_ zCj>TozU0t1{gM1sgCH(Zys+?J8Uf9oq6+~%3B9@ykQ&?9`Abp(S-{sq3Y8)bHJN-s z@p9OMzjzJ)0iSQc>q3PAj6wvoH1vu?Kx$#_)+E@kYL}`d`Ci@j7>1Hz4*rHy>VPEJ 
zNCcm5z_DuPBQ!&r<$Q^Zl3=zf=~jx$(_eb>?#ZA?KvyERt4UMnnyN|D0A4Ux9(xM} zemjEKSh806AcZJDL-7_^=xhwSlSzCG+VPRWXk&G)!kBzvp>7JJ^yD$kYt@|HmX1_l zifJ975y`yTm%)-x3#dkv9=dF6fti{FZ>}Ph#LpfIZ}pXm+*dXFm&A_roWCO#Y)yQT zsfysjajxMJzGCk6|Fk5T+_9&SR)_=0fIK#k2UBMwG3uvs7Q{ zEC$=4o4;gH*-Q$3`ynC`{JL<0Es{ci*xKrKhn)$&@TAilk4NKH`TCRcjVMV)fhF9< z68MIPf;G!M7ZJQY;*aE3@__O{Z;USGBKKsaSADde>h;f@ozeq&%j@`ab8owqPXww4 zb{klyCp-fMb zyAonDmwoGd(Ol+MBVjTsxu9c`J*JXfG&Uh(Ii6nZlW^zpDmg3JtY*2OQ5fDq<|-g` z9a?(6DR)(Xay+Sh<?hRVdMHsgupkrTv+mLzMW6@#T+I{eoZKtRhCE2Gc(JW-A9`naLK*JA?j{ zy}I=zt2)p^jg9)?437=mpN~<0C?|$6PBp%AN zXInFV7u^Y1*)Xa0;_Qml#9RMf6fzkXE_fMt?`j${6=4;AssT4t<~_d}GQT3yvJzyy zIt#stPc%q8G*-w-C6%`|pjTNcw5RFh{Y*<*Ymq-@TXE}l`m#5(0ZkJJ;QvOjS+@w7 zg1ojui3@uQeJ@MeD)`rp`UDWLpQ=r86x0!VF;%ei)Y!8;*A!-uP zt_h&8VmJOn2N$)hskvc)0MDh9WDZ`M*?M{=9}%uiw6CRLvj zQl&k{_%&f;mU4yyu4tEvFqT+RaMj}?btaX!!N0OyO55z8i}yaz>kjVdpR-&f%knM% z#*RUX=J|dm;;+$C5`1o~!F^fNx~QY{Cbda5}sJm$ikQztC4L z!6m6#V|1184Asoz@M!#SO`7SfsB8rOQ3UsS>kL(1IK{GCY9#$5n^|?M;Qu({?-qBm zPkvGq|FfKfWcKjI26V#7YMpGxzAG;#ZyTSP#8*mxorKm=!d-YBM83=I(vIwx!An;! 
ziT5x5HTf?Rek38P2`KpmoKoKA75$!MQ-ym}S7njFo#Jmb;GS^ihI(0y9ETTkQny!V zx1kanQuaacD}ikb?;%21LiSA(R!PBKT^0HZxgCIo6PTxT)Bit>LYm#Zy)$tW{a+Qt zrI?vw5dyv!!SQgekw*)ZA}Br-P^T&L+$(K3+Ddv=Tt=C>|B*%=+_wwv>yT|=8GImU zaHMA<%qCCNAm6sxY*0Q4Sw<9VkpZ0rDI)2H^K5A`wYp-Il{VqP2Lt1%;q)rD!1+$R2mJeN+BJ~uej+?g1Z=#g38zAfqueMnZF116 z4_oP@0VN|UwD8bP+95vzZ;Vfd>5xS&E6p|t07XYrH4Yr05i~jEp(UA(u1-^3JqC}& zZ>&dG8ZhG*B6!%_tFlEu3}@sqfV_c=mtx&F_kSit-Y8Emt=q6i&LE^D^$ED}dApPn z1SMj`nW>j7u(@mbLk+k;q!FIz4|?dbNA&-Re`sLXB}U_)85pA2MbKK_2~^2V`)C1T zlD#`hs|8g@_udE|3L>=>)@ex{SN`&DdM;#kHYE4TitgoIbas9inXywXoH)4gBy|Z^ z<*@3bouat07{NzFV9u%0Wdb~&o{JFg$-daXCSAHSnnCjpd^|)MLFbrPJVhB1P@G6b0kR35YUbWl4$FM8Z=Ffdf}uZ5M(31% zgWjw_Uo78dDdmwQup<`&vKGm22Ng-0{qvViLJK+o#7i|Nbxs!IRU8y%@Y66LFP&xX zo=BjySq~`GOjQ9uUOL^(MNz_}wGr>6swOT7$V(?7VuIm7UOEKnfdhcNB25JHek26Q zOX;e>!=NB99gRpP0B}fYiV7{vs}_)#j+^shdr?IcX4@ehUZpq`42o@R{?HB`kH(r1 zs4u0eR$eWrFCCY1qr~XZONWp9H|6-u&pZ03Pxr=XOK4BhVRCP~413e3Mq_l4;KpdX z*W%7O&QC=x%)3fw`N1f_CbLp?!f=DkE(c4rhmcm8^WSa4R$Us9=66*STCW`h=qdDy zAr{kg=~UsJ4e(!)Q%n$_ok?Dy-tZrX`nM`N%3sb{W4CXrcYJ-J0gu*YkXxj4_ZNaz z*opsYj#)gt22 z+Ror-DwOhO1P-HKIUIKv+87q^XMRX#9z|%_N8rZ(&F-|@IXxP0b*J$H3eB&3=yuH^ zzY%<_0dKPP0$b6D0ikk+upNjM`&(*`Z{_F>f7-OqvbHR#fGY;npSgi0U2EA^W3xC# zqOjpzNCozRO}+t|7bsDc$~wPhny;tr;8N zI)}4@a8aLi5w0PhYYNj>g@>eVIa&6qKldB{*3dx)%#>;X= zF0p`ro7i4~I5Q0t5sVW}dU0Z8yatwg1mBnVvlXx~OCc)qvh1?408Ws+F+|po83BB% z31?V)*PS#`NjBvu%;a6kU2!4AWZ&9WVnvo)&(Z?@33x1iq*8%yIXKprZAM*#Pd1@H zBUN$<)%xv)N_=-#Dj*e>ltr&K1{2zky_u@}P4GngSbge7yORD%h4+(q#?cX3lV0ao zE4{H6C%N>ytgl)m-&u$IS%rln+FPQLYw+h44jz!Lt>5&K4^mb`!E5FI zat;2WbL&m)g5z3rhP<_W$T8JO$6zFzyup)jS(@qoJ;}8?Hkm{^TUABKB(C5Vk=TR1 zawF5~w6M1g8o(ZghvEl40vHipSs(VP>Z48BMPk7f$E7LjgC#BG%-Z#g6xgQ%uHLvCHsj|#-dK9bmQC7BGms@Tw0ZOjGs*-WbOrRF6R@Kq&%jbApv+#&3 z8M?tW_&LwqheNJ=W0r?(>uslI)>ZCaID@VGc=u#STdH~_U{5B~K*8%S^kf3pid^K^ zlL=!mm7Zr$CX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$c$r;KCS}`@lDv8{VL;Zd z{dzKiu;`S!{-_8hq5CyqQVh4=J=xJ#Ik$)D$%IL(oY~xy9c{5a8TBA;u_tXT=B(|V 
zZ%VRdH}LJrWP+^HleGi%WR@Z?vtKk={NJqlHSeB`7oF&GIX3Cnlkr1fllSb&WF!%S z@7t5{WK~`hE(Mm|-?OlnF)*79SZ2jKB6#;?GOi5?(5olo$IChQ?a6qeq9YdhEp$)D zk4u=Ty(hyV*S#mpL$+5>#&e=)=*eUnsJ{Jjjf%&6olM|bbb%o2WWpFs zHRsur$)xGslL@m`S!k$)B;&!eCzEn8i;N`W#XckXu9HdGHl!r4o=g~!wQIkgOdu>e zj}NrO0_X zWqTKCR^$IRo1Tmpo#=8wdNO_pT=jYOWHOQgdNQ7@%4@=QCV1I zX!w$!Am-n@C*z5#)D~`?j31A*G4-mB!eOC%GC4D~_hdNay7y#x$oA^Vc-B>RQ#gaI z`*^RDt<0h)lWAaAdoqD*MK1DNClkhCsyWY|OeRh5o=ljn%0fdWBpDB$J(-k)3o0YY zmvzB%{|%5OnS19 zcHK3(}GCLj>u_WF!N0WIS1w&xA{W z!8RKQ3X$*pdBo zg==vny9Tp>tL(?@!nId7#3S;cjHK+?1_z1CH1_8Xm=^v379 z)Bb2^e*=t6ES#yR{H%x^oa9VvTTOIDq3r!I_^}4Ok>#dpE@w?mk=Xn)5IX?=-Va!m zY8L)w)AblKT+*JR(Ves|D0 z*B$JjJ5O&P4JT8{MG2=*pT2eZ7Mh=rK|4ND%FldY*TzlV$1@DVsNvq1%xQ9;AEUqlliy<6m3LZ4>(yrz%Pd)R)yY4Rel%Lb? zLd0(8F8QZb?m~$1?ozhfaCTP7gEa0!W^U&$`KQCNp>mfmORrvLJ)X}9xLnISklQx9 zTdlis_UXSBWg_%%G$9#?yIDPo7f}g~%jkJ@x%%AlO*9j=;Bb7cArrCV@)<*YuQ<-1 z--_ThEI&B*vcEBKsP;a?(6XfE!F4HLT~6{9tWx2^HydyV%Uor4TY{td2~T!KEIU1e z?90|ANm7NEct}_7=LQYKcOqD4*{LpVAs+ZiJBTDqJpW+?l3Tc0DHxR6St+ZP1PxS! 
zZBEapHP{i|v70!M)D~gjPa?RL<*2e=QTL;)aSa0Am9X)h2Hfe%uoyUe1t`0^mYt%5 zm0eiD<1Zr5P>mc(2p_&OpGg4uy$B@tkegJSwx2D1#qmPegL~l1)(CmUK15u7-g>a%wjizPLaT(tVd*b+s%;75HPPCWB#834ZQHe=rS%9}PE?C#n;RBfEKX++rd6vaQ zRlULD={YpDJON3vq88;Pe|fQLuqsKS1axg@bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{v4*dIi)t)2cnUl>X3rV z%@vo-yuw2G%q0U~zT1G;c{44hGk-BAb@+!Qtpp0;PFW7V7dfEgr7$2QHGkO$l8Gs; zJ?jG1Ans`&T25)CJK>glG{D%h2Gem^^vfp)UvmL#SkAa0;y#fQ!h(L0??noM+kV3Z zu3^~}!$x%&fu-gE*RYS5t!Y{A)jE69Z5eUbBYfKhu;G{!f`p%>ivZ!ylLo+8O971P zYr!nP==)N=dw~RpuRAu(H0LXy&S(2G|Hmv>?&*}Gx8 zBAY@|^OyaM(wZB#+qS`W14FiWOx~|FD2gMt%W%d85f^|UpRo~uvF1rqYJ(fL%djaz zHx7s+L(K)-Wgf2>EI66=7b>OC+Q+Uct}CVuN$_@@GGl#NqxJ^!NCpN z<#^-516Raqgevi1mlq*BcqZjiBoMaCF)9WKr#6|vF5TK>hQNg!B;2rFUfx2O@RfJ% zfx->jrMD|q!**+cY{^X6JgjZH9kE?9PFAm%jhM%Fxrm~`VMDgXWO9!CcFE%k-rTWW z1L0Cj?67awG(;`fE*aDaJh|=LCFC=GaH%R#J4^;B=O7te%es~kQFg?3$&9cG3eB)X z5asB(BrT3W+21AjY7wek8euY?8{F%N?UF&U)csQzM=y{(Ob~z@w#&^_R_ltfiPXMK zXxFBhI+iENq`?i_C68y?mh#MDy8#y{$;f;pQ6xr3Y?q9oXqqOFR z?H=6)Y!{EohjtB$;)v}soN+;f$;Xjyx1%gpxL|KNw_>C~l{CNS*Ad%g*c8Kt1LDX~ zbHR3*$0gySG6<`!nca2axNnzbP6!fyk}d*-J5M@GcHFni@GGnRB=xP{p8|uxczWQ5 z?Q*<5H^PJ4zFm${F+e!A$qY2>)+RFqBtb&AUhLr>m1B?NzFl74LYVNCckO|~4ck3- z!JNrJX2RAM+NRqP+a=>*7^L({1CQ+nY>UabEPu*vRF2p#8AHLFJGN^eTxy9OuwBy- zwKT+J(3-GaLO#<6m#P9`yJUb12iqkx!Y1gC?Q-;7k`_mxV$yQK7sz-s#dgV{SnB?% zi=$VhE-!i1gY9xNmDRdpY?9Qz+B8!Kwo4}Mi0zWcGi^(Gt_#~GBiki0w|%>03?+B& z*shVZXe=(+u6dB%O=WJ_uE5oEEt$FP+ZC7=doTxlQK0C8?Fu9$DUI&<_KMf?w#yL3 zC9sdGSpnKUFLCJxbRScJXjbcEnZ>cwr`hZP6!fyk}d*-hhQ`!aKv^Q zer2^Esc&oXd22^<#CAE}xbPrpm_4*&T+y*-)D7F^7!?DA1J=mVb8D0J*eVtCzU?dT+5-g&j3=v{D8}6er#*C+L<_Ez(ARho-x1^cg3`C!qx{6$HF_fN z^>8e18&2ev%oYB!;<+0V8d>*fiXh%IEtejWeJ&9nObpV6cQoOLf+$-6-zuGCqLOG* zlO0O>hQh1-)g+HRCQZD(0e1({giw|Q(L_k`m;`~=mLzxQvZCS7`&LDQ1WIYc$Hm1_G9cuQ)+NRRoMQXB1ZT1+_ubje;BoxBC z8ewt*azG=2@IshOe?y4LIj8+%Mg{c=`BtK4!UOll!X=R`^y-&4~La9Q{6F`PZEw(0c^1_9DtxO(vGztZj z?(PcUQbKGAApLMc(XszkpZ 
z{D@C4#jq?^F+t?$(Mz#YD{W!Az+%hWf<*RmQryqT~QBl`t<2rLk!VY50PEe&IJq=7aQTNq;aJY6khtwQV~-KE4O;RgAK;y@~YEVgJf^xa=+0t=qP@2j^26$NT8F$t&z0 zPO7g_uv<2h>ewawcxNS9V&{oV>C93P^G~6R!qb$l`V4zhdh+rzow9`|TI1CIwVfDD zA~{u_!I^P)v)>y|CoT4GGF9Hwo!+xE=%f#{raRk%UTb4CoQ_9>&Sr1Bhi-owZk+F| zltb%W5kVY~@mv?nF&s!2!-l=d6kW`IE^b^ir`eK-uSM zoHAwNN*_&pF0FL2dnW6tOmgM9XSR~NW$ev%9qHk=%}t}-$_z|h5wB_O-1T-!_|zV_ z-^*fA5kOWmWLA3(OI%{L^l^u%NuSLY77~Z;C&k=Rrg&Zj-c45SI1E`^Fp_J4(Cf0% ziRhzZ@Wtmi|C(>@fcRF2UsLUnKkN`g>9EKK-MrxW8H9U&CcGPA*rD9WwI3i57pH1E z1!D5JiwTlvodYpP;9?-|wX$ZW)3y!7vRXoq+3V{Gm*7ow2>yYX9O))AWMd%454!|n zGOS}DW*&cDnXp$NZnvH*ff%bVKp-xTz>wLmu_7jqy98qLtaBjd2t2Q>*sCHwY8!}4 z?^`oqgiBB*`q-t4n7n^2Gh|~R#t*v$Vlu2_AZ8waUYW30AU^t>3B*`^0RnOHzKYC# zje(dv?h=T}v(AB-Bk;VkVy{4a%r+2T2o*8WVStL5Y?hW8vM~_jhg||O8P+inGmk&7 zOxPlPCMX?He3%`92Z|*$k~`RsSK4hx zCXt%-ogt_^uPnP?mWT~*%&Eu{Jgy$0BkdZ9_eNg$;vSG($nIe1Y8d%iR=yfDPbnk; zHfwn}3T6Vi$@9v<-^?n&eqn?&a9c!!zM$p1t4ii7>WD3K6Zpk_kd#@^Y~mcl_S9ug3XbjIsxZMO6_2CB+W}P$6{NJZ#+}&G5b~oFHr;# zVHdgNW6Tz*L=);1hd?Q&q^&qS#Q^_opK@m)ipiZ7ZB*uVdg-EaJGmH@`)H@wx$bz< zs=gyDc*GNT86+2@vWsFm7o&2v!L|OVj9yasK5bM+k!k_Q=R^^g^I)aiO`3(fI%93M zs1MI9!VBaq$Mg4&?%PgYs;Z=NIlN5&chpi{49XXS@-SN-Xc?O$vKvHbID8Yw#Zozo zpxz0%Tq?u;0F&;y9)EN(^Pa`tWmUoX0@!n|JJ{*Pm#pt!+#AoW@7Wq{_6G3#^Kf-W z@;IV`Vszjl#07C5E-M{xWJ-bL-Oo*$@7WdDzd49xq_qP8i=w zlt!OuC!SQX`J|f7^Dow@Y~K2HBa?6BHlJAAym4f-u`o>~W{Sk-%@ncCn<)~TH&etm zZ>C6XK7obJ>G^GBw3orb_JN4`5B@e>l^I(34WfAQZWO1c{poqm><&D^7$&K)_o8!_ z7CIY)?qm{Q4u|4PvxAhf58?x46+s=oJ`XR-KK1~5ooF;x#P6j`(7w-e$n92dzyHD2 zBg+@V`8L@Jdoi46rX4qq=)?I>qFzlfG(|`kjjf&t|M>_G^7D!JQ6JzOPUjzqi6@bn zJK$hvv@_k_nRYVvCvh8&tTwKWs{BKFQyoyg66L8&046J?9YHkZ1~?*-C-ObbL(u?h zR*?Xqv6p1J!z?mge#LZo8E*lrl6N*4M!nZ@6|Cy_!$*Kjm1DO}K@F=CHxapP7rJwO zJx{@jT4|D9{SkagVC*Pm%v5>gRe6cEqXlaY`%rSK3g*;m%HFY+_z3)0fx%oip%b1lGRHZyq+>?GEq{> ziY#95r!HI|E5xKxmqbJ*4cgEvv;j2fiz6k3ib?w4z71HLHKW^R3qvB2<6hK!n{{I zBqsVJ3MOZ%Lp-&;pbz* KzFzqG2I1!$g`aPNcV7Li0+am+*we}9%{&mNAy0fiCs({xfZ0`I`WV@fkvrHA zN~o!^a*4&Ft(RoT*9V}5j3S?z2Q^|=%-rsEpBqE1nBDD_=&|0%OzO2hM2fk%kYSi2 
z0>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322#he^;mNyZ2H8yt=7uHA+ zMWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9F((%pkqpGl$VL4OnPNU} zgaX-7kt$g`T52r5XlWG|2BXOYExR0n>)=&vio+>%tJfWNCU4p4j(eM(-grD3x3U%P z)gw!P?+Um)`%w4PpqK8@>!;sL;%{Fb|G$^R??hlK&GQhjdL-6hk~l{YN#Od!_T%Yc zd#gvX;L-XvoHU7faUcpOuDSQuOWNy3%M*vQ9oo>t8~d8&w{1sfHS&k|;gDxweAs)#t%~RaEJKbe!WDpxib$>u zMI+ioaLxc(KjQ|#3HVtrU~ho zI9zBVdNi_5}P zl}oRH#jFhMjJD$hu2yZnUi!|xu&<~Rzj|-{m3uE%iF6+|FZ+vCVsDV0m5s{xi&f&F zkG!o#Jq~ONc~Y0zuI z04lmzCEBQn7JarVq7MX6(ZwpUHy)xSlh;lct3@EVqioiX566Wday(~a0>20czhEviU2_mNIjwaA?b~J&GOjI9e z0Jr#LEL|g$V9~uErz5i} zw^+r|b&XX55h5;!SS3<&k5wWinaJE?6<~?c8G(M}kESjEwGja32>A})tmB~o&a zRU##s$lPKTS;TjaRSW?lDu-CbQgM$}EESoE++r0&)wQnTNDwhOIUpNjd>oLa6HZhV zzjlvR90?Cm@pRlp#nX|A$}LuLbX{YWK!k|PAy$c$++&qUNhUJ4SVeZTIL9i601=f# ztYWFS$10YJOhj(6ilOQnt2h!wOb)S%r{m**rdajy22`3Kw+h08RzJid940VFMTZD2 z46$5tZwxXsnWg<^11bd!72BD?-4kM(MLi#iTrR{%0?ke)#t%23QZV7fNKG8|LU%h) vl2RcS7=lzTx|0EBB@>&M^HPR0+jI9fh7J*37-HVe%g#(9zR$kveTV-)ZVZHo literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..19e8b20ce62d6ee2ec51efec446381bb897032c0 GIT binary patch literal 267629 zcmeIb3zTF>eILBHdvXtC;@-bIh$0Hcx6!$uFZs}*`Sy)(V5 zMm^m_ch5>ozI=wn9}r?E9NWjo#y%&B9ZpD`7zi8)Hc_x+9AZd}V`HH>0c)vc;quUl2OccpK|S?1OEU-f_g|G%nRb!Zp<@0Wkpb$UDEr`Dd%kt+-uDiFEc@K@=fAiI-Ue4~kB5Ve zbKSGO$+SH_HSJH%_GW|eDBgF^u(#2FY&_iTPg*a8-P6Z<+x__R)r0ZArN1KoMR8-P z+1dxYH)rS2UmHiF)(^vh?s#Xmy))}(k4)n$LG~2vK+KNa+b9p~LO9Y+HWG&&_D=PO zQ=Z#8hW-5rZe!UU^=D6vCy#fh8{_SMd$T{9_S=v5C!_wbeaC1p8}x>QxAiCONpG|{ z-s>8y1lEOs}Bz3DV=!{PXv;doVne6Isa``o}7?*-A=F2M1h2OnQpeTSU=;iFbS2?w|2iy1@vhdw zKfV6vpJI85TF)!-vH*?MU7`5sJxTcBOh_@Rj{>F4z7{^xfV=(mac^dH_hFlhFX4Vn zLRoRDm}$Si*@+v6IE#x~FD_|mKfvlz8f(zMwHCla`egRt9*9n$xYP6yCVZ?B!iuLm 
zr{d=Eol{8&kLW!agv?;n$^{g82uSHRp9vzkt({@qymM!mgf`s}B1BpZB0_^r!>={q zCU4J;n>TH4hTs~hBnX*6_Gc2_7b1AMo$z8f*_#Y{r-uELoz~@~s$qi*7r6}sw+_NS zl#(a?&F-kTh3X%abv&~2rLcEnJer~Etv^X=!3P^~*p9FEP1EV%%qU*OUlU7lB>pzn zx&jWMT4J!-oAq(U7dNgp$8aRMooT;|ik8O*qchnf0U{Orn+UG4l-Q)-+l-g)K&97g zFgs@}F=KKH!Nn&dxX#4IsX=eb6;-HQ98XF8aXqaK55qO_E39h6lw#}oq>8$1{Q#<> z(lT$YwUD+Cd?lr>3fJJ@MetIa>2uELJY&&oT9*>H##QQ?^~5Fa4DV~04JuQG;DA;F zNF24CPE_eV&VrOXvQE*Th~O1Aif#+OW+j{=?K*R$oBMyT$QgI`W~eTS7xC9b)hj1e zLXK^Nyp}3?hWfwh_80}q9tx4W_+OpMensL9AclU}q>Ods;;2N3_vGK0qh8Lsy_#EB z!akG>dfl{P?akuFc;T+w@4wleZOYKQEW*f%-srJ-A^rcvmifZ2ETM`ucshbR45IC> zRBhjxLPOhRw_Z(S`3kh-!#1&;&PLJcr(J&fFAdeEhK|zWvW);tFfBa8m$KpZ zN;63F|L@IxRX!Z&qR2b~AZdh=lFvC~0`N;_3@#s4bW0Nkmp_&`YrHT@51m);`X%54 z{A)ApMua4Iprw#BtB`9U!~kEm$}IvQV8k)9#U%zYXeP#fu+OK!!k&S-u^EZe2itMnI&145ZVGA5vg5LbiE@_>>z` zK!!=pw({lyH>7|Jsgs>OQeZ=E%LXa1BIMhdkwKIj0Bn!~LPRG(d!)dI8fVJpCf9}} zw++B6ZkbvD8>GNu#+FEd1*LYn8Ni4XSP??@Xked&2e&?pOu`B&ARp%15MfII4jJbI zJ|MFz!3S1I0eM<%NsPrDdC-g$kWXsdTGAVTBY>wuC;#T5^FnH28@XQqak| zdQL$cQoz}@X06m^PfHgMu7MQ51}VT)I2W~^bs$I~XT%68SRn+m`kxB*Yi3Mt6B5IoRQNQM8o&r*jrz#m&61)a8l6c{JJ(ze42xcJ0( z{``Ib`vrHTXbdQ$HyaH+Y;5c2*yAXY#I((>Ym6_6q1yadDw$Ur(?xCX28t^%78 zFjl~&dP7Ade=EStY|!u&WU$&*V8t1EO~BYv*-*2>3akkEVg+OnFRXwN(HWmTR$xPo zGi7s|YeUi-D5UhaA)CwygAFhlQ zkO#wr0h6CtVFky|Csu$5_r(e@70yLVUgF%{r;SsHml^ORt@ahuliFjipOm)b!D zG;~?*D>&wW6%e_Yu>!^h=(K!61FLrsj#a`6s1S-t+Ug`Vq`TBFk=>2YW_&~M96DaG z;)#Q6^dy4o;8@%-oJ1fQNrM`sM^v(XoBnMCbe!TFg7L^e2mTzE_wv8{@BV@%eh`+c z0z{~ZD@!B+of3C4n69`UOoS5xo4xZ}29bO-g0~vrKQ;oIJw+D+dJ=kdAs{stXYdxD z5^VTdNTE{1p(c|LC|+)#g1>n69e4zP*nrzZg#nC01hh2tibFtZVeQr=*sp3|fR|)p zuI-Uv0TG*rP55af>vsIDN$kepvkf>_&3uGrNV8lfkx>%NR#W*1-AYl^`3p+E(rU7q zFGkR*CQYGhswT~l44YW{od{lI$(sHk1rD=QcWzPZwX`fg2A%jwm9kiM>_UXkTuj&f zG_O^2cAJvoJX?Z7_|8=H%9enU2$4f0wDizrTMNw8BzSWbsU&{(pmkthiOStqCpdJ@ z-;pZzEjVr!!D-%uhzghX6ajqGK7Unxs{%LOiLIgK?QkkF9@%hvwIWN6;;R)XTVHFb zzIE}mM%$p9AIMPIOgw>*!Y1H1gcEF$6b7UAR=+puPT#tdotWI8OvaP;as1OQgVU$` 
zlm2LJ>+P@O3oX@oZ0`{cX>^(he|N+|a^k?!`eJ&jK03IXNq5cGq0$lj zi?Fboo=N@g2ySKV*KIPB-so|1R&h+`vUhzCn#-=$NSKUDF6fwKkEvuAU`&Zvj;Gg~ z@aOR=IV%}xVO$4bX^d_@$!|l^V_4{XQ|_t&<#g;!``I3@mPNY-D=G}~9um@th1$Cj0Zvm0k}hpkO3@>^*T zF1)K6IE)T~x6{z^ovrlBj;+M*<4B=N1#zL`c<o`jYs`y##uW{sCDz_ zUdH?vKknud4`p6vTQhz?g4N1~NwpU*+&UNz6L0-{QNXvH(KvY-z+kl5f3hg9R8INR z4Y#byw;x`peZkuU_?n1>(dGv_n83;-dl{m?DRjgj9H! zlTNI0^xM!1<_vA`y`XWxU$eEaoSS%0{!PNIYuY%YC1Oa$X4F&q%8#Aqp9O&IM{OM> zkvg+fa7EM(PHYD;hj+er0$)j66=9`&g*(JBVFdT-WJ zUPp49{mf5c-6mC^5>jQU#8a6ed`;MxrJSLFE83-^D(f87ysO7WbDaCJ zv?QI{Eq)>eC)^EX7NRxlYZLxjxyn2<>1__sYUY#+*dBrPE$}kkDCv0JbJOmTyoi-* z!*=22{SmxGH(0u`l7~1m>xtdG3pv{duwRJyw{oyL3(=Q(knG;d%&wb0)Gj%cREoBX z#P64aK<={>qHr|_G3qnnUiJtlNmcft`i6hpfD>g+`N|2kL1Z1QnxtxyLt@kbm554Y z@ZJVI98yxVoo%$s@62SpgEn~dxAVPD$#DtL>Z^|Az8Z*jSKwEgveh8>*A-w&tcmmJ8aipot2>&n&X?Az@&%{mi ze^n5dV#bO^2>3w+$HTcs9xYIcp!iTgou4o!bX)(2`tcJA<&Q!qg;lTcv z#bg|Pa7GEOraG!UMR1~0YH`^Hexd;nhSRIq0^`@Xl6(F8Y}z%D?0zCVO$2PbzX_*8 zi=*5qGi`Fvst;S~LjfftDzq@8P1+$p2yco{hUt(+Eh~$mAOI8{P1QJXfJV^dkcXCJ zKDs(hb@ebj7{93=U1`9KUyR@ZZ?DQ0{V<%7#{lvME?$at-{Q!sofvq4dolC=nyhOub}*&0WhMX}~=pjqt=^*hiN=qW@3)Lj%JuF&h8Oz!1eQ zV(MU8Xc6!PD)nXqv_gSiJU!Bc4@7W(5UHiGPFw1@%rdw3wRgp4%k&N3^j~^*ei)gt zQ!Si0xbY-)308Ada}+n0inq9hTJYl`Fz3|hG65b>&qauLXJ71JlP-cym7V`c_1p=L zPQWRhIpf*k4JrVRL?@cbw(PWp46V0fe>#Nf(Od2k)!S|HE}daeFj2)2g5(~aj|*mn zf=)}?J=eEFLC3|e?C%mhK~y(-tx&u(1mZ-egucOVRRoSFmK%wSBROcUw)WxQ3MvQ` zWbnF#KZ?W(H^7<%B`5U*JFG}`OwpkTJ{h8npmWSCo}!EhC{CoJ0NI326=xG_cYLvL zZG~vT&>tqFb4tHKZ&si$mT$9^@<Y=I)6EN}Kh7Ld{ea0OY09&0G{EOe#d)Nfp>{7?77vLc|2afxL7G z(gOzoc}1EC*^WS1Aq! 
zgJK(-KeR)~qp>Cg>PxAXl&1&+g;HxlgXw%Im+9eskN87J;whh!4wHLLXw;uQF`l4< z1UJUp{Wf>baegXlA&DxT<%gpHo6Jhp3BwICyBsXh9zt4W&VQ>3TXktbn%`ATXuWn2 zpr_C)hFDC~rBj7>H^6^IPBB4zb|!g+dc%Jl>c^_+$i9-O-uESY@o(1}e6ax!)n$-d zq;vNdf>zjx|7wm|$h5$-?z{d~jiROZy;O{}vg4GKxfrRf*obFG@cBl4*$SCv+Oi?F zCv}>CeZ;v7UuuOkDLhqMYRQzA%r}3kVJU4>Tv+kfLu`dtgulG9D>~)cY&iwC zYSSx*xonM?idI-F`yVxi3kKn-O$yk$$8{xRX?2LuAKiZNLexrV2)@+-|80nR`)0AA z+Qg5MA^-jP3RVm_k*)E=|BC9X*XR!P`W0GOmNn;SAM;~%IfvM?=K$KnwH2=ER`)m8 zMm^69@Mlffu2*E*os_jXQ_f#9s7->RK@|2np*!~;gVXV5n4Noyppo-w56Au2CQRn7 za1NZStpG_K=6tAoMAf|sr{Xu)s&2%iy{dn%LMd-X;4tcy!*REvjbZUV=7(hNQKay< zD`DSeZ`SLc9#6J<=uB@Ens2}VX5DR8@QDVz+13keMI#1;${E6TAXe;etvSAxqc?n~ zX`f|nSyBO445&YM150|=vaQBuaf(D?!xwbU_-qs2QjfQk&C4xU*eZZ^oq4L)Cr3E& zrj31HXvpq=O1FRFQglov>n!@x|;lb&uKg)Jv4IOT<5A8`E zO}EF>ez%uh>BIdhy(X~OmkYnyu-|p9VZ1C?-Z%0XwYplRk;AkIw#MFcZk z)LNFNa9?6}=_34m1uV=`h>E-{yKF3g6C`g6ku_vS0H0~X8CL(g4bzomQ;xz+-j&=H z7eY+-t!*V%WV!V*TA)7)562HyD$p$l$NI7@&DY@5O&H8cm0Uu#etV%3-`$l8NQEV3 z(Q7sDN38MhN4$lq`_1rZ{BV8hM!S;!euejwc*faqjg#Do-srKcuUaDCS%>?1g#~K^ zr)dHHyu!hQatU#j08_y6{ipStKJr0Qk@ISxETEGOEB2Rb@b{csZ(yiW zsYW^mBiZDQo`lQNO!x0euGO*0B+}WcDncf41-FRA9_*DHnO3KTy=~9{_5j=;-{%p) zi0I1tu=Zs5cvE(fSa8L0Y0COwNeel%c0DHr4yXdV@JRen00nZE!*HZ&^wt2KQR3+X zvGG@B|ym)SXDA^nF-XQ)v7wG5uamo&B7zDWatLh;C-IC4~Ja7BZwvTUCz)?8#&raA?J-c)WWufonxB^6SZjF_=ovvnP{D z)4L}VW~;K$P(7K92hW~N$^ny+XjluKXk@(1uP2kTZAeL8J((~dYuA1~nLt={O6vWs z&^?(jFvC%;J=xKAIk$)D$pnT}&TQ_6VNCxQ1 zc(N+536}!P{fTE!CS%|N%YbE`y?Zhl*M)syk#kv67Y)zQ!!x+jw}Q+rQ_L#}&I zmWOPwo{VQ*?99l6d9`Bw^;98N3{MN~YF_=ovvnP{D)4L}V zW~;K$Pzg!KgJ(}B<$%dZG^~XN|75&aqbuG$nUrlqO7iN-gaKK*_Up+6!lF~^`Xd?{ zg0GvW8aD31eKt zHti3Qvy??Sk}(M=upJ~1dI?0U$->5Z^u?q5;lB8|?~6y0SYn@hKIr$tA8%T!Fv4w` zuO&Lvs<;FWbjLfh?VVY7n%=xNjhA3?bpgH4yjCdn3{~nQ@KF4Kw^BJ}@b=)9~XBcoWM_)m+Y+nj*3JWgvC{{JkHr zDAg?dv!?4YWVobZXQ@(MsGP=*l}qFSubjpb6m@8uW}4KbTA_0vJ3ah8{1dcexO}qvW$1cOhc8bC>+nDt95ocz2-z+gFQ~ zd_&@;8h0Txw{w^L)4|+A<#+I9>D9}u$MZP=muqT(V=wy~1Basb73s(@ 
zbZQ`CTJjJ|DNkNb@+7QM3BflTa0|;^Wp>+wqxuO?c10{ZJ%jAa)>XKKhbZNKZlO*g zE5r99SZCR(E^Q$m_(|JTOok91{BZ=5Tew*%7?j#wDXaAY3mShG!RuI#D(e+>KWvSQ zlJIg$`Qd}SU)8YaGf zKLW`;O;j>B+G{0ggzBkN)Q!-Zy0S%u*&gi?KRDXA}VBp<%qaLIO*MKM75h${(eLE6CJEKzxxq0uLNBZ4&# zBND@bkA#a*;LN{{(=RwR9@Lg}HH7ngLxh1pacVahzMc(%;7^^J42DTD6gZT<3^kV~ zgL%AMjb13>;G0hE1@cGH3{$Jqh^;gjn; z6ew@YOBm%v03?H5@RLo4Hj#>0Rp~9%_i76U-!$95C-Y1YVe-V5OJPF9R5i zHv3O@^R6_?DoD#;>pipx4)25OZ_;rk7^-J5AKo%xF~slz`i1FAKrv>!wc=y)j% z2uaOf_NIgsChft~E>I2Pp7x>Tlt#J}hc&#Ma4Q5@^EDT+hUJV4A|A|?LV$?9^65$g ztVxP!_=XEy!?G!cjp{H0OU(hUVIMDBQ(@;GDv(#v21$$S!QXZPY&hnGAmJzJAcE0| zAO$e0uLZOGqVG%f?gbJYzV6sC)37J30f%(ymWQE>u?D{F*d{ZKiUGo@VPojIwaE;D zvQ;T+3XOGZQDAokq-~;8gUqrggb81HYfwt&1s)Hc-LPGH+M9gcH)LZ>hC0vKE}4b` z|HO?K36NaZqG(~Auw60>$+lJxY?n|~@Rm2l4od(A!lIeDV7sOva6@eB9KEn#GKE$N z`*p;23BgPsTqP^nxaiX1k^xHFgp*y-pHL^*mcNsA*ONm^knMqb;R zthI~~gB!NXGwGkYIC}Lq!DJ{$Y?nNq*_*7^6=M?#?OyKoBC?(7iuD+6oKCkhIW14R zVY_CYy|G;~gd#uj&9WjfI%2zIXr)Hov0Wo+(O6utUGpFTuw61pIaQgw72L30GGMNi zXInCJ!*&Ix#U9K7Ulb_1V7medNlGiHl>(;l3VTjj4&1O^kxe0~`O7|#Obj{R5!>z5 zh3#U(=MmdwIOBqdOY566BjPQn8qVKuB`0KXeVhT15LTWXv0a8uF>E*>jtn&yY?pal z5-w;3?4bgAWH3lN47STMCj<#UNf!aaLogZIjO~(XVDPE5?8RfdE|oADz^g5G+qX-GR`BMI?HUM|T4INNyQU!mG{j^G zhlbc~-!2(2(+8KTxMI`L9!NG5IISm;*_85=6bN?2cF7>v1pTpHj-E@>;s{tLEen+h zm1z;+eS(G~w#zf=pSn1DCbTFJ=WhFUxtYppT`@M1+Sf-*!BUtTwrl3u8`~vAcqK8n zeY<4nEP}{1>W=LiNsGqfg6*0IIlrmQ4cisC;*wdQg-SAnU4G)17Yy;*ao;YP5ivy} zojKr(0!0^WS0EutX`!)QGNNMhj!E`z*e)5A$X-Zl{<05~woJ*<5!*dl7q*KDpGRz$ z;fxC+=N-JsB_jgGj@T~4rU>0QAdU<*7i^b#ToNwMKWu~N2X6azS>}Ww;V0=LK)B$f zcv@P>#%_bDk{vy&L71I@&YU?l2n=r6F2|b?9)$BrFOYcQi0yKWiUGm_Yvky; zwaFJcwu@PG#CAFPhzGRz%DeVJ;kIx0nA@2Q3^b)ZH^B*mB6Biy(2i#^5b79wssY<2 z19-K?ZrCmvTEUw;wre0BLZ0vexM{Ji2 zf=y6ph8=<^N6#f`aRellZRkdrjOW-0yY1U0gJP)*6|S$;i`3QoA z$fl6g{AC}=!3=Kfi0vM;!FIVKGBkXHXP^Or!R<^2hBGdRoOi4yXE+eH%djazHx7s+ zL(K)-WgeG=i}R1|;`upZyDW1;knodq5g=T!T|BKCuw8~{YLbp=sBomcHlbY;MdxDF%Or5Z} z2;SL%JA!CJh&MqrQ3M@-@fhqpeQ!W=cP=X${=9FM_2}vqe=fL}={+OR|E_qBpqgVY 
z2Kh5NmnfDV30~>i4mJB?ZByy)A~o5hHhT=|SMtuv;BAsn2=8fx$qC2-g;*3wA0ee? zkWYH8*$FoGn2!48tcX^*UMmtv<@+QJmfFBEN)CXh(N z8oWJ7WG`1PL39vKRvM|mKz2iY_BQpf-5%|rc<`?L~ESdzjhLXNhGJrGdMHpZ4UaQ*|g36 zO{U5_d$T)thTZgm_H1W+*l%x)N3+Rz*xl@J_t6DTqm6Ujm2!OBRYVYnWrkcAtr+&F zuL_U)(;2#${cPO0dY5Lu6uvI~gS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD` zr!vWvXP?II%c1zD@a=r6UYNG0#wAk6adE1qQy?af zyOp*Q%sl>_GGVVke9Sr!hpvdRI;2LetBAk88}2J(F?OpO+$Ja;P<)sjfd`5u zG?H7`k5}4lMkbM(^sOPNJf|#sP?m@dZp^8uDtPR_16NkD=!@e>7N(!s4U!Al9Sj`< zva(ox4J%)bnWq$z0GqWu90fCh+~hfB;BR3SV81ZJ8TfkD6+YjsW-|L$M{KEW|I%Ja z%B*L$y|pupn`ouoY5`ZjTj0c_iZP7K-<*JX|4B}Up-Lh@@yp~+XS#T=i7?H(J;{~k z=Sh$*EK!Ev=r=<|F!&q^Xc}ye1iCJ^042V1BzQuwITBPS;2cS*9c+%I2P)>+Qta9) z*1tUC9DFN+7byaWu!~&sF=mTYq6u}uW~wBz zlU$6-1GH1@Y;Q7cSKpBpJmQJF43djc*+ntki%~h-;2JKTDI!;TLHZAAqcVzAi#R?f zinyEyE9Gv|EZj+2vT)4FU%*kZ{L~UWPtJ0@aM$?m?c~L(N-CGbOZ0z7E!D-Kd@(4G zvgLucu{k2UL4=0GH*s7nm9q%yoq)@wGTZ|&?VauO*U~+YTrGde`o5(-@vimVTjS0C zkbN~`kE+OqKUjc68Oh^_?#WJj)*nsBlkTXu)t{1AFW$94M)=F-`d>8B80q!(``4?W zPV3c>*!7pPags;0iY|HGv__FPmd4NJJ?*-2@s6~rW{S82HB%(sVy1{I6Ej892AV12 z?#@h+SR!vyYN%*G{B?LCYgOomcI##Sl9|6v3N`1!

N-HOWE7=n>?N7*FpEr=Uol-?##_LuX- zMev^l29H@YNJ=YFcgG~dc=4PMq~KZYm}~GS0&}mT%-Pq2B73hYcyW0#8X;Mrgr@SjPk zM$-Ke%B4|-d9QX*O!P;zOuDEQkECExqt^6jd|Yl3_KHOdqY_DrnUomRmohF|Bx5O9 zG->gR5nxN3;7+sK8xHd|i@c@9{3i!+gG;25T+07^MEu2y@I*)W`Ka*oG2!P|2|r&a z{CvIe^9{n!H^O@_hlRmtFw5CF$}O?*D2!w`(R4aEGfG}%`w&_7#gDyG^H_m$=0{Q` z*BU>~Q#XD-uO0~fBPROj^HoP6`*1M7<`GEXCv!6MK-Oz*M_;Tj)L6O1V$s%1GUOWq&_YI$&&-1wF)LkmR|6J@1FSnXRdDSO8@)7|MkrNz0bV- z9Y6J*4?KO-kNo=YEy0gp8Gq>tScN7!5i4GV=*i{iMEdm{`kJ7xZ$n>~UjXQr3+d9$ zYPNE7;PR-ku(-RqcPZWj=-B1r2)}qAG|@gj^w8U*DBcUvR`KoG{M*Z5ACt7vFVS*P z_M1FEr@z?)jbTb92H2f_nEAewe!m~~?9i(UFqz!m06in4qKNmJbQX%hA8U zlaOzwN|~MXPvf!DnV#(pclzlh&<{DfP5-`(gpmtz8MQ&^ql4_c#0>tX<3_3pML!os z@PEUeb^K}rJ6g(pnn5u4i+Eu4g9Z18VIP`AuX}nt+3L;W#p!HwD43jX%{Z(cnB z`|)?^|8Ktiel-5ReEi#xwBv)Y9}U($)!TS{dotdxfBbvl%Js>(x4F@q&e9pA#}wW) zIWtYoWqJgSpMA5EP6YjClKv+9CjHIwl~n#o<4ALL5f(a)=3ck~c4te2$)!Lw*< zTt)|TX9|rM@m7xIO91}`y8^7k?$^I{r#CFQ&2Hp2hhX`K=zyzNK-{^k@S6SmF4+U| zdyz&Tg@s!Or~!;&VRNuGU4{WVHu1@B>T0ZtkyVq)Ax;=edNHt1WG>2a_Zrx98#;~> zosg)SrRdw(MfM&zh`a-?1-kf}PG>Me3pS}x71M!jSi~=vrK9!GC)2N|(kGC=A{*J< z*+`#HjT}|)qv`e-`CgA&|1SOyry1E~rvy_d7q8%WsNw*im&9LvDVQ-`V*m$_vpOkHfHjhKHe?76q!8{RTFO9YU9wTS#!e(+HGJdR(; zAaW-`f+Hu|FE5_R{;Dz;6bJK9V2|P~cMQckBofTxHj1Q!ClBpG$zngdwF>>_3WTT# z+hPvwNe;pu{JCi-UnN8_ovataa4?-+ox0g8u8xjGsWgU=?{0A!4aLH0qrDD|)*tpa zX7MGk7j1gj9Lxse(IH4Rkh)0vHA^KbSGxyxA5WR){1O>x9{!io$ei-WMvE5${k25m zx52^VD4tFFo3{*3pYBi4@vVKOVW>PLbt+U%9N#&md{m4$I4mk#i8O2~L9ET*P9>kUR}KFN!PM!akJ z`Dm^EvD9+1?T+l9kY}|PSFT;@TzjPR{ImF*!;_89;iHGo^sd{y;q>WV|HzH;=fAiI z-Ue5(@XPqrv_Co9OO+n8!Is^16SaQ0w2=T6daDQHeM^5u{)^C}RK&PM&k@Sl$0^{f)zxY`4W*1Hcb!F!p9mRtays@Z<>UY5FXKcG6>Hcqf|+y_(U@wvvAr@fQin>?)l; z=Zu_VXsD<5#x_2v7iu}PRPtGZqr{7=>1$$Z@VYWw*VSGW5eHW#mf8Qu8BFL2=H3|rJ-jjA2qWf4X)q$tCS z5?kgSX7Y5z->@wSXlP9`Nc*0)?pgcpvQLi8feqFA1|EfKQ$uIP8PmdZfn4$Om*iTw zr$6$@Q!O8+As8>ZHXcp$wO-R?b)yC(KGA?T8l2fmFWOd~yEWd+rg^5+^Rk5yqxzLm$RNP)i?%5ZhFFNloFL@cBRjq>X)eQw}3auDSOTrNQLkHydzs z0Dr~}ei$=CEq)k6!R*l_#AhNfZc8UAkn1gkP8>%eE>9VTmNgw-_98YO@bLziE`KZn 
zzR+t}3nd9*gKnGfwJ?E?DATlW0!a$V35|%gXfLE`Ka>*0|QEJcLpi z8-RaphTVve1P?U9Zuw({|9SAGF)R48Rc;Xg0V9r)wx_y-R5Ek`v6#1xscZ^PJ-~p} zG6awT{=f{@pqZ2q0*ert!y_i(m9>C^OHa4}{*G0=VQ>Nylu-zwn4~obV}lgX{pT!t zTYaJ53n?ITVB@+!Qb4G-HJmX>L3xqf83Z6Bv;3&LYa671P}K5LAO&3hQZ!a_)WZ!< zAkzaIq=0-_$BQ3QU^4<;_RseE#*HJo1+c(K0hwQQq<}oBlbtPVj7WJA_W$d+UaHhBT`^R zD40E(`Y0QufXrCkh8RYXwIVK^ESU)EJ(#_o>AI#2x3EGA2uZOeF&1;=K{HZ7KB=)m zZGYk=DS3xt(3cfbK-m)d6l=*9MWYvoCSGiig8bD>wix3!32-%wra5Y9b`!X?WBC~{ zHb?>fsM0Um!O}dif=;H081E@CVg*zPHSH>(?_skpJ!h%j`h$Sgm8SOKA^<)y$1q_~G03_wP<jW zdGMt(TN|w4XuikZ9xK2ts(D`l2{Y|fIt8yDz<>mBu-aE})L~x%k&E%|3Uccq&*eO@ zf}@qN0xE<;6+!nE(DTSxm)2ng4F0R_=g|!>`LRq_tjA0AqyeuD2xPKWyY|V4HT6SH zSOFQCkL-%|{cyb0f)!Nb#Re-N6m@#x8m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fM zgnY3AGKd#eK#1rBXpa@xP~%M5j1}0BOtAt(`CeoTmxhr{!fIau`LIT$mRNxWB|EIZ zicsBH0hxpqRzNmJhQuc zLhLKZ*)oC(V1pIpDyfPU;K$5ZK~BD;aRuI{(T>)4UAk~QH7l$j=R)v6BUXSP)0zZ* zf!~7z8?4}1Wvl?VsAjBygc*z#a1cSjGYV|>6&!QG3W!|HSOH@L;Z7>Bc?aQGC9Hr7 zp$PldSOGGmyVNg{-Hoq+d_(VCe?s0nVf|VM7^rc82pwC{^1r2!iNRV#0?=`aZwSVt zcALSw|L!k56_zvgUW2e)=|BX;)obU)6*?vEWH4QEJ(vh51U7rFspsnteMuO#q(>Zh z`v?5TMnJQt=t4kGLa#0aq{iY5-ojIY4POf>REjv%Wby&UYsMa!n^wKW4}aKz+e3u` zj6wvoH1vu?Kx$#_)(~sptJ)XfCHbCfd&)>afV@y@Y9zZ8qDm1JKHGp})yzj|hBSu+ zNMw`*vsFcpWYDU%Px5z0bR}Y^nly#3shTv4D%N}I1_UF5--+NgmaJ7iNFmD4Q1rcv zoT+nlSVIKpePYdP)tud?%y6D9>`oB;P2Hns7oUp=hcCxsL`V-^wza@aO@cR9kxJrc z4+;l12vB(Oq3{4tNv7xg9jRj9g5y>ZoCfn?N&{bpzGCk71|> z0*TUWeXXVXN@p?H2HpH6i^^tF7^&axi3ol}IKdWQ9J z8&Q&q0!z4yCGZUo1#6Z#N(AqW_#?TMJoKjM+6;6l7r7@Zz3QX=M1OGRu^By(x4h?n z*RHp}jxT{!+X~(z9Mb4C5&rIogJeg?()uFW(UHDv%H*VG>rm;A4|IvqD*70_JAzwT z`*oWPLYW>XcO}GRE_>Japt9rPr{wYtK_U?vxRXT zV6OW?SDtACXd^w}l)EZGIiA$M^6BdZ+2FOR667OrpSwz@Gjw6hMt8c=8}=sMjmP>M zk9Vg#8|cCybR$r9bJu5ukf&ty#Q4_XxHoH)Yc!IRk}FUpK}0Gb&AeEtDA72{X&tG{ zrmKD2qMeePJITEUj#kBoJ>@V|XoVG8hd2Z{wyZRt-Pe~pY;9VR!^;R+-j%`2T!2zn zUkyNqz}smkz4a=+`YN&eI8ta*L0pAC6n>YYgz1%9=qQwCy2xMLq=jz7OfNOyP6ksN zz-VWyi>_*$qMI4p+3kq!b9HNqQAIWX*|jU}&+Htc#8-?jf2`^k{PJcMF&Z@psE>ZN 
zl>@QNWQ*n9;o$Lp-FlK$9cZD(Mt$%{5v)~aZ9M8vGtSysLam!W_cG?c_;ELvcqr4J zZO!<7bSI#@_Tub{)Wlo=UKBDJ7cO`ickgN%G8JJJe!2nISLQvx8Zy5k(y|g{y*dlM ziBB|0JTz9wNhOuHH=tixDzvBR=KV}tT5FL%BGd?7kD1@@TUYVs?)3jBg3Y=`z!c=Q z6-v$>)4yocCxC$cRBeLORE>oL2|XPAWds{_Sxvc%tBv=v(O7nes7XM#Mh^!Y=ep>g z`Dxp#g0c)8nkEf5k9COyezpns2H>hx@OS|Ub#a@_>>6n{pZRN0=~7x);VY zj%(f+RdoqD!!z!mLM5ylcL3a)xwG&|Wpw_Qrx9Gs2!+}rOBZyB7mTqMi#E&s-jL5k-2 zelFs#(NYqe-x>@P4=S&feaSX7XE;(!z-zlmOVX*`;wMsY!rf42AzB)&Rf>a6_-o}V z^US2TIY6tKQ!ZdjA||_#rHQrh7I>L%lytoAxoP)EUc@R^{@Kx#nuPa9@Dkl%w+@EG zJj9tDO(M4pDh;2#&|TFjEAXE`Xh9~*~Yne z=??UDHkh50A$jiO<Z>&ld&VAX04>#d- zcmpnL3pszGuUdjjQnSWrIe&&~<`H-(exN4JbXQb10zVbO-QGGwl^0I2?3NlfyJ|`w zNznv96Y+P8J7s=;QWXD_jAuDhc%}i}aI#t_o3U@pi^-cNXQuI$(qAW`b(C-yUI&pc z2{+AFr?fTuCGg_aOX7V?e?|U_gdYJVzkpN9+q|ORlWeMR4|l38Qms>@F+f#0{dNQH z3}yT|V87LG_Wdk1y8XW1F2=mDkHORN4s_+Vtp?nguj40M313C**MA8fA+0tTab%n}A zDKy>EG<-O)|79^5M<1L~LaV99YAjRrFI2IQr)%-s_Sls4-Dg_@}<0LV+Ho4KeOXen%yFYJSWymS&G zCKwLnr9+S&H~`2i(nKKd$3lR-l&%Uq3<~nn(THRM0Ed*OsL;ZUY5{ZUxH&J@4NMr+ zmkzH|90~@-Ha34~hmJ>MO$gMNQdKLj7SxxHOSw@k2>^QO@Dcx}9H04l%i#3s{se6a z?MpgL?roP*fA++9f({bg7;pF6+&Rbjsi-<6p)h>`et-{00XCVHsuPAAWOlo8howD) zRG62J8TeaG*s4nd()_M!LhH4I06m3XF~lm|8q{|;z<)(fF+qHGCV7Q=!+#v=->T@y zE~+TEcFAAPSc5M%;GwzqoSaKRuvwMhY6_qeWPEUgX^`lH*wXJP#Ctp@mSL)6FJDC~7YckVp~r{m2qJNFbpBj?jT zI{eorOy;d{4xFp407)I@e5iXw)x8O);y2f-Zp5R#s(-FRDQ`yLFzS`Vakrt3VevlZ zhh*+ir0_Sz?M3R-e z)zX{c(fHx|w1oC-8LsVyzhB|~B%X0}L>5XR$J*(QwK&P8-(`K(68SE%4fpd33-*bU zfSB;-6%HPhONgrkm;#RPKds;Nkq=T<16mH>x7nNZx~IpJt==qNME%_D_us53t%^tj zf6uw~CU(JbEjmNqTHf!NYNTT@l1<*|Nw_S{bpM{@S{<8ABAuZ^|wb3$8dWO<5l-X(4CUuIp8x8gMEiLu){l z+l5EshXN>QS%4b;? 
zWR<4wXjUa8^AI^Nr>rL{0u$G-*|d0T@SX}sm6%1R$b-J=s;r#~2fLbQos(aIk5<^j zZYEoQ3Y5sjeha%T1700l_*7ZuIX#Ni`Y0<~uFEYqmjESKU{%SuWhPLIR;%hrz2EXV z9@i{9;!1|bYz^M$nfq|ab#Kh_kZrx~)Xcid-HY&S3wU_IOCeuK{YxU;L zyC)O4R^%eTo=g~nspdR;GMO~Jdop3RDhmzOlgW7S?8&4YFd2!4wa|%1#>@PAGAY}J zl;qWu2?Mfr?bnkDghi(;0!HY*OBk5pNQV%4_hd)g<=h^oCleS_IkUMZJKAP@GU`F4 z=D};(&ATU)39?F0)(Oy)S&F_#vio}$_A&N;>dAQ4RqkGher`QkXC6J7Oale4=bOXQuX^42N9zo-7a9UOgGly2{;)h&{KSY-JujnM?x(ujipB6S!97BENMqVGO34 z^X$oF()8}hgxRVrG*m*8@!;8$NjaEDMw0PjpOJifGAY}Jl;qWu2?Mfr?bnkDghi(; z0!HY*OBk5pNY@8?_hc)z^<)A=DrYwLWGi#&$v)<}cTpxtrYC!;VTWdbj?7Zvg$Z96 z9T_h;(cyx0Wc(08Ix-o_038`mR^>C{5@5NF@ht0Q49qA0m07XRf4&`=jB7*s^Xka> z@d9;ZJWc)7sRjzJr!&E=;od`~_ z*~b3$*X%VI%U;a-<^em|)9E|BP6mIwVSfdmY&vUS!Mf zE5WZe?GKT<np`F$pNJ9V8EW2}G;O>b}(K(YKK9hx_8=zHcE(f)0Gp?}b0!v{Ye) z+caNGbf{HvbaUJps*gwDq4)uBeQ@?-+jD)&GxL-y6}mIg`nW)6*X6H7myrp)E3sLA zRVsq_`@MD`Xnb+;jZaPcle4|qU_7$F0Y)Yk&Qw%>#`9`=^{a_a_mJ&8q{$F| zya8`wxv84VSyNLaHopwS4uHS+0~V#4g@4v`J%$XIH0&%@stc9V*s*enJm8hnSc0Ms zsig$ZRP3nud)xHcgn#1q{vKxoDs^!g&FA%9ZnT{1m1G_&3;eExlo3rEl|>);Pz2YM zBh0D(tha&=9etudX%9w&*`PPsEH40@-A{ga(EB2ORdP5PVNQE#gsH_%*t zXL%{?O>5%bVAP*v1kuIKXpC({eRY=|4=Av6Jthw9tk`dwP6ua3@gn}3=sD)IdYKrf z!&D?Y=uj1;oyP~GGu-K1-m{L}Tj%a^5BFS8!cX9QfXq(jmd5q;0=is%SL;TaiP~^DzQ&M=*m3y)u0Gic=g)6N@EVpM9DCW{ z7&zp-PvTe}jFj@!-vjMlT%vEN$EjX&5@MKrSvePrjzHD7u_w`VP zJcgL0{k;g*S$3*RTZjjK(l!;7MLcnr6#Q`nl3Tc0DHxR6T`8-T1dS@T-l{Faz@J6% zI+ml#dPUujp~iLBK&ytZ@x2Dz>dCMeID7>tySkR0qJx!PSi$3eMxLP>Ig$`Qd}SUY zK$wCM{C)(Id&o_yP2115zT$X3>3J_(KY)7Pg$%vJ)yDO-+Bgi?#ILZbHp&gJ=3q4V zN(8TFd9Vu^lq)V=TuRD-#Z}A%7Zx?MNDW+Urkzo#nW?WYIy>>WaTANdBU<#tWX65y0)`A0x2*lq%B{m z7f$CTB7-mhp@EX4jVpx>B!?29enF?OKZ;~qJN>AY#poB-*$$QC0j^;mFI!XL2OeUS*S|WOL~0ak z-*3ABHXL(8knodq5W#3fkOCOh*MeDo(f6f#mjs7ee4bCbI^MqS*f7(uC#(Tqu;Jm` zj%_l-s2Cue8a9TWTbs-fC|i}HrqEb`ww`d0G-;dY)F89$31Pxl-n9n`H*A-l_9kEV zRW-(pr)lw5egjDu5SUEa5!)q?EAUU;c#!}Z5Zfi=^5Idn$<__qC1WUf%bQ|{B>)3q z(M(*hUDFV?G{j_3&5jBmY?qMF^r5a~ExImo%YjR?MP^g3X9HrpWDsnE!cIqrAj;8m 
zNm?8MNzy9Um_?}elCr$qHR#$A+vS<`PhA{6lXHZTmm{{z%~V$Fim{1=_6s6~xna9z zp6kMP$;fs|%njQmV`LFTrcrln*GO7478h*SJjnS?Wp3E6z}2%YnYm%R0@Gp-=728> z6kV`gfy9NM(%i6Jkxe0~`O7{`+A<|aaKm;xHrQ@p_y&*3hjvZ#?uhL&oN+-U5M-M( zQCc1nAzOPk-LPGTO)+dZAdU<*7i^b#ToNwig{nsL!C@ObKXAi#S>}Ww;V0=LK)66r zcv_mxX={$yF2k?1I@*oPi}M^D+^}7aH!eId<$9zOs%#^ahx&|h1l+J)j!`i{IJL

55)w#zan1PMP$7XiS-5Ow^z?b~JemDPTvz734+=1R4NSiue3<#^-5gYCef zu-%+rV;phBb~#4H0O5c&a`fEVm0oG64cjH-@Ajyg8&ZiHN>46)zOCDEp=Z@_fNsGqfg6*0IIlrmQ4cisC;*uF>Uc%?sNl%(>f6BBE*_PwY_;2G9kE@GH!eI_ z$2!TbDR+}BR~KSba>RByM#TW()UYwotXrGR5Re23T2pukGen?q+_%fiTL=@r^42+d zh#-=_&~UQKiDKMsaN0w6Nwna)VSEKp`oenD!iVy&(L;8xgJW^WaLBG?cs_9GdxQx7 zqT;z5k^!@_k&7Uu0z(41MV00$D>S?%1@CIYj|Ne;0KQc^$wVd5q$WGmmK5GHb?Rjz zcxMCd2%-rg-UQJ^5m@}iWAaC|wj{YbmlX|v-nYtnboC0C;0+VvkAtZpT*3dYc#fc& zV=e|siW7hBg%51$k>HiC?NGBX);5*yE>e?CYO}`+G7`O~5hf=f2NYsaAbrd;8lh{= zPO!1Zbkr|rMYPH-*SMq>l5|d_4)VPVl?!re#3o9wHLG-8ot#H5_LRbe(8>t4k7jnV zy2n#8@B^P*TPRhic>>5Vsm0a`fAJKYqO>t+Pdp71rMue#xRelE0!UxeT1CNR2w(N- zuc+}^S2sH4)E9ixJq+%ZC&T5lD5|`}r*r}DaFa-$-;hgL3K_Yv( zatWe?U~K9F6%1roZf9>(5Bt5z2;CgtouFIZNdZzmEfSvoiT(@u9-X5MyWgPFN-zG1&dpN1SM!{~`OsZp-?Bm^)WSN~O zE~PU|LCimaE(%Xmy6Q9RP3g%?%XG>Xo@k9z``1olFp1<;c?M@Dz0E;?G@G{BzsXd2 zXK!}r&aj(4(4OsV5Bu$n@n|*~54)TF?LNBwX|!>!yHaircNG!D0U6JA(TZVzT9S?W z(;2#${cPO0dY5KPBEByDgS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD`r!vWv zXP?IJS?AH0#P?^=LYuh#W%x<{H%VJRxKvpwkR(mx|Tw=BKafhf$ zpUoB)5{K<4#oSS*cuoc0O;+wW3|U(+lB;Ftm06n}CtlNrYnRQeM2GO}zO@75TOEE) zwL|`}Lky+EA{%t`g6C!s?)#arZ5Wo>uqI+^?bo5)$h99J5ErLvIt60#xQhvrXPpBv zN8n;0?zgjMrrWU%#HDwEnHtz7s1kittb*}@`u>5K9O))AWMd%454!|nGOS}DW*&b| znXp$N?zC+KvDbDcEPJWBRtx3<0&#H!hRlAA6)}0-B@mNmodYpP;5lW*UKR1t^A(7R zKBSgyD76Dr#N_>JnIRhkF@D%35R+jY12OaXbIOFh0`XDnKzzO{VywOZfw*{IMP|Ro zKujKY3B=@C=RnL6curZdS0FxS8;EH&;Zpt*;k!g#NrM<55R=W)GD9{7V*Id6ASS~) z24d#%=adP11>$4Rnm~-z2VdU}_m#02yHyQt6O;}pKFp551H}>=$t~>1EA2KTlSoba z)(}*lQ{usIS`C*T}OsU2*Nqz5YI*i!8nqnX?{NPR1U7byaW zu!~&sF=mTYq6u}9#2+lmDzv8hq2cgN92ZOFEP{F` z;Bu)9_W(?LXZ!ro#m|G?XM4k)etgOLzNJ0!uJzqpQ%)AU8^hjo z8ea|v<4dzsm$DDy17ua5{?`}a1=+{$MXwW$CyMyJddp7uw|b;?F`RFc zov;_fd1l&iXPUkoL7sZmX6f7DP3tx-?TiOJ7n%&-Tn6FvnEiG1WasW5D zMA^xu{Le?kU#tjEbcCOe3O^qcetwnk^L4_{*9$-2ApCqIy!Ucg7>ow9oSmcG5(|&Q zNOluVr-L)2)@zXqYOZz1DUF65;Gez_EdR-pm7WHS)yEIJx4j0?e)w)5pk;jNHL~L_$rCl}juZ 
zZM`Hzz99fDWEA<#Jg5<~V&-<2``j36#q4gbM3418W>T;BAyUl6g$%ht!gl54ID(2*%v%HDGtFgJ0xUfcoC=zusA2&h_ zYd$h6!ZbH07a5WG?P*6089x~n`GJ{{iyd)XxZfmTnn#cQyfmATm9asJALa;Z_?lF_9v6^q@68;uO3h6pNr^l16)gw1V@Kgk*UMh>$h#w7*%;r4IjopWHUT!e)lI07Q zleeK0@TM69=<2kV;c;ZKSk#CK(;0vCvn0mZKaex?dfVA>hl@*8^V?=)L$ggqV_|Ih zCka<3V3<#RF0z*s3U} z&T>@Ei%3?oQ4#s9wTkEiwko0zunak>2v-0$Dk8OlrHb;8+o~x4ILlEr6_KQ9qayNI zYZcK4Y*j=bU>R~$5iUS%R76S@OBLlGw^dR8ah9V(Mc<4-6N|WpnkJ-U>d-2AfGDft zQ#p4(Qf(}jNr~Ff5{V{_aFfrIRL64sESKb6Po-yaKDqOb`P!&7EiMaFRW83Ama;Oi zJKl~HIHcNqz5KnqU~f?+e)X>SD|cP266ro_UiKHO#QrclD;t&X7puhK0C`)RdK}mk z@}w@a$#3MdBxGtLb7q*Rp2{0C+x$i!Fa%1QLjGcvh}J@kDxyW7t%~Ra0aSFcO0-cC zE&6O#L>~yCqKj2xe=20czhEviU2_mNIjwaA?b~J&GOjI9g0Jr#LX9wh{$VBAo zfK)_|nU0+p`rbLy33PlMkYPz0&fP>s(Tcosa)>b;2@g^6blgS7(~((~Tdd;fy2dJj z2oaY*WNxvF zEaE%IDuw_Nl|!szskp}~mWoV7Zn27?>RMNEB#4-t9FUDMJ`Tvz2`4IwU%SUDj)aG( zcslN);_1jl^-A11imrTLs}ks~=?%4ilK8qC$E*Ij1fo3NY<3}4%DVT6#q$ZAfp*x%>NvRME3_&Uv n-N^v6l8MdBc`3u0?Ya9KLx+ei3^8x#WoIT4-*18;L9mH}9pex~VjLR_;smh4kaYnY z;^cbS#PO}YYgg^6+V$F1wR;BL5o{DGBy3kx6r%F276 zyEVDw)TB2UUH`hX{n2c#bI*7Fx8dX8h_VmfyZ0M+?3-Ts|7M?C`ON3`!dv00o$+w6 zd9Hi5H<`A_r>6bM+1_k09>x3bANDr;PmYIM{Yh&-?Ae-~+v&%RBT?)7;9z&WJKNcv zb*E4EcKXx!O1OOOP`rQnugQN=+*oe5UJQG(55%&+!Y;(@*uBj#!$#U+BXQVa?^J&{ z<+;_(J93gm1iu@>oh-Yf{_N@T5aC=+uhyK;PiO1-NxeFbK7_{owaU;rS9giH=V|9I2>O$9B;0z+YQlr3G6xB8}9bw zD>mZgX1s{h)mT|>A}d)(|5z5At)!&xWmsCv9)V9p@H)F8bzE%^MzKFnR}!8MY`}69 zFSHi_>5V`AEXzyOdO?YoMQE%ogyN(3OkI53V#mh^8*rb$J|4)7?m=vG@g>}kNhm9B z?S<$BiYHAUJ4SJ7+V5|5;>OjS)kdwCmNb0;U~M^#OX%Nv3t%yQGJOy}+z4UC)7?{X z^Z4$mB!oxwo(V!`FjAR35r5<%e)wb%!ENsj=AyY0k?X4 zZrr?eYbylTNF_nYAp0{pVjc#(?C`S@yxdNBF`Vp82E9|m{>e`3QrNdS9?ej-)Stu+ zlrb(R1q&M~xZ20W%^}#2a`dFX)gAS=QMrVOx~FM)Ujq)?@zuU{Ivt!D#Y^~WVkwTq z-{x9Zz(G_?47Pf+KCTDj#brIG&RFgELxn1-IV1krt7M;kx)0Rz+kwbnAtr z+`4SzAS$=gdT_n9nD!a)?;?1a&Gb2Elz5Kw+)9ppDRFFExUTcnltw=AGYzvrWvUPy z&`JP_qm~OJbQY1^Yc-N+l|2?d8o?`U6x|kl%}O`{nAETX|G^?>+|!$(x+Gr0UlUcY zoKy)p1zVPV^m;1!8EOQlJ7W|vdni^e@V`2h{wihY`%KE%K%R?Agm`cMjXCP&tlO)( 
z;O|P+lYXFmj?ddNN*2|39&1zL=n`0^qp_?ly?F zyIQqIJ~tI&=Q+eBVEh@yc{oA>k|8{YH{JPOsOhK>%(Wg7vQU|M)C^(Y$dmKXEo zPXD!Vv;o>DD9g-k|M4Ojfl160x@l14?Z5jO=c^|`b~C~h->{F@Fpaml%}SqXDiA` zvm~?*|K8kJ<->67pmOZtR02o|8N%(m)2A6+SbW0Nkmp_&`YrHT@51q^XP}4cf zeDGhJVK*Wq!2>OY6ogd`Q}9Kr+#&!1MjRt+Oshqwf>Yb}lm`Qp_`vU(!5XBa5<*}R zB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqY+PQa}coD^frPJ>N(HnFq46 ze4hi|6JU)L5E@!u3Z#H5_Clf_WFB%}ZUGylfY8wK;)fL2jF4?!;R^{vJ;bik6K8Y- z2gv+5pu!3%AW!OKXO9%vP}@d}|HjVDijZ$-Mg~#g4OU12A)*tYJyKvpjWgviqVz@z z$h?Cf1%#MJq?Sm51*LYn8Q=o5M?>5I8>E1+&b1+27&nX}YcyP71u_xiegu1@fRGeh z5@Rt(9yB8bD~?Aud^3OD+(H2HUej3OZR=&nd_o zDZn)6h7{o8)Gn%N>16Kd(kJH}8vzO{q###`uvx&^1mnldNI_1%gbisu(L*Se!2_^C z3UV&0A_e%d!v8$@(wG(ep#@UVX$we!QNyckJFI|k4!fIE+(W+PhQ^06f{+;qKnT-Kduv&>Hw7B%7-c{gI zSdk&*t+VB;0XDk|$Us_N3alVJF9CNI*zm$NSe17b*o<&Ku>vx`>R17JQfCMDSb+^S zE3CkZ5W)&nu$9;9E*abkDKW4@Xa`I)YpmI|$@t?x7-Nk5y735q99%yX0@na!osoe}?gB2XD zj1}M(HK`V#QY95{5oE6n6ztpftUQrys{z>TD>&+a6%e_Yu>!^h=DV-pXeF$G3Zc5( zt+0Y)7Zg@NW)lJ{Aha~~Lz{U6!KGv(L)4BHkb$(k_+bTP2stkSu>vxXju)=Ms*Dxb zjDWEM=7oOZUv66k$dFw7UuoNd)xH8Uj81m;Sb+^SE3CkZkng?%GKdOsu)+!m5uE_- zu>u=voGF{J0vnRvSOFQt)GS-Qfk239L~4l@SWvRV3akhPvqw`WWwWn<%$RFLN*%!k zgB6hJf(=$cK3o|qAP<_c0`f_X4Ql%hujDUpeN0wigB4J=gighX6_5u>QghomL8+K{ zv3UdGm;o!W?z&xiLp-8xKqFR=&xx*rcv!8D0~@U1SY@mLx2R^UfCPwa z-%}B5Jb(cnE5K(I*kA?69Iyf+7c*AC*g(PMJoXhFtArI$ArzCeRYkf%m}*$p)4r4HSH;0mrJDkI)QhmdhkE zN`l#{qM5M}fwHsm)){;*f=)GQ3SCn*X$FiVrjP}{6~SvPS<@e+Uez)SKS~k3Ppo;ZnzP%K6zAC`zZ3us!6((dX(aJSU#8PTmu)RDQ05TQ z1Cslb$#~K}j(@s+aQbwA(jRU1<3puiB^3pha2HDqvl||4k8mhTUU+-NAIYucp*Ka> zX3V;$Cga}LW^X!6kC$#g-5;EJaz+p2EyDr}3vYWZ-(jj6ly?aS8#+ydzdPa(IT&Gi zV<|lmADvUpq`PM8>eBJ~OR%_xJ_hfM;11S)-6k{Xjh>R6Roqmq2)AM~;e8u>(S#S) zq6I+5BzsIHy8vTK#Bw~n$%H?TSIJq)KnuS)2+L!1FG_w_iXOv4=bLg@1t`ap+E+gP z0D^4525yR-$b{X>~3!M(ZwW-_m1z|flmn`Ps!+s@g2i) zZ`LN)Xe1|}sAXQPtt&4Dk?fvHT{hhr%q`j}xw(hjYrviIto(S%s`#*{e2xmuA;clT zv1KLU?8cegVQbTh{8k!-3oldyhtVPMb{aaqyPaOyv7Oj`94R!ZAg&@RC<1%hIX~6! 
z?{v2Y+tch=XS9jw#9-^0^jZ`^ml|*~P&ys9&(*CdSvKS0 zd!<{Y{h6IZl=zDGWb9S-3x0XCiWrsIh^z&see|oX9EfEmdkMcg96Z&pTTe_*S1Lpr z=g=wse-Ht8os^Z}#-si;LT&Q1-)M^i|0{y6x<$YgEw7-gAvo5PCcX74x&bf^uNJ!f3M!MGmT|H9V z5^7ZlSOyMb^H`TS;3u2#Kme|2$|MDk7m!dFx5><|k!JImzXpq(RK?x?xDi+}^QhI% zlYkkYu2$b89OL7{wg*SIrVfzTj5Rn_5|*a1uCzt`;cqu!!#{@L%P(CkR({`tuM!##1+Wf>P6Qjc4qPfCv!W1ye;dIotxP)`th;*Q z7X645JJ1dRfryU^cwmYgRuYmH@H6Yg8b`kqtzgd3_TJiYK&#(NBo8`r6JG?g_P)7 z$f~QPGOF2UUSF}q-Rn|GEc~_5%1%nC+z(2QLO4}NojrTs#?+tGsoV8pe%knM%#*RUX z=J|d);;+$C68!F1gZYAOXwGmX3SQeqT9Quf7C(`K6YhpG3(=a_&$i%il&j1$lit<< zt!7TSfNdE?CGi*D4Bx98B^|GOZrVMP7qQY6G24Zg_e5}wZm@J=B@c0C))TvV7k*I+ zR4Ns~&qn-PIar;A=*v7vc5h{7*G(U4mmEqeMcYN<_een?_t^c@62Spi}r8yck;bX z$#DtL>Z^|A!5WBmSK*hMveh8>*<={Iba(?UYYRDlp|4tkOENEwouL|g z93G7yu1RCvRi)1UK?L`C>kL(1IK{GCY9wWjWq|}g5%G76JJ~HiDT@DjOzZOv=!TQk z2HA{#XI@O+Iyp0quay2e39X}qyYM=Qd`TE;wmPM)+1J2J*Di_oFaI_9FA{zPl>7ot zDR1+NeowNg!ab_1vPj@g@i!ZAPdIZ!y(~tK!;3ko+bdMj%MPuEUkYqncn=Z660&cS zuu2N<>clUj)&W>Lf#;5P@2X@9|0oJ+cK7zr#7*>nRS=hA#;T^`I}scY=NfsmKq-Ra zLjiS~GS9uzhNG>dSIx^Sx&=tZ{SP(j;J#CEUx#c1%i#S%gCji?VLo}H2Kjc(PJ{AE z$TFf>iwx*2ND)adoM%gmskK$RyrGPc9|-J!Sxm;!2WOPfYO1l?v&||$+JHyG=~Zlj z@oQYk1O9zB?HWjSKM|fL0yf^$gj1o#QErr(HaTe3hpqI{fRYgvs`9-e%5Blk`4M)8V3&02$~%7(2|^AU7e=7dJG6 zkT-DgQfztGzAdcr=@5CNJiWAT!yY+@kdo9VV6)G24NMr6h!JO|Ub4W(Y*(rH3daW< zaDPZ6JTVyd(PfY5{}cbvz_3e<#y>MKM6rvQI+zw(1U!LCz1aY*P@wlp(YvI-7{Nn9 zq?W=uZK>mODpeo%(NOHlOyBTL|D|W=hmjdO)xwE`8&6V~V2fnTIi}|PqbNOx5IzzD zb54ye6X5amT!eT}_Qn1+Y0afLje;-Xt$;3I!b(yRyGa@C4B!Se}>M zRw&*c0&${KLf_!GDgwt7%ZYZAcNN>{81!UxB=EAC^@Mg5Mzo| zClOs8X<;V#Sco!$&M~ieiZUXgIFX71WD`17A)D~EgS8c+1w((BjLs?j2EAE!*fDv>pCww3Z zu*s}coiN-Wv&+E}*&(DlT#TkvfgkwcCT!QG0cn0$HKFy|L4ck@uNY!6ZVl=?8{oep zr0;ll)s#@&aSm650rq>@404-v?*2m13On&%%`pp^y0mqZ z@vmwWExqrhVx*NFr<}~iNUO{5Gw3ke8cj28*^t_kI?ca6;^~Diww6eOWdz@<(IhE6 zRj<;L`Q}e5J6G`Pt0A_+E5cu1@fDqNZMK{OTeaym49xZ4YYZ0*!c&_RP`2!@CgZvy zp0qj)fc|`=0sh+%_4dtTLA8k=BSZfC^A)TRoQS4D4iB;pb1 zdqTJJJqf4dtuR~p6w!q!{J%C~GH-=*;9PA5Na`?iR5uONBdWzMI2FICRxKhPt?dkc 
zszNDmM&K~&mBVp&qK#qke&&Z{?op(0BgO4SYV<9Fk2c^8ZjVL&JeZ(v0{Hq z&GD@qz2Q%r_F2}JB^7YRfckScu%u@#+iGkUr$|`)a$)S}BWHZ732&~)Tgv9;mMd%( zz`D*H)Egv6IPj)f6p7%o4cYxq>Gn^23KOeo&Di+XIh+-Qi~6jKa1HrfQ<%OgJUCtT zXW34yqr(mMqdlpk>CSlC?<#KiV=j>U^@jbfYYpRNxgwWX%J2Alxmr2s%oQ}v{0hXm zX`l#97B}4T(iE;xJe6LF1V3E?3$qlWA}`A>8w=nB$s0pt4Ve+ZC!27F)xU1TbS2r8 zqcD?qC3nSz5R-jtTZxtGxploT;U{S&{{%c1KT@fZx11$bEPSE~gE^^^OQ_awFH~X& zfs*yB(-dE5u6;Ar;+x=!__6x5h;}9Y!wT;w@gilzHBNFTdZQ<^zG|6#m)M5;d4+|- zRcwC zMzYBpJPDVjneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPvd)uS|>|uB)e$XR;5z&?P zVcU7G!$+F3i^PH}j!RS42TNMWnYHUVDR4j)*oDXAM*}F3vmAyaO{2FC=!_CiA1Ifh zv=fhPKASA&9lzQufm5a>|r;Ptv>}yt-If8bjxBtu zEb|QLC|2vEtZcb1x7=I;lw5&TCF7QvKrLFWs-xYP&#}2?;SpETycWNXUW;FcpYhCn zIOOsjL0q9R%R{#Hwo^0fDt9j;_S|~1qit0^60j$eX#nkG-oMqG^}46WlkFb5z89^r z-SyCIx+;i@$GayJxK`vMzn)AOgQ@g9dor0cy?ZiYwkit^)sxA1@a)N?99&QtNyfBy zPbOvCkdnN5GGRd0uKjv4fw1V5)caeZdop2Qh9g}x^6trww#&IaOiv~-q;h6+Pj!^o4^XSQB z8Yp&WCGWs3*@=TuO}16U@AS&o=hf9@19JUt;#|}B_tUSo;{hA112L`SCllq zg-%^EroDSIDcg1SWWs=~UHkQ90%6f9i+~ZjCldx{II6WL>(thh2@I*6+1!(LxSotU zhP5kkD{HE)klsC+OpsN2vef`RnWe}JBfc=!$#~I;E*GRHZzUyQ%t_=y$Yn_ZAkF+WEs?HQGsA@y^ zWO8P`domny-Fvb;WP9~wJZmatw=!tX=!{WCCH) zDRuo(5llk&Yr>=$ZoSvZR%`3Ygh{KM+1!(@ay=Q>VexBS*7jar2wH#-d+uG736kl_ zUS`;#8K5Jx6gba(_BL5zxu&v=DkYB3r=*nARQS$M39b5MlwK0#*5tmA?ADw&LHL;CaT$oTOBb!0qI(GiOn5xV2z$0f{|R^{L)D_n~s z*)^C2TxCCI7p}d!F`jLes~2Lo#USBNBRIik8~fK^v)5oOdok;q2kc~D@aeER_IDcg zSMbTElNVIj8brg-Gth%RuX-tEn%w6_w*0;l{BqO&5IIX(lp`6FfCAe=@`#r}w3;kz ztVdrwdI%njkNduOB#9;Vx#xX;FZ}VQr3xe528~*xL#>KaA5p}-m41dQ^>KJKe%M>7 zoGr0E*T+3GPq|W|I}@#s3v_l}{z`Ni8Fp79SMR%ZTTQ;<$QH%mJ$|npNSaor*Sd<$ ze&bWq{^V?LHW-iWZ-9}Bg);}0pA{iS-AOgm7ca^}k?lo;`m{eVTOX5pVTU5_EdB@H`EmFhy}GW+Hb=%a{;C;w?+=Xy*W*jcgPI-L&AjN&EyHPLg-XZ12MPKT{4;2=`w0KFmB z@8+UFTj5^-2fO3l+0O0^|8g2HLMkG}{VqB&3#s_2!RQSCwz#htemQ~_tt#7-J~~0O zxUxtuh9b&&PD~{Xjk|Q_>n=|}`-9%@l24g)m+-b8zymezLd0(8F8QZb?m~!t_qhw1 zxt+V@pRV6sz9_wVne})+QQ&ed??7(d>TS30#@T1^R+Nd*zwwl0pqpt1YQy39Izt9x 
z3G;WYNAWT$$Z=7=h^|@}a)P0bhi^pi8kQd%d)eO@IOM#~gQ-%UyPV`XSSvVW?xT-P zwSgq>!PgsbJIh>UcH4rZ`Uy{VMJzi#gY3)JrOkF8+L5PRlW~1Jf(@3P>e3eCfuFRC zc;O)l6VHDbf#eo$Rtg5Cc2~=4B|)Q-`Fk+=64-;w*OV*o6$r6&EfpC6&R_8Xm%$GfC`lW>$Z7_VE7UOfq;!&5Rn1? z*r~~2m=r^SL)ptvb7?Y|$II2|g%S?F?$lnetO@mjpPcPB*SdODx-Ukuyvl|{Ny&!- zffkKuXZUHiH|@^Qy#nJ2X*6C#@lGH>8HUQN`3oTp^{aDkXO{5}LReFne42DD5IXMhxF9R5iw))R>^RL>y}{z?IW)CAfr`n>M}C#NWo&5Z+Ro|-q`;(*wtS^t)Usak zP~!xcbA$#;jyA3oHjo@jfcgcU!u}wVZSC~u`NBwIKi-7nEH73~J#Wi0jm9{Q&BIud zhijOB8>cdZKLql+QmTXh7QwCFTye>atCCD?1x{1tC9h@Nge*- zmU*;Yn@`|}??evhcqt4BNzGsOrkXE|1HyAIPz~aq_MzpJM!FM+b&Z^GOO3(sMW+S; zg0HxMH7sZ9Afg#IwNr7sl7}Efm=?v8eE(FA9>dpM;2M@qF>F+a5m;&ta1Hx-*_sMF z_Yh-cuvQXo;F~Uh4ab}iB>W^DL@*jlP63SSYr!nP==)N=dw~RpuR1o&H02Up2T3RIW=i40)Z;qqm>p$T`ycFBye z2?{$M9fBxF&n0Pb1SCnzYqfw7SEN3Jh-LMy8@5a4fu-)Bx;T18jq7Ei24SR0+X1fQ zW-6<7#n>bX?b;>8mKW39F38e4Cb(g{W}Z20*NXx9+9MxH6p0bsuw5f}?%1x8v}i0Y z*sghy^P9@tuw8+x@3v&-hV2SWi#?bFz9>+1!FB}_l9Yyu3uRa36%6DEjw!IaVY?!m zLQ?aW{hZR88@Ai23){tn&m*?WaApc3F5Pg>pAoXPdV!JGJLXhy2bO@ewu#Y3Q%kV3!cS(Kgu7@DO!42Exc;mtY zhr}3cGsMylo=Lf{E-n4IVY?ioVt{aJlNs#NtxaYKNP>i}rtmPe%CW~0+vVjggb81H z*B&U`uw8n)V*S`I8R|S^yJQ*){FBFaT`FNRfC9GViwad+?1t@zwhIc#_)?4GzFjgSVv532r31bwP;|j|1rm~!rt8DJ!k)YnGX-`xY*%Dc z=*RqJKc}?jhV34;!FIX8uERTAnr}P<4bJ1ZZO}fl?MxrnL1*-EOSDT@RM{AAUvd?KFzoz0%;1?UDf$9WNi0 z6s@-7zFji3f;V?;*Fd<`5<6hKrXd0}#AFDEhS+W2E*UV>2bZcC2QoIMvh3v2qLJCy z_0Ep_cF7>v1cheUA&7GHT#^WoXcumF*f;NyJRp!W@1CUHa$9GyJQ-fk4bsvuw5^$&41snkvn&6*GO7478h*S zJjnS?Wp3E6z!jIwBxsA5&58X}>wICm0@Gp-=728>6kV`gfrKQbg-S+Y0>9?iFC zQuCL+X%Qq4_iotkF&k{xrOn2KHIOZvydv$kL$@;-7|ysL!d1j`z-n?qlwyqtB028c zW!MzMh6CcrP;uw8~=<3#I^;<@aYt;IV^j<~D>wIbyrK zyoE5~EAJ3|X)`T4tmb5u6UDeo;R3rA-fX#d`pb)=}6C7jPN0| zyo>3ffC&Dw;<+1=`FKHY*#)BF?tgy~l>(8ZuZiFtP56Nz$`-)4N++49B%0J@huUc2 zFL` zj-Z-ju8Q$z(kg~jing+%2EAHP>Dmr8`(kZV>Fy#m*`zjmblRq!B@E@e(%xE!cQwM~ z1mu81EDEF#1DFy8j8=fIH9Nt^9@A03oE6b3w_M|rTF4TKh~Ty!V!GOC)XBA6>6RUGE8c*HEq4)s}^~+qmw>LcXtMG zDIvB5kiMoZVq%J?z~IY1{S`GnU3GII#1yoL!M*Zixa6XWHDl)^{J;-x 
z9!{#SQLtM!lj_(d`*?RXSz+giOXf|p23+(Z)?yW&8BVkZ!%Th)0^G1JM5+pv}e0J!+v{nJep0$!|qmpr;jdh8f~8I zu9gGkLJ>h6mMC&vv|>1rc8{a}bcQZwKN~l$UC?w?*$wOPzCG}ImMg5gOy-v6i?fK# zcsXUt#Fajp_*`1)V)sneQ<>!Icc0lx4y&{`+YO|LTer5n?AG-fK&jQLtBiH{$2U9 z=tFA8dPIC3e$}^jKzysiuc>y(A9jeLbXa7AZeH;G48nas6AlrE9mx}vRS_Sx4aB95yPiRn=tFAR^;g8?{cD*a8v`+Z*d-8?VI2c8^Z4`1guMds zQR_ggvtgImjMX8v;Cd_K;(ZmF{Tc%?dE6xslV_a+F-PEeWyM~B_}KXh#6%xbGp;`n zlg-jHLpBCt{IE+PCc`=gV&?JZl?i(V;$zl(&QS$(By0 zy!PMH`T0E{xsct#(A6;Vb*y|fW}Z?=0&Ldua1_i0a+Bwkfxnqmfc?Sf+*{^kVC`%iK*3{?{ONez|U z<4hOtH4&zHpC`G}{5%QLg(b@H8~kR72nL@c0ZoI=k-*T!7NEpejs#B#Hb;W$1e_x& zwS&!(G(5!|TZ&y2SnR>__WD7JYjI-Yb#z3u*#yn4Y6phg1@wBE5u zM)|Ae#$PtkIE(0DkOLc4;HiyjknP6H*nrAIUqw5;VOq?{J5b~2^5%ELxXMR*S~Er5 zs+uVhw=z@2C5)LO=@-ouaT{o+NK6*~CcK!nDs;liPQTk5ZFQ%^!KQ;{T}KRSW#bxZ zRtp=_|H0Q5VLvl^yo7B#VSFc18hxUjcv8iblWL~SzgVL(W$V|COeUY3a$;@D#*xv+ z!ZekbDH2mQQ^clhrbtZLOc9&1nIbV|^t#{D*6iF4GTO`FVCO)@{0DyvuF4Fp{033H zd^ZX|v%%~fXLbjkU<^|ktn)oYu8ykw(*~3; zMS1EHfXPZ}M-WZ90Sg#de9cqYBp4QqSIx)w1WX zvoN0?{3n6IW7Z6k(n{3bvBE+f=9N=b2&tO>;g1C7UPYO+uLni;URAK?@}>vLu?hy& zR-kzCBr0j}2Fj$#L`f+tvUo#iW6K9p@T^g4NrN|1=8PJo^|#30jav3xluCwDFlbO} zNrN|0CXE`*TL+QFn-nwAh*~QY7e~fZD{1g%%A8SywBHfgyIH}WLC{(jUGz>S2>+Rs zY9!qsp+@@kibNqFpq#eoqXOr0`Wfb z#P@P?#ajiKT_vWEksTSigMGh*ni>L^SS;FlNrrrV09wc>@|k&1BWA_S?Oyk}G1Q9L z-ByVn>wV0mUgtxkn2QS;hAATOI~``jknx%idKfPoGSYFF4@;()j~gL|H6o$At4j#Y zf+1AQ$w6m%ZI4%Db0=|OjRa97>S8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`6i#`25a|9@TitxsOtSV;f-#sB^D2ll=9 zhyVO5EAM^o*6;t--&uwqxibF36|e?PbRt%~1kp1q(TViyIrKF_U*C$puDl4)FBj9L zo3(7^=HTT~V{vIubKi2j7tpcG#Swn-erTe7eCVOKMNzyDqV3|_v-!7|!G0!bqhF%s zpzJq!eolY07aGHqN(`_k`!Mr;C;k2a?9GogOn=%3i|Jc1=-UKM00GPBJ2ZinYl?|q z372;cMUCd-o~7k|`wv`l@KAjD3$9wZ3ih*yY%HD{kB135N^a$9;dnXv7kCEp%~UC~ zlm2NuRyxycUqCtu^h1tr)4#7EVdO$wLERPl=pZ{UF@wKjLOoJVDEg@=g8u{dZs5bq z*wIq%(+q;SU%~^UA1t^(4Exa}dfn6G$#!oRFHL6?bg*W!gev&E9=dJqARNHoq5r?_ zu7}Y0`}6VdMAD8A!2vW__f&85sh!Dqr~dIDfGam9*UNdHJ9lT zG=BEYYB~}0n@Rec?3?sAD_2taCygV`wIx{WG@AS1CfJiL0VbCM5#63_m2w3g%#AmC 
zEa9yjE0+NN3w8zAfIY8!%WiL2a+^KKZLWru@1X;(T>)|DvchW)?7w6$#P3ELeF7Hm z7@!6)hQ+PH_H+dX=-9+(x~Z$NDn?dKCWkm-Ea|1dK9RX7$K7jS@15v4N_0Y^YL=pJ zV;9+X@DTD2xE|=@YdW352rby8LRCx$c3=s=T$Ya3L!V5)o=TrU{)%j5Yj-n!J~eVw zy^p3lV|0RJk6Ql%|A$k|A6`%hb3g3s>`jH)T#n&2kiHpHVoo2q4rQS$mH+k7ytjXb zJe~%;H1MHm5MNz80MJ>^MnlmKkkzEy3*k1{fB&$z*?)38-0Dxtw!@ps$`!B&`#JuS zET+p`$ek{E0PW-$Z$CI1q#l@xIQv*CFo`)XPW%0>xH%ZjlE&2wE0>{9qhEB2-(1BR z=-M(Ybrvr_7&TT775wCO|4zjGEwJ~2es6gD;4Be9`qd)xWBI|O>GPN`WDvQNAi}-oR0FAtq+k1~9jILGe%Nz7Wt#I#WT1KYUrr-)${!moUJUft5{cglhmNCo zHtBEOJ~(~4KS9U0_LYXA@{rW2P&ILU_muKcG2-C3nA_BkM(M+uE0AatzYq?gNukhz zB-BL#GTryIvUp;!^$ahaD`DSL{r*ljjigNIFc$$=Lq<&^lf&pQz0s3|7aAanWU>aZ z+*wL_K;dBxmeY@C*)eyeG$n0uKBaheGo8ec(`S4Y>_a=RX9IM$F3Jrj==VI|d_E{X zCE;fZaI!ZU^iB=Y78ev_vib#QwX|M3$j2!m&0Y}m#-a)_b>l7`7c5{B%AEchp6>^rELbN43s`D13W20o%-No4o-}_B=wsIFq zHM8BSjg{pl>N(cYzm~(=P7WTh3e{f>|A`1*$12f{tL=g2p!9!w<4->uh?hvUXAJ`J z(R+sIrhITc;^UUu`S@T1?(^3NUm@`YQ^H-q;->Gca%chI;tJJOWa+)>DIi>%r3AUi zpuh_o6+ zgeDvOl?HH!bvt=hoI*+=xJD`oLI&BNf$Hcqg0FZocrSLFBiC8LSEZO$3_Vrkuot zTfi(0D1^P5d;&fe!40M~o-JUprQ%!Bl^pb~C(YgHu6ri=oT}Na0{<@JFA|d3RXTmn z8Aao75lACmT1#KVTDKlw=9R8Y!_PF#29>Eoa6r5Lv5s2S`kX7Piv2b^MSnDcS6JDp z;A>XG(Y)QUUU;D^x9}e5?DutX>cPtQ_-l1dJGiQ6Di)T+|_wSG+ zqEm*x&!mieDIq3n8pbz{@}ngado`mQzxV|XyqsarBW36v7GWeqiZZ+?v1Pun9b2)( za}j^Twj`jTHOV0Dd)m5Z?Yqm?IWh+}RO%ae6sk=PofT)=7XG;pKK^Vg6hG=FJ{T{$ zHXiYE%N%(5XanA0aAqsLXj^&i)_5oj?Atr1SUjMfgYqOqV|v0bl4ftc8+R zVK*Wq!2?aOTmD!ORy9n)7p-!O001Bk`EbxdVb8$R%RX0Qg$q=XPy zgvcBoF#)fv1%yKh{;pNLVQ>Nylu-!50kg0{3h4fG7QNLi^fR$h+s&gBSMu$XS8T+~ zbVq;}q(CN{g1W|Xk$kwV;jA8{fQ-!NDOH0LjF(!Ff@-|jAO(b?PA`5)fz1eb*&i7* zZPj^gnJUO&umPE@6;ePxtdpHRQeZ=E+XgAHBIJt{kU^CDW^9lGLPRG(d!)dI8fVJp zCf9}}w++Ch1t5bMTL2rRfDqG&)DkJMpwvz`1B43wMIBeG5N(hG!n(Q*F$JVR7yy|F zTJdD|dZv2`WH5ylQb0(GEs3$1BM+L90`f_X4QiJuz4R|{S`7M1OBf?kK-m)d6l=-N z$--@rg8bD>wix3!2{3hKq+M;*>?UyOJn_811}VTFRM0b`IP@t~Qwm}MVE(8xWA_e%d!v8$@(sA&I7DxfPq&&{w%`&dD zqo&giE8yZ&!E4w}1z>{}&|9pF-2@gv(iSUVY@p!Mb87+EUh!`jSh4VefN@uW%?K1L$leQ%8%O52tANZel_`ZPtgr&| 
zq)vABSb+^SE3CkZ5W)&nZsoNEP6h`Xtblx2CqR3wz=j%U%H}rLhNL%EKn4kd6%b+? zky>H}7L@F;0xLqk*cBPXC`22qfDq#v5tj~@5VGs_`KWGaZzU_ER#*XOYRQQ(% zUpfwKu!5uc9(#MN0Jo^-eFY@Uv{UJ%YAxO!=w6=3xS0zHq6b?G@{1!N9v<>HSO5UQH`fyD{}!T@As zmLGM0ZL_a{P}K5L3syizw&TSHDb`%~*jA$rLLv^>s9*xikS}5>{^@kPmA_YKawC zP_n}ctO)sH1!NGDD!>LSAjG&vgoCe53YT;(KB_PGEzbK22yd0K0`j04DdY(5V=)0`g#(xf(6pW?w;m6faw5a#%s&?E-i>zG~4mT{im) zFip-yFT8yPc#>9FL9UXjSOI>_j1}bMOBz>rqDDKx(S=+{HdsNx1#gY%?fB2k-oQKl^N0 z&eZrpSgr~Xff1{6aLMKg#QH7%tOGhF?qo1saXpv_Cj>TouBqqi4}Gl}$qzU5_7C`v zjeurP(S?AXgkD_;NX-Uk@E7V=LJE~44mFv4K=E?>6uiY~=q%7(vJJQ^R2aZ0L_kYJ zuQ&vx#^Mq63--&}ZS9hL&$T@gu)%-IrCv&cI7F2qDtxK|$Eul+&g4bBGrawr5tD4j;ZPb$B9Z_o$ zRm2OeTVScXIqXfR@iFMcM~36gwe<>Nu1H31808 zq06=wn5jwd<|$~Wm98Z@OAtA zC7i!t0o`P8RoEIDdkh1Gbdc>k?wg1t`z8Qo>+3DmS2~NqHt6OrSyVO?Pei0R(FcA_ zIKdW4a4>3b_j{x66kmAK?N27-NxOXgN%=;Uq@utQ?qUgi!-MVn84M3^kN6|Gl{}#E z-k+dLxyU_P=~W-?r~895PtNFpyk%fvVc~7Bs0`;)MeAvK5o%Y$<00FUIRy~;=`VD7%H^F(k!9k z2oT`dveJBZUtjL9^U#VG{Z*WElNQ`54xJtXZ>OR3)~odDtHkc(NTEpuait(&dSe!P zd#RbO!50^0rDs0V`a7@qT$zVzY80~I%(N%3zbTeZ+yB)E8ZjPF&=08X~s6VrF zh!S5hzWlMOU+~MDRm5mk66&L0ZRJ2LGudK!cQ|;eU$>rQRR`)ynM9#fAN)ZCT&rp& zxbdhz%{Xgk3AJwi+{>8%;>X=w;-O4?wl(8lpgRF88z$9WoL!Ncc42ZL31SGH_^`G~7JaB@Xz>COi;;E1EJ% z!Q%xa)WvNwvumW;eCDsgA}3XGw?A$KR?IwVweuukp9=Bg!nOxTx26t|*NindRT7q_ zv97d+PyTiTHvD4CPyb{SMTTN;gfuJQ?EcZOnB0_g%vj&A!A%A zfOR-@BG|wceSjS*1^+gJS6Z2NHduGH?xLT8EHtYVXu&N%0gA3`q@37+b_fVWd{n># zQ{=FckW?M3rB1AI^gGdJjTzeBTRRRY%RjdG(y!QBSk6tnH~%K#)-`RM(GoGFVk-(4 zg%hQV0Ct+66CmrOh!5F1NX82RVXG^llEBO=`0a?_5T-N)TA`2QgvXQdqzqWBiJ+F-ti^0avt3 zMHox0D7flzQJIg3K;WO-E~RY^&c=Hm==X-V56)UHl4bdp-;ZRFqB$shI^wUL8KFJGjqneYYM(465&F#)gbA}vX$c8i}#!3lRmnT2T0`r3lOQLZx2OnO@b zw3<2P0=7qBeKUNoZj^Mq?zw6ANM6LU@w%GI(Ds1$L~xC6usa6BVIJbltS5H!F63+< zzNQ@dVXR>!U;IWXBn(gkOU4CaKy#Xq z0Ilw(fYp&aSOd}SD*RGYwi@IE9yQ;Dg_y-Z(=dCkTdEpexZmIF8hJGUA?-qBmPkvGq|MQsE=Nr%sC#wyz8T-z> zn7nmzW*T29{dE#rM+tY~brAWsI;9=i*T75HE{XRq|26q95`HA*LlaQ)3pk~`%`5sn z$)*bTsIJN)fjh?~AZDU&b#xn9;g9o%;c?(2|kU>UqWXmF%wBFraG)F9suH4l~j0LmvJ%ZOqv 
zGN7{{MI^m&o-Hk=)|Az-ZtZ;3uBj6ezXCPgww0o0_QvN z9`Nt8Y1crq`-$*05wP)|CY%Z_j&h^Sw8=rMK5V6r29%7b&?1^`(GK|$cw>ArOouFL zSs62f08n%^RpY<`8bOmo9$J$5=;}1p)no8T{Kk58r2#X3E`o=>y((Mu!*E6(1IQb= zcqz8Lfrni%&}{vIPlw1G<>{q$8}`UKgp{N{0T(`Rmr{bDM2t8y^^yfPcP)Rg0r!VA z!V`mGA6@o{{y*^#4Gg=)X#6t+LlnCRTH`x`D!JJJEkI1OmrH52pz7#;F@lGJNG*kR z+ET}rUp$$z8bai&duJU*d_*jTCg3d9o zc#1M2pg56=0%Q|9)y%yM@$tpJbtXXzhW;=aom2V^db0w3v3#4Qlt+@lj$8=HS|q;} zR3vHk&tEnPE$9FcFV&pXIT6H5)4F0eyU9I+fV^~;xqBjk(q=uNP%~8p0D0+jGZ!rq zX4K-HRMo@<0eR^pL`*Oo$V-PHJ#YY!SEPwR-VcWWc`02Lco-DqrK1ta1ON^xO;MqR zkJbY6(s6TMtlu0Bv+a-$uTmTe2E{fue`tq}M`KM0)R$6KE3X#RmyS!hQDXGyrNhVl zn{s^S=k0^jr~4DMCA2T;FuAu~M*Z2-;|V%QaC5xVZ*vFJ=BJ|Sj_fE*U+6mF15toY zW~J(c;Rcyq4wl5M)bc|}Y1$O#rDFzuxCz@VEvNltiaAU3yQ>*A4>o6bdGVeCyVr zzOw=TD{_hn;{e69hH)@6{}q;vNdf>zjx|7wm| z$kg4fn~Z-|qiE@UFBK!L>^S9QE=F2iexE^y+16;9Y0HMxp44gn^%3VTe6cpkZ`Ej$ z6rQSAX~}%^rz2)Xs}}fbh^_F7@RwJ7MWc%Hm7_QOY12N-+Onhqt{6~%?gp0htYuq`&Egb^ z!iF#Cobjn9yty84DVvvDuCP@A>pJsPuTPF};7uF*KHHGp|CDb3#HTQ^n%0bsZ=J(g zLAa>Tx(L^h&ozbVtHMLlv7Bt+tNzx}!3WGWfv4iewGsS!!+zJbhVimokxMM$-zK(K zAkIw#MFis{(n}>`x+A(Q#0B{23Rsw>5EXe@cG*|}CrI8HB5TNu06y7-GpxPqRt77{ zrW}QtyeqjYE`*ruTiZ&k$a3qGv_O9X9*ZBTRG?c9j`d|*nyO_0C)Nfy`XK!Ep!wT;w@ghY>Wlj6tW9{_DTAbw4@3OvX znS5s*?&lR2ifC_%$*semRXBK1E+MWGU4l5yZsA=Ojw&&WPLT(F(^XkJ6%KYa&pIc+3LmPlhuuuJ{uC&Yi~SaM zTLzep&9`!Ciq-lkD_gG1EjO0{tq;yMk4LPp!UlFTQPB|%jS`dpe2&jG z&xvTUB!e?8#&rD0n>&J(<9@=mPn2 zpI=WVjKNfMo;{gNn%+H`Fk6*{hU&>=Jb3nGQVuStj3i?ke0wq}+lG|n)sqPWvUcs) zlL>@Hrz`?S=)OxBnBhpX__prdlO1iB3zRTDnZS_Bnaw@f(YCTD%NEKCop|(X;Mjdb@EJe=uWT^~`28;ijRlnxllkuVxT`tdqemxmK1U7lko=ip(y;tnplksF# zUK1__mfhd8u$M8wWI*~-PUu#P9~ZM_4cB=0Wc=tVwT0`+`0;WjuGXtM{o+-PJ(-+Y z@16{YT=$+V57}Nl8PA%^ZmjK?=0fkDtTT_EOr`+H#d0a?5D>&XPdqEi+D zBXmzD49sv;YfsjxttS&0QaQ7^C+l!M8LjC&8Zf@`z&jv)dor0ItMp{60eUh^krzf> zlA&qeqin&sCC%Rmao{S$NNKYms8K5WQ$*R02TnY@{lgSvEPX;WrVxIwh*U4mD z8xo+`IvGD+pq`8;Dmr2jHbSq1@#7L^OzU#qJsA$U?mbx^vb}mTo^_SG7owlrI@#(x 
zdNP>?3SQ4cPbP3Jxu&5%BRIik8~fK^v)5oOdok;q2kc~D@afQr{hfyW6@0SkV-iqcJ4hb!5{Oolg^l&-%S8{tgYj|Smy0B^ z#6I`D&+mml-n3L*Jo8r(CJfor%`R1vhH_lrDvRX#w0DjQW$f z5ig{3oMzX*HrBEqwr1zhU9gQK%9|n5q=257^tZaB-ZuJZypRTVuE)fIofZ47)9K*M zC|<%}6FtX#RxcCdbePU(7agjCwDZ(pbS683iW;S_2CsmF$WR98pvitW7ZW<+N_BbH zFGsMVRb_k9M<-|&R~G5Tpc@#>P z%0%ejcuF!5ce8pFFQZx-*VBvWa`lDQ%`_9W;c$GNArrCV@<~Vyd*b~0jR;=D@`Gb9 z`x^s?ocCQ$ato{#3^A9)>-NmiZbHa4KB5qlAAG$5x3kPuX16Uks-N&=SH!Z@GswPd zT@tS=l81*)$%}; z%W_m%uc-TBYh2MyTNhXyfp0h94o`-~z~L)E+10h|6dkOr<>cixg&g=Fk!PqzjwFN+ zUzyJ(fc$O*l6%Nas!iL^w!Y$cA?(3D@MRkZm!o(wL+^04aU-oZ4#RcvE3B%Ga>J`R z7!AG@!K+yw>_P_RiVGK9L7^EmwM?;?aOSiWqb?=2&eB>YO88>KCEHOJ#Q@(Ch%p8LBG1#XAPW zp=_PlS7?HnB$5N>eTTpcGZd`63}7(Y>Oa%XmnWKjMyGDu(-Yp;;ae^(Ew2wQHF1Vz z7+jJ={Fp7JH=&=#EdQBHQ_Hg~CaUTU7EjNispSbsl9g_?$SaP?>sN!W0bSc!9f1^> z6w;Qj)QiOBB@Ywl2o01RZCojAAUTu(^$R+M{Xrz#+Ud_Tm&C3ljs1ZQK#l!E3pe#% z%H-mgA~C{`H{m$Ti&az4+p;as9QI}$?Pv0aF;jJVmk8|V^W8IXyF(y#ki%k??evhcqt4BNzGsOn!Q7r z%@hvkBQto;1*$>Z(>}DE(nxorfodGq^71ixztS}A@D&%ZhUJV4B3wq%3@fXKJ-98! 
zni6jNH5a&sWm60r)nNpdngd+JK3=vaZMg7|tUOp~Gz;H!0c<$tgdpK3X$| zKy9K^gUqrggb81H7nOp0Fwg}XaKm=#X>am%UsYqwc$yY}ZE;)F!hpbJ%8uABd0c^i z;>L>v$bfAz8JDk}RBf>vwoArP@Rm2l4od(A!lIeDV7sOvYQc8Npr$(C5!)r?GktKC zY^kJ_!K=Kfn6Q9A9X6RWxM92GacqLZPDh6z%F%O4S{wmM(xR&cE*%^pu2{~rP?@l# z>4@!;d0?sgr!J0Ok-EG@WDo|pVY}Q+Wwovtn@DJ{F*C7#t4)vKhV7bpt_#~GBeNW; zUu|*2cF7o-Ez30Oj_n#ri^k%D?V1NUFW4@bmLs-H9?!MHC9}YprxiXqf*ZCgFfI0A z4)~%#(FNNTNJvsz=;cZ>qFa^&H*A*-O2UAU)cj>Xr?lpV?RL%swu{H)L%R-&bHjES z&bS~VVyf2j2nF`$?2Ew%+^}7SO)+dZAdU<*7i^b#ToNv51?+-M?SIXDJ^@|hV7DZ`9KmD+a+Ttcyq^g4TMWAvBSPy(-7ySAtsY@#CFN!nLfBwRmwLP z47N+=?1=4>$FT|eW4jzZm!!oJkR+|pjW8L{u@QFLw@U`aQuj|?9K9lSxnR45lyUjQ z4cjH;GtOnTt{9tY#CFMC*brX_woArQ^IR9UOGain42|uQF_hf7W4lJuqOrJOyXHa8 zZz^-!w<~bPB{Q$EFqcf|i0ukYi#?bFz9>+1!FB}_7k)}}!*)eBg{0;$`#GgGH*EK) z4Yq3ww6p@_Ja}@B`*s=5xFBN9jASLr1^B`9D6b=lz>g!g%djbi4F|-Lq2_|^GLKgb z7ao$W4EH)>yDW1;knodq5g^e zF2|@CARMqpj-FeatjBgMd$VNmCWJ$!TYwz)?Q-%F4`}g~cL=_;3ngyY?y(EzOa?L& z)~oc5vD=vpWE>2G%n#coNxfpDoMcEEN`L*Ry(t}%ysQI!m8 z1fJZ^WFX`-eQ>FYt7Hv38f6<3T-s?en{quX1C|}JT`~wZL4RzQqvw*eI0BN&R-Uvh zR3hxSEdsocZXEaR@=OX9u0!hL=$V`&5a({#E;mzItt-YRQv24hZ3rB31WljhZ zev&Q%ggZ}K$i{8oF2k>^_9OLeU~CtU%GZ4w69G4Dm*b5K529OI&!pyf;ka*?V^jeOqB3aPzmx0YT zmI&U_gdYf^Yyo_$v?8LCXi}3MrUF-%6#kNzM;?8M}MifENvu5n2%B9J05VK!u{DX47cS(h7J1mw znHx%XcLs1NA+`jNemJ4%*#ELme?^T?SKV9)F$L{maIZWWF1e_}byXS}%Q`AwJ2!?O z_#vNOieXu-VuHxgqnBc*R@%aJfyI`$1&QqC$|Z;n<`Z8AvK#8Nx2cEy-eiPsj_*#; zE$^fNDW4Vz&;7!y`CO`cDYyqOE(h{#s8jt}Z&f|S>C>lQ8)Ar#dWh_zb}nG3xY&f3 zD!AzHOb5g9NHfT%uj|69%z(Hf`rubsqT z63MCZ49-k?TZ8^+Hf^(ild1Ba-t3;;VK;rCJ=@(G_S>7|(QGmvcDMRFeRTWNX!Bfm zwH#U(iU{I>jOV&&#c&`+zefG(3|-8AHf~(IpjlYP*QLL254@h`3hOSDxuyByEFv>r zPMI=srH>{)msYyiJ(KlRCb|0EXSR~NW$ev%1L@(`tu3S7$_z|ht*vYA-1(BkPws*H zy(|_L0c15pX0_L{#3fcsA9skF^x14-A#vD#Qp_D?isx0}-D2gA!;rNFBe`0JUYQB& zp(4E*yKK`XI)q>KtsM~G>hNo-9rA}AVkjLJ*`S*jJU@eQ-_L|?!?4#XI<^>TtxzqL zgMT0{PStb@#N=@o6C}?%2V#!E#X#I|XU$BvV;hL65xYcCB7CVCR}bd_0x>z#O=if( zK#U)D3B+Vr$3Vs+XiA;F;Ojudi8zC+0r~3AP^TvV94y(SP_%QT>>$A 
z);SP!1fEw`>{SsTol77l!oP3=F?s)5X2`}sj30Ih#AH~>yftY#xd1b<0f%uqpAU@v}F;*XZbq_pP#$xPNHMmVs zI-vM4I|2_BOK2pwvmdXt+l)*iHR(G-P@)N&I?s2Az_nHXP zyw8(dX?~sr>B16a_ziwDLD@TGS1e+rPdIHXol-j}ONE)7E zjxEL3umX1V4N~8T;3bLxBJ3iUe2m#5m1sg;u-Ph(-cSjt+>pXQ-lyCdh+=YgRU4JL zonE@A+(|A*R3Lf#qT?Wa;sO+Mc99AsCleZi%M&{S-iFMvH~d&Av+ ze96ZC<-PI3#-8o*R(}Y;w+L5fB#$GyCp+y~e>5FWx})B9e@b4xc*i1n_^+B9f7wKj zFQOAe4s29Goi?f=u^TUA<0Ox06q_}U`uXGV{gux%%d?<7j2PqY(Hs@Qx|&F1+RYg9IG{koCKH*%X# ztZm*na=zEeOp(~UnIg7%Geu(aW{TM6%@nE4C$O|NJGX<3_A)rwIS?`b!QX0FjpEd7FgwSY-GL_B|fj|ApkMVYg&2SxT?RoJ1+UO1AB6+%+0D?Ptb z@&;f8q6TlEOqxuTl(Hg=Hw4!Q?h_R}>-_Mh((_M`Z72$v_3iE*d05Ico4f zlTwYO`y-S~qYCq0?U0!0k7${6Q7axv!JCyPO+#>81ixx&Dk`^;5G02oME-E90 z75o^SP9j|RTm;zCCb-k=_J+fJ%_47UG5^Uy+~5*vB$x6(9}$1CDm>8LN?v9A5Lxz}P6ua3$tyLF z6(}be^{l+Yv}OvmXJ+2J(3` z55)V(6W`0p6>k+_c9obuMs{T64)*;LYHF-pVzFrJB^mPd0catk$YLkx>z*xjDJWh{SJCJ7UQA$*9N= z%#2*@h$B?Y$wfva12HplQ9nbbn2#HwKz3B5O4g268p|(Q`9H6K%d=eBJ2mX5yX*$( zH`Dl=m&gCl_dBP2@A#6XM9+o_KY&0CloyAS8Q++^YNZ z5q-c`Mf3rdAx9PAEN-JBlJ6~5lz-e-Mft~Bj;g7MBsLork-M92F}1dIXwS#5Kh=cISrwnkx%>WVW3fz1)P|Nh zG--sJe4eB_mg8r+B=1}*J(Kgvomp(c(pWC3Dpy_(%UK!N9q+^mT&>zbz4GmQVP8=t ze)-<`OZQ%^66s!OUiKHO#Qrcl?;4fw7puhK0C`)RdR%&sv9XXRb(u|mBcCN9Qxlmp z!$kE|-jLbmH~Ih@LL`CGrjWl_C8C81ql#$JXR9LmKmZk8tP*WhM2kLK710L*sOVyq z*q@A0lF4hQi&diR6+n`L{i{UyIqA!U%qkm7RNQ_$&%o0IeqvdP4+T zqE~L#j%cQrukNX*CoKOdrbj-dpC0*?IKBV70h|snL+T!Z!!^^xKG5`UXFpz<0C5yg zDmnXa@S6)8|MId3ez5`7Zvr57Oe&)8DiYEiUK2xDU>;Ar>EwcvoJ+*cP}VgNtQ-UO zGEx0h175>Yu`>qTYGPZebV3o^%%I1Kh`eaz;S#(RdO>Ow}DtpyTXl0v(yC-roRj@yW{3c)FQI-BsDlMIuC8 z0Vw4HlN=4m&QcPYN^_El%*g=}WoorfnT{nuL{;4ZIV#Q$$Wf7r$khRl8XqT=bei;AZsvnsb(#nE+*RRR$rE{9kpQgV+~ zA|;u~++r13yTp~0LzH0%5K(zjagSB(Tx240bwI|k-HHWfVqUR|r{m**3`+rH6@~5G zV--h&Sd>G|Kdy! 
z5=2Z6v5KeTrxColJ}$YCxr6!ikZZIO>J&cAg}qLM$)@sa$j?1I$V$HZSL; z3}?3I?r#hoBDyfdyq%YwnM8amYp^sNPp9auj3aP8yt1sM?S606oxWwaH|cM6`;*Cd P($1Ge@BWF0Kl=Xx18;<( literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..68d601bc958fce1ea8687d1a8c5132d66b66c719 GIT binary patch literal 267629 zcmeIb3zTF>eIGoxdvv3< z+o-2|=vgN@_OA4;ILo~H{;U4)|NmEYtFE2H|NYz#{=m|nxw*Atula@N zjwY9#8h87{8{c%UH=M4vpZxF5%bxgFlzs5n?r+_>XY^;=+2@u%|Ha+#4!CA}H0W=f z@0{z7C#}(`NpF0vJME8#@!tCf-HqPa(O|PTUVb_3nw;%!_u?y855#*H|BCz<#f`=0 z@?O}rIX#d5+Bg&~e;@4cjCQ8mJJU|~$RxfRWKY2k#O&DJjcUV2PQymxu!HWY-eAIW z>%vc@l~Dx09l>2JyTjh}>CyP9&SYb>-D_?3hLc|Fsor?l8?^2j_NV>sp#P5ExHaw$ zH%D8Yonims)uR(~GZK zj~AQqJW^w0X|ai{WDWgeCEHw1N(xPQ zrv2%8TZx%$F$C}Mi3n~oadE2Oop41JDi=o+Qh#tptFGYImT#s--V(nG2(e9-m+dJG8a^LLfV8m>Z^~gr$u;+0qEz=KnSH{i8gP36f5r`d z7&AgGei%Z*?9n8|=Oeh@R)}10!G&?dC~0XWw(wr2QR+_wlmQ#a#~RSG zlhV}m^n5nf^;Q4D+*jqpsa;er3@(2xan^WYlpYe0 zd$U0Uf+xejF~e>|NP-7i3MmMy8m8dOR=Gt01dKRF)|gg{PLkHXVBP~VfZs8LHAqJ# zguo(1<`{tq1z|0qV966MfWKoEZy21w1Z5OLC?;tucPpfzJqx6O3^G%sfDCGh6mVsk zq8V|@w`3j&i^}&o&^-YuoRF3b#?qJ2(DG6s1ui8F8A3iy%O!ssq<{>hzaV1y(a9TL6qmfekgzl*5S9 z8z~_3HX&J7NC6?H@n=hAptiLhC~*aYLp%t%2_zJv{>O{LJY7k26)y~^N$ z6;hCMA$Xvrkb>}~LkfQ10x4*>1f;+?`IVL(R=~xlf>+xHl}8tpdkJi?g0=%zK;&ZF zG0|acAl!}u8?2z6DI(ufV8jZj5UQ)f3M)8V6)RwRYVFFuQ(sMHV*nMb`fEaqOHW~U z6_6q1{dK@y1!N#CF9lY>=t%iWf}eh*xQE@Y0vleq2CMR}0-F&qR=^3$P!WX#7Rdb4 z%wN1DV6&@$JgKt-`&|V#)V6GP6<87S#R|wEsuvEdumVCvCqVmM1vb<;Q#QA`HYB~V z0y2oHS+>Fo2r-RFEwKU%N_JR*6`{JZ0x}6Jtblyj)QGr%Y=pL5`_H!{hL{L8y9&to zm9YZypcyM5pVZi3ur9wS#C()tg%wb?gighX6_5wR%+(ksY<3kKt^q5+G-rkt;NdtI zb!lz$S%(7#BwH4&b`|6*sfrcg$IMtkPQHGuAQxyoSV7K(;DN?=JD-!!%W0jQhA&od zxH49NThzq3uYiP^?DdNx)?5Ir;IIQ$K;&Y^3K$!hEmm;25>`NkVEy6&)tMDmaHJ|$ 
z;F2K8AO;A4EmO2OgcVjm2G!ILEzdU)$Pl$-1!N#CF9lZMr6V~E!0JYcKUlqiV8aX7 zU{%HnY(}_{SOJ+|b*z9qsj~xntiXnv6;@zH$QLUhgLq*DgosXn_E>=pHO`dHSb+^m zZ>)d}Vr&9z-asJ4G$OUc3M?quVFgx%g4v^~pSIaoKxWJ}B9cW*ugG5NPGnmJ_&{&s z%?2wVyj8{u$b)9AfP7M8gWAz+31$0W>$zZzTdN2`A2yFIM>gPzl|G;1J+$Mzo)Cg$y6kQ1DNoYGlKx(M3Q@nXl zghg)&!`DIzl_Cx`nS4O;a@d2n7!CbN1i#yWyF-Nmj6wvoH1vu?Kx)Wt^*tkVM4q?c z;j7vg;3dgb-S(JfErBt-=f7O)fFxK(1fOldk!t26G((!@GKq|mV7977EBvJ=@4yU- zgfB+Wt|m>PYpN#AfZ^KYS@0VXyup$+{Xq(Eq($rzXvc@D6tSw)6e4^=B41djo5Cnv z_tU&q&Dm`_rrNVLgUA34U*1bTEub1vdg!vP1!igzyt#^05jUxrjDU++UKvTZ&l!?JFzviyd6#@RRT62 zYqSlz`GE|T&7@%3?^K2eepNWZ773<5Y;E!k=#lix)XG5#u*##IAB9`On^(Opzyh_eW z23i=`epno#n@{rFP_!UfdcG-lRe*9lseR?sX9Zc7QG+!wrEuX1c+g#?$lj;@4Rn!7 zchDVoHqQ1o(5)sr8yh`zG0FU~(S6(SSs~;p89gz&b1>>oTjUyz=G}~=uMe~`$sLNIo&TgE^9kw>D$Zw@VaPM4- z`SPZ$E+`u5A@Ft@I=-`&UfHpg*nJ!+G^rr2A}S~Xd)oOv)$46{Hv3zX>{w@X&B=-W z<}>N-CV(zA;7$fp8o+R8tFt}sP0&I1tv8``I%u7*TT@K>Q0^d;IvIlQQGaIV5GB6i zT^V~-{eoZKtRhBbHX{3m(?0t3Ru056lf8uB8T6m()vYI4)xmp`Qho4y5pdT@SqW}5 z>`gMxT3JG^o4;~>!bb^ZWdr}!kGq+~Lz(t$YsNp1V70PgQtid_clHN^#9RMf6fzkb zU2ypdfc|i^_Y5t%;Kv$pb7kI-_XfQUbX-0DYFP=gUX5duPc%q8G-gaT@XiMGDocg- zG@ZPkX-SJw^2cl|?#1i$|0jaYx<$Yg-4+P+frc6@scmWA@ zahpu-8fiA4`D?JqNmbnK4;z6MGmBd70twisLj0t#?ZMHlsRQISV+~G~gr$k=N}38v z^`kY80)Mjs>;5qWUw-LYvGV&Ce3fX;JN51K@7*`5l z9S)s{zH!olPK8$gT?DVSGVN@z&dSAG^dnB}Ksy8kB0ehMfhlrWNk~+@%C1(Y9D5x7 zF0_I#FZ7nS4Cf=QYlW^;rHqK~?7*eqr^_0G~Y^V7b03iDjTL;N_As}pZ zB~%iaSp~lt@f*UFhCnM6Qle)etFBU8vSwC|rV6Dc_1ZYZ-5 zt+LD^4hNg?*UDAqnQ?csk5)4$T)?)BqLTOvZ-ejEjgpSnIX~$f%8OWbB<1>;ba@j6 zy!S_Noo=vnVI>c7X4VtCc^7_BZe~>h?589CtsJb*LiA-GB)hjVv+JY}wMq^p6_D-X z3slSzsKyTawdGJ z38%vwa9LZ(`3rs35?qp+HKqubg(lBXO+EpS#gEjaN#tRFKZ5(bb%rW0oMPE6HEfO2 zTvqOLkyS(gAmZ;9cd}c4QWXF5nAYsKoA+eGvR&VB5lbh!B>LeUpS$QgBx<8c23O z5uPS$yWZb~Q=!FCZj_leIcU{~t@N>gk`WbJn9(Nfgdc^6NqquVIbjhC_>>X^C1S*xsh2FUxoi2u4Y)s~5uWG|dg!u8^#6%} zXkgeSM&q9u7^2uk&>G(fRLM>IXaNGfHj0`}V^8z*5j+$`YALMKk~%K4NkseFyJA;n z`i5_IvLm`@FO1CCsTNKg+<20@1Y2m8b4<&qauLWnb)H zlP>07W#>OqJ$Hhm6L3mr&Ukisg9?Bn(TQfVOo 
zxEVr_+{5!xtWdb;o**j}bXwBxxxN(&Ixco)f0y71c2>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by23Z>4r zTnorc$IW@MeseSo>Pv@LDGmjLVjG)3v_r?Eu_grSOR1`rR|~pJ$EDn;f>HX?;S>JQ zk$B3dq{HN16B_oWPmjjvAi<5%cCW>qbDW=wT9|hgh~Yy~fK6tl>V)A2nOzQ+Xb&N+ zGUvb5gsr+XAkFWpCbV8V2+&jL6+twDWH1N>Ly6cfZ}XOdT_H~hz;{;i6R@|QE# z*zKF@9baE;z+-h8Hoan)iv|d;Q?_qK{%4Khf(^Y3ETCGOuLh^HfPHDOZv4*P&5eZG*lS&m~Q1e3#a4FFkAT)(S<1dzcpbz zYlU;*Tx|tN>M%1@H!VhnREwK%Dt>FNT0}fr+Zp^sg;L&(z+u!YhvV)-8^hwg%n!-T zqe$UKirb6ScSc0;@dmur)(dP!BL;-Z8NzlTR_t%DIlh&nH~ewaKFiv&qynxOP=DqI zmUOLUTaC@)6p6xyFF-1=4(#!P?)jd4%)Ibg)059kt(@_YW?;?CB8c=6_5%`%A(g6f#z*goo|IFvljNN2062${qc+#(WtuvczmTAfz+t>q0G2_J!n;s-qrVGO3y^X$oF()8}hgxRVrG*nL}qE?Hn{Q7h6J(X1tR0{yvlMw@#CkY-Ry`RnI??5F zY|^hMG7S{G zR$nj!-*qy9YtaRQtdj|2FqNKXPbQP5cTXnFR%M}~5|WGu&z?-m!7MV8j2HWiSo=hMtI%N?sLic3Czzj#V_GInadNP3_l{1@rvi1w2CzA=XN>8>D zpeM5wdAT>8E9|;@n+Cq?WW4A^mkZL9@k8LM&$B0!kqpq2@nltA6D|do8yU}@OvV6{ z0d0%Q!XiU2sPPlT{ClsH@kCW>3%5?jk4M^=dR0gFh=lIRC{5@7I-OvV6{|E!&sx_S6SS?1Zh zBa?A$NPk|dWc+x6Ix?Q9=!nL!K{Xb-W8>#Vn1vs&a4n8x*I*WKmHn7qxc2JCc(zoo zPUEb7G55z2oM5w!{p+vU+bx#8nDxyAcCs({bm+wXcEkP(KG}5gf(l!MX!v;sdeG-p zFQrVA`@G0XUPJn&ru`vuma-^EGA029wu9tRFM((^S=d;YzIgNyJQyGMeep;VOYGhC z1AZ_3@usB;BODl5OLUlx;?7XTJpqr!k9aGNvlphX()D@pNzcqvE}N-H6j4XL+|@)^6v}ob!Vfp#VV0Y!xtujMMPl>IKaZ}b&?$`_6@PD=(wguKe(&#bHlR`$m(hG)-{nTjxn4+m;0#_+SJ#lq1Zk-n6@d4jp~EH*WQZ{b|2D=$z{gcF>)tw~vOCsnv}L z%WtIF`Ute+L#1rZhgt4AlT-cfL~C>lggI|r{71}AsaI!n@cTWv^C);*2MMqdo z&UVMW&Cam9)r%YPocH7}hdpWG-0cs03K)%nu8nQ1&ZU8!>oIX)XT^SWGU=Zg z#tZmsqUV^;>Sbb_4wtWj{YaU8bZSShlTQhOe5!l5kZg9)iCIW{PxXgqxVH;@g#^D8 z!ID;$t#J>Xpjlj5q!&XG4t-5eAq#!Ih>zsf3*&n>k%Cs?Jh*@cJ7jYTIDW;81F7MIzTm7)VK?oxt+V@pRV6szAU|Zne})+2jFro??8@j zcDGvh;Ox_X2g*d~-)JJuK&I0AR+@=ga4^2Xkcrq?@E5a3@ggeKak)N^?q;8ZZ$s(Q-V)B5;3*8O9Q^yfIC>`Dzn=X9Mw;FvMXZQ z=^12SwysoM%53K@H-(=Y%oe^A!8*%Mb!iLnz)#vnTcdD+yQJXvBaqy}%}T+b)Xqv- ztt4o;&&v=8_*87t>mNmM8_Q8;y`t_%S>qZ84;v8D>!$fKR~E70I}NzglVLG%_zF;V zbuBwZ2P<>)@|r>p{8{7~s*xiJ;lo$vMZw|Whz9$H-;O|X54lOTX~WslR~$FfBH|$2 
z5WmK%h{*cbmy+uJiuL`ddY{kGzN~!U!-nR_MEFVsuV;C%3mKFvb-2(BDhn0PLMR1_ z%Smaou$oB(zT9xhc9caiK=_C&32Q-$z+e!)vJP?80emBZH4h^a!-0>4i%{UojdqRZ z3r>v(wIy8*!ByWn6*@Dfju-F;PVEN67Z(U5y@B(jjs9&P>;BNG$zYfiLxDrt%TRM^ zGMLB9)u=Y(J1anr>S+uVzUkCnu&fF7fuEdf4hd*qjAVJ04TqAF4+R1(8qvz|(@uBN znWB3IMq|=wFhQcJ2~>^c*VCe61zPbzo1%iwiSJ38Sz*ywT3kc8MEVb&k7PG3_;Q>r z01!UCre8)i(X8kAIH3Y!f3)e)CQ`|%D!utT`-6dOo!D1sf|(?e1Ll2)zzZ|91^3d- z6#)I=X78EK^nAKtaj2%wLR29sZ&1zr4bpTT1(GUCzAx3g7fEpVx?{sk!=A7PEWvU;v?4k=TIY3)KJabFHkn~m3=mEY z8$-{nO=bv`ttyBw$jb*!ijh<@X`ATOAhYZVVZvA5L4>0b1Gr(k^t3nmx^KwFm<$!H zF?Pdt$ut!BCvLn*fOM&Z$pDhQ+jV2RWM~C%c~k7L1YjU6nu!axYZ@XzLrjKPo)xxB z2F&!qRkD&)=Z`EuEWGP+^A9LoWc7Dvw|X>kN3NlUle(Mu)rOlFe08@9_c z>7TkddPVB;l81?NH*A-isjSu&V^fgO{tB|4>8kY@?S)P^?=Y*5te@3^?V5S!uw8B) zNwJvdvW8n9|Cz@Db;}+wkvSOB{R;z zOl)0{%;$*h3QUXX%mH5%D7s*~0*Q-1rMY3dBAY@|^OwB|l?~&7j@WLyE^HSQzJS;+ z!x~+qX-GR`BMI?HUM|T4INNyQU!mV7p|9nPIzRz}m1~GJx^@QoI4U zN)rxl*e;n7HbLPnc84I!(Q`>!905sX8yeds<0-PC0r~(pY?lm*r7l#szEUqzmkYK_ zrehrws*+YD=!w(10-29p=5xe$$sokhgmmVBFA5Z0uw8+~ z#h=pLuw9W&A*uPxenx4{ZQt%;8*G>JyU_3ro`D7g2Dg2?3}@;fqGLv~IOGCP<@^~j zBS&nPVN--|91ur_nhUneJYF$ecu2M~-0Qe+mt{@}5`K~{0)&SYG?wg$?K1qz>RnRb zddyi3+4A6pjyE@Km*ee)5gy$3?Q)EY0m7+aW1v~LHkl!CF$W2^eY?E8g)reO@7e=} z8@7AoB0Q6U%*oI}r=igSLnPF*dcADK{@5-V-~wQ~WM~C%?%1w@aH%D(OD}~Nxd0Y~ zi*mzj1fJZ^WFP}(`ruNPq(JrJs8pg}sC~N}%K?%WN6#f`aRkc#F4;d2$jIciZ9Uj7 z&!m6q;^>*2BaFNpv0ZMavRYS+O{Dhqa<>b+Z`aJTH?~WL@JeEC*e)5G0BWT@1n$_b zk+f(mF4(SlkO0e+WDti`=C*H_447+$OJ?DdPbdVK%-Rv#C65zR^v8AuiZ0l$K;q(0 zX>QoA$fl6g9I#!1VwkjLN{){Ec8}O#yDsnzCai&M?cp0YY?t9oA4EcKzIED|hiB@D z?J{hNRj>o%$WU{^cA3W|;lgX*E+(zm$e;oLBz3nVw#zan1PMP$>&{asf`_{kI%2yF zzp{Fl)VJ>Q3KAULuw9NfAv`!9njkABJY*x*I*$8xIYz|*;ea)A^xWEHJ+>?FW~(`P z5T|j&b~*V3*tg5kt36Pl7aC4hIZ=$e|4n=7E{PUv^z7W5;7HsyoSiG7&x5u2a-VcV zVYDnconH@vMDQ0C&)tyBrwqA?p1Ul8$RfV_G0amITD%1b?{2~m1W~rotyDV6L?zLr zCOcFV5Ecqh9_{b1iFY;No*w^>8k~muI*5>FV;4d?k-Z3O=`2pkbWgEnyLpicyA+2 zPCyPQ#G*j@2q`s#e9~*pPO!1Zbkr|rMYPH-H^QVA^1WeHvNc9p1*wbY2+*}=m9DFk 
z^T@>>%bkFctg7)9NvZY{z?}%b>yv8>r3y7q02wB=*qQ|XC|t-_+VZfYGdGm(?h4>i zLTm{j{cu9jvHw+{{)!r(uDY>yK40CdgHL*|gnQ-5aQOm?DnI1YOED~qRZI{$dh}B4 z)Jp547g=n1XOPHVu3UoXU^elkFT0^Wdz*UD>yC%$F8IzE-5^g2kn;Of@Z8V6o`)tB z=Wjt5US1C5*-)o?)9#9Th|{M}Ti$}MAEHf$;6k2ax{~b$>22(=0k2eW(c7N%2csbw zq%5kZKX*ghPE?=R1;-Sl>}+o$eRSBr@*OUFuXXF__V(a>D&u$${Wf{E-NQ-sH41ji zW>Ot1W*_gYBunf(aVecy3S$0gbWwPk(p8^fZ%R*IRi;z6@I-5z+P}6FgGnT($}>1K z?r!#b!|9~O{!OOJd%M$ncLtsGf!1_qd(dlbjE2+kXwcd0ZTHXxPQ#7!ot1KEohu@U z!>U5Ai{%*hr8w5GH<_Y~+0Vs|>*q9}Om=BHd|(&6ndJ)WE|a;X`Qj`hGhR-aGI6Dk zCO(%|y4XFF^;9Og^1?G)$=x#cX1k8`aCCFi%Wj>oNt9Wwy3AgK&+LNxy(|_L0c15p zX0_L|#3fcsA9skF^x14-A#vD#Qp_D?iWgPj-DKsC!;rNFBe@0$y)K(Mi9RX@Uwn1i zulUvuh;McHHPsIJ!wxZ&4vTEi%?nKsSu@jVU#LJ#^dYru{bIlx_y=Ngq?^o;je!_H>=KB{u#SP4 zdHh9X!d`*6-I_@t#&ydjaRi3UevK6|dE6xslV_a+F-PDN#NWcF(e#N=_8Kun%>4#XUR7nK!z1>z&Nfmmn6EIt53(ch zK(T~IatHhIO1sU-BvO;UGX#|vm1XzK60yOJITZx}kNtNZS1-gPczzd1E@XEwbTy28 z11n#RnWq$z0GqWu90fCh+~h@N;BR9UV81ZJ8F*zx!*>Un%$U^?JKHYGzk@LcKf4E# zGV7UbZ|w}?CR%B?TENxs7C7;wVhrQ*Hz#1;f0C17sFKJ}YN+I1XS#T=i7?IkJjs>j z=Sh$*EK!Ev;x|J?F!&q^Xc}ye1iCJ^042V1BzQuwITC~>;2cS*9c+%I;VI_0tlBX~ zm(V=p9DFN+S11CAu!~&sF=mTYq6u}1*VH0`tcK^v9<<3A9lRGQgsLbv3(naNV zaw#hJ(N3{*-SMPVeMeUCh$rqcNG?TX7sceTViBIa<#;J7kG6ZQQX!VUr0_l3sEi`j z0*=p#A};5_O1Yaf3wIY$WPWxLULt2XoO)fOq03m&$NIz@&Sw#~)qHyneA;Sxmo#+&_QW`rgIe@!a~Z ztm#>G3*s+uX{4%AGMc#D}L?ibAzNx@;Jh`T#8 zMPiBU+cvv3XW{IFzYZ^Ft!i?%yWQ(_hnti6`N10**yPZjmqY&UpF%OMsD+owaptxMjH#$RAQz`Y~D-} z+q{_~v3WB^Z1ZM{)aDad*qol~HUjnE&8!z%`knmERzW7w9cGc~@++pxD|ic7mAtdbFzUbdD?e;Qzey(oWU3syZ3=2w zmAHw>Wm~IG=QsT1%&GOb244~wJ4_ifRUUa&USjRAmNk!;Zsp9WHHTlj{m%k}N30nn zrIo0=BZc2NaFbK5>~d=ovBDn+%)O2>XI~GB?7dDhP{Faw_BfJu6+Ejoh~M24HFy(c z(qy8floeULNih?RqiBWVVg|U%E@|*)%A8SywEh;^yIDI~7Y&l36mqMo!CNSkMh)hz zgUI47g+;muA5&FuQ_TuW8oZSl}K95q{N`UlyUJxGM0jCgL%LgBfyq6#+_!T zI~e3^7I{mH`A_!a2A4=9xt#y`kob!g;fc2J^I_rVBf`(G6Mnu)`1xkx=UarIZ-w_= z3G@A7f10y%m|J4uQJB&qR(@Jk@;cjx$g=Nb(myjyUaNVmKsm`EHyS_9Q#XD-FDMB8 zLniv@^HoP6`*1M7<`GD`AagSFfCDo5ymOy_{U}Rsm*L 
ziRoixM@H^o-!Gx2#>yoYi?&{pA>SN;7BY%_W**dtSut}v<~}!uS~0uZE74=UkD1h) ze25ftaUsJnMFf7A!)zEbUh_c@<7GoeIu7$;$rST(BgC*qBy>-8387gqgo-&i=qzs{ z@M>)CBrdFxAc{m?%*Txo! znURb788XFu+z17-qaszZcC^%3eAz3$@auPf=K985`rpt0U(fH`^DCY4vDf^X-wmV>h24{_!7xtJlY)?&d~!GEHZY9#eR9d}flG%j6ImKl^4Soe28P zIQ>obP5PUqtEv2x#-Zlw0?fA?%{_1n?8=q~lgojKZdbNSxrEN!&J-Fi;H?}>mjV6@ zb_H06U2l5(PIpjpn_b9lu7#!Vp#!d71#$a|!fW>Jy=*tc??W1W66WvhqXsaB`OW^; zWC{A{*u-Z#sjIOnMpjKGhd5y@>6O4fk+~?x-5X%{UFbMUbV8zPmZEQC7umD_0P+sF z5$NJ;I-ULyE!d<&RZIuAVFABfmX6j%pG?1=N}oXfifm+aXCr++HF8wF4=39rbb@1- zTK^pXhf~ZSUQ!5iFYIaWPKDWAjNuKCz8O#>9PmV zPL9#mgTsF6fvJeIaF7a2Vvh5ZUT-sQ_J`A?aqZmF73kCG7wzIV*Kh{9x(EyH`78HF zjimzxKe^q%2QmLf*!@7SJGi5NjtC(AY7zOd{NST;ecX9xB14eS2K~wO`qa%{bA5CuN~JM?e0Ph>Xebs|8|^kUT5r(Xn8ugE9<=FUvp?;RhSx%> zfz(CPuURTlx!V1(>v+mE=a%g92o_?`diN zM1S)c-ms*D*&07AvpLk7aHO4V zBn~_1p6U%I{IEK1g{#*rxsBkrg{v!gY!)Lnd0)bwl#entN>-g6oUQP^-(+Vicac;x z+t%7xT5O`8V-5Xtxl1+&4_JjZH9I96@aYKN#46E^tF6A~p!9!s^G`k-h?hvU!8jBj z-Diky$_Kt9KHgY6A0KYOeg693Dwc5yZEcr43 zF742R@XI`kVAw-C@8bVYi zdxT$Z0C!lolV`;#q!fZ{q>>L^JxT7@TFb9EcHU*lRpXkSCh+eg{vsin zU8U1EIivGTP51b%@ouQ&<)<2EgUVDPIH2ABSVt`vM(8XexqE3OA@Otgcm%JpvQxp= ztb`MQNew&jA1!hQ0ao$XMAa+n-m5BwoPsTsepo~{y-dfloy<1H7SEnaFXBlV`aY8~ z@}-2BtZ5kEILeQfOzhQM@OQPa(t+1Cta*fYTZEAeDa!Do#FqJDg0|9z=OX@wZAm~w zYm!0Q_q27-+IN?Ia%2u{c+)rVC{&voIxEhY7M@EJikH767pgt?;U}J5_F+0rJ7IXy zwee`0Z}gfbs~a^S@$m+{#o)|VdeOG>+^z9mHqA4oF4wc2?QHK&oU!RGQ;Gw?vyCuz zKpXl{R%OIatW=6zjYuu>lbXUg;q!q6NE`d`rW{6;TyyUedo=AmewpJ_4Y)mkKjQ{J zj6I?jKMbK@_GpM3@O%WuZRsQha=itYk_@Ayb*_jCc$r428M%@yroieVT0g-C^05Y( zE`KbdgV1YO3nd9*gOWxdP&|_NBnEw@Y2O5r6p|Af5o^f>;?Ts)CnLDU947RwKWFTk z4rp0}5jfyBNXF^B;6IrAs{EsB7uC&L;-2oaErWJsBSpNhnw}H020v@Y;BtQQJ*n0h zT>e<%tntDqJtQD^I)et-{dfpqZ2q0*ert!y_i(m9>C^OJ9`W?^wkf1}88<8HEsv zNd`p<=>BsSy{*2`?}ZeQIk1hJKT<%bwlthENI`j#+!+KQBeVRdyK5VyfKb%(QVUW* zMz-U{1}PvEb-ef?1vVq#Wq)Sjo%okq^G}9M#YCHbUS_6p%LXYR!{}sZj}+KY+d_-~ z#?H)&5bDenJw6e=8-hRvw?YcYhjjw9M+$7Hai$zbl-@`InRgJRfDqG&)DkJMpwvn? 
z0|c{2gG&aoOBFWQ#FwlfZ>S3i2~xY>)!{QO-prUgy&OV#;No z69E9eqR_RFHb_CP5&^(7Ho^EYGg6R~FKJxii5hOSUg$1wE2JRjLhwLKAr=1TK1 z=Qa3!3#5QtQXc2;W*OJnQPXLM6>#yX;PnDv1@so{VmEj zJq1RrfC|BSTLb0BYF7b$51Vyq9ag}^bZtM6PHoL!wY+LQUZigic!3IJvQ{hcxyP{DF$4f0(K{Z}%umVC+rx&iliiH;hjJpbKM!;ABW6Zi8{egECkoj@I zg4M19@}y37_E>=pH7l&ZiV(sI=vHl)E`|)ELL98H0zyP5Kzppfh8kze<~G-cq&HST z1~E1PHoFQ4F^xzqu>uQ9c36QGp9VDexfrVZ?Y4OWmJLBy7s999r` zvj85BcMeTAYPG8X)8t$<5LS@Sw&d{jVglP_sJc;}x>PwtvJrQ7T( z$hi1@t^J)}?h=K@AW9nFAZy zwK#-Te@&=r>W7-J0x~kokKeumLQ%_0Em#2=*^U<*tbkC|@xnD&m9YYw5iTTFK<1aq zlx{+>!V1WfI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot* zrV*(nR$xKN4lA%C6wDq?os`YK0y1N+5vhb^O1c&oM2<{kxzx8f?<*iAD`N%ZK{HlB zKB=)mZHM9|DS3}#vI-llfU+fYDn_h;JXjQsUL2ZuvB3)Rqj=devreo4564$6x~9u! zUje4cxv0eJT)M)1wk4md7c0nBQWYz}kD0N8oP0^+T2J(lp4<<0O1HrZaxMf9G-3t# zG2Nbk!Z+P{w%S*4q%u~3TU0YvK*9{h3Mek(0Srh02P>@LhyzwYfA-TuA_H<&fC%nfy)gh+=#;pV!F0v- zU?Q9l*zEa|Lti;X@>30hxI_Ug{HI1h1EJ_bP@78vQe*o%e@Uu#_9?4rSik;mC6{S}qwyQ}~=$fiYbCz-D zHzIh0C2RVF6eL?7qWla+-;2k&f=u1kM(G}5&1==1-KNZNp6xryQe+Sy3?V9L*pNho z^w4En3(V9ccykr0B!2dwa4`eF!iJ)vkMmSZdd}aWD)ucnZWX}=o3sWziQt>|`Aax| z!2-I;-m0)Qw7|zOL^22jW$PO))mJ);!8YjTFIiMJlS1Eq&`1QoDx6@8cAoTyt*u^n z*qPu9PddHvcr}zkSi)T_fp2&yShL)75y876{zz^m58Vm6HUnMCMefN; zuli^`-Rqw@JEaHmmd`qvn|tSNe21xOP~IyXZ0Ix*{_cnaZPhT&{2Cr3>AfJE--Bmi7 zq6=d-I+Km=pgZntob7Eq)tT&UpbLZ0jX>GWU7r;~o|4fMqdNzq?zBa&(MV28u0WLp z5vhPQ^J1k^1?Ed#Hr=qqE!ru$xtH8);AmBR*i#Neg=UKoN7P(Xz;W41^VxlUxx>yv zD_YM{yt{@V=(!Y!26PC#orcm|uhOfp61$Hhg(elml_G-ajahLT2v9LyT$GjWucXL| z$|=#M2HeSDN&^_~Y<198Z4-1eV=KEIv30&~P06wuS^!{Dh|;Y9^=EbtQQ|Acmp@kZ z3x0XCiWtpGLVfh>tsIDDCR;4;4Ej&?E@nObUIc5ESsM*|lZ>-gmQd^F&%KQKuYTOk zBp%AV%(iCyb95)5yY}Mjiqyng|6UX_85b^i1$Xai8dFt~^T!%+b7kK1t0D6%A}uRH z)~mD7oA^Y7#6x3+oK#YIX9Iebr9yj}PTtS7q_r0LW40Cd;&uA}6TxQPB47&g+6pD- zj_F@E>Jvb~eyTRX(W0wLpZ1p#Y}92nN24v^Q3H8@ogmZq_;v_^>kW&_s!V+g+d(zRmc_bvD;v2a;yA)u49tM~J> z@JSYq@WPY6BdoaD2pQu_0j$HJ6Tt?q=mYFfDfo90yw=LJv%xwmbr<~%w4_;`Knrg9 zsaAAdBjv;nv_n83;-dl{m?DRjgrw?NEp=jzqu+(re5Yu8??sIR{<5uw<=n)(^KTMv 
zUDL)HEfGU1Hl?0O|Bx;M*lGR+0LXsC)$T836JxYc{Jmhw81+w5n43hOqh`V>x;#I2X@F~+Y68?%%% z6mUhmRD`j_ih`>i7tL|*U)V0CZT8Q_yC3Lv2Y2+(SuT=g`IdiU#~?*>Q21QLU!$cY z_`+C&`I2pD&TyodfY)}BmZVd=#ZRQ*gu9{4LbPW4+JwJWt}@SzyPJKqnmOSDwxqV> zD}42ymAAq7>PAV&>zto-4&_CxB9^hEDdQl#KZ5IYgWcI54Dt|XW<9Z+ci|WI0qmzE z{;eFW&O-EM9wZwtGPCQX54B1TWhE}DsL>Ms`=ua|`|N}$T+Km@`bTU{H9m#_=5bdnMFE(YXLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~g6zE1no^D-pQeY{*c zPXv{YsMo>=d$7>i7@+mf_%V1SerPb-Sd}c0`?905WcW}MPKP((vbK=(7y7CtxFoaE z*cqy^C*ZO8k(xBtSyAfj??-T-x6V-Ig;OlMrN*Ng`Ueqzx42X0=O;z+KaXiW-+)dy zS*?@J*mvc{PB5Pyx9_rbym%oEzP2LCt;X?DkYXW}OMzbc4JF*8+D@!bfHhjWcQTA&m` z@u7e^O_}FjX;Z=yxLP!G|3i&BxNjHS*CE@$GWcN7;7HFzm`$FjLB4IHn*+)xAkH)AFfA-$g6%Pf=9f)DqHl!a7G>j z$Q!tLDYm?Uhh0a|Z2f`Hg~%J_>7{iW_Q)B8l%zfZ7xuz6Bud{5f)X*}%+yO3*xa@J z;Rf6v(g;uV2R(GzBl`cuKQu7x5~K0Y3=C21B4~~81ghkweY5~E$zCp{`xg|k0)9S% zhk{5gg>_m|$CY0uk-m?coejzD-%}r*(n!zF4ns)Z8=H=d*}!KxfqeY8^)Hx?uK zSP0BHHM&fI$J28W;$7Jn``4t4xmVfwk5qpwoRu=39p0b<;7GiP)2&A;ueRQb{pk>@ zM{l`HRByM%JJ87(L2?hz$3?S3L8m3{p6gqopyOg!_IC-MAX)@-SePJjq6zN`fjH4A zp>Ob86@lZ4>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by2 z3N6g57Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6YeJyDl&V^JwV=CnT*{3S2ShI& zKH=Y#<1;_+=$}5_8>20uJxPbjz3npWO`jf((LsV6qwQXcJLfn*6}2$$DiFhmq5zxB zO4SL&4KlkNERh{TssX-~{J`I8!d6`xkmh$)6I!nw1n4RBit%eu-_rp96*zjx|7wm|$h5$-ZZiILjiROZ zy;O{}vg4GKnHXtx`F*}oU$#P~nYL_5?Ma>HUmx-G!WUahr0EF$q(+mZ@KkN7B~w~5 z+x%&{0ZQ3J!`DM>g;#{Xyy7c5<=SjH1-5F_YZ#d8f7Tc-7=))bDPZd!)s>8;)uHtU zKec^-s{#Jo5cT%WVnMZuA0tEl`|}m75S*xT%og^X|F5XNdX4TtuV0}wC0QpiK>L`_ z*5w>x%bo*hkJnbXrd!?LS{wB|FTfu)VY^)~aqK4ETu(rMww|!>Csd z$K8cChQ)iCACj3zk;07>w->3=w+KGofVbLufvsr7fKWL@*bc;s{p~f!w{rA`KW^G* zSzDG=z!d}P&)mS0uC;8dv00oVQP}VWNCnmj3qIR~x7Fh*!b2toE3zN`mBp^4f$MCn7%4JByG#d2EOWV4IO;2 z7rlBqoNSLKz0Rq)aeV}zYS{0()-Ya{D{_g2{Ek1{D-dU<0bUT@R1n77UrVwpu1A8O ztbm1C3Q>`lWtWWwaDwFF5LrWJ1n`+AoMG)9*_5L&lXoR|#f1=)eQR5Z6$evw@!eUefK*sg7QMD8mEK0x`Br!` ze!M<)qCH#cw=dSRw=e!-h4+(q#?cX3D03WXr8m~%B$s}d^;L`HyTmr!&nhewO0Wbe 
z*Wgbp96TtO5LXE>1svahTEFQdAEc~?g4fFZo4~vLLH8b%(PmA(@593q$3)G@Hgw(iG3Zdn+7OViuhu z5BjF7vUVyQ>}sBMPJRVGQeh9fnQZ+jP$C!mE$p@oFddt1QCS#i_zYO)IX#Ni`Y0<~ zuFEYqmjESKU{%SuWhPLIR;%jh-aVgVbIrmdu4HJ;*5Ic+a~}@5?u}UvzB%{|%S7Tc3?ZA7+ER_KKLTiRmGS%VDQC-LtvBl?8#&#(R;nINn5WGexBGE0${*)KLR@_(~hI`dv9<3%UBT#%lO9|Bi(o;{h2 zWPqNGC#&+Ba4E3dpLq6UG6rUo0n4n|XF%U|G8xx~1nAY1@#B#;rC!zPHxx4aL_+su za%R1IG8}T0YrmdMAS^m%5immc zWWvA1-66aQ7?gL zHAxL-xl3OzdI%njkNdt{B#9;Vx#t6ZFZ}VQr3xe5rukZ;!)z3HhAQp}cr1RzTXCGd z*!EnX^vpcvN`>xBv_3A-*>(9V(Pd=VU5No!z?$WzA%ge&y>=jJTA5z!8aDflPEC5_ zbKPlwG_=0~MkW@{R8)RegqTXrRTI4=FWrLp!wq!R@`zVXV+o2nEbJ|GN@GXG-`l3NCj5fm`+J-XsMN(} zG@sXZxzTd2SCUz%EbzV@DI=OTDhoRB!3b_BN0?K+X?F!3I{I{P-0Bbe(|&i*IoBQR zpgT`*9}Oo{$sGu%PoKVR`HeI`AAxp!sFa`iz{*`zZ$_Sv{R1ZWOv-} zo*MK{w!NoxIqXRb;BJ4|8^?`!E}i2fyZ*JYn*FdjJ&*2!Z5&cx7=dyDdScw$>JQIkCs)z3z*FDwOA#z-RoNQ%&~`*we_G`(gc$EGH99~wSJb!*nYo?26 zdYScjJ|p0AE$={%Zg#g?_u%Z)e+SA$=-+5UG7xvOdK53B5*nA$^XPK*x#e4FCThXK z_y$8JV#no^ka{Bu&Y$0k;0-K4IQFu?F>t8%KEu#yU>3d5gX>bByOQKNSf#>+Z#Li# zmbuF8wggA@6Q1mfSax~_*_W+L;gA-pQ07Q?oxpb@SZCR(E^Q$m_({9yHr#nK7Nox) zf#eo$Rtg5Cc2>%2y~u*bA4PB*%TZ;$qV9*Sak->zVXHF_8{cWbot_Mffx}mTva4&^ zDLPnL5dV-D=N3v80ucOJ_S!aeX6 z>-!g@cs>K|aJ6wWtu_wA4e@KNs*Q5Pt2r1Az7oOfSsv^{2IYzi7tXUS)d-i98ew4- zi-U_q8h#|j3w*iZlIVMJm$@R4u;k!YBQ zFE}+G)RuHLgtdv+5HH{loZ1bBFD?*Rvm$9VoGsZHMBopdnhb_XF%&qIy$m&%CWCpr zT#YK442?Z&K{!`Uz&D-R3zjvZKJb%s%^?Bpi;*m^vf)rt@}WSWMI%}ne%k3yI#YB| z^=K?@G@4nh&{$erL&!w>Z~67KURZ%ve9)#|pfh3diKb?QOaq>eWH&ANa-1yy5I(Oa zHPOCxKsC|aGOmY0@u!3!;YXVeZ6cMSs?wXkvp*Qf)`@+ECYVVgIbhy*2)r;uk?t!1 z`oqoMGo5^SqUmRJ>Z&_E;e8GM#HFR>^`TBphQTGpq#v`T6eVC`nB_lpX=-_v#Y9!T z!Q$yTG_^bdNwU(x-(G$$FKG?B26Sy_bp%piQb=3AQZEvhmpn|IBQ#KQv~i`df#gsE z)Gz83_Ir_RYo|ZY7e^BNu_hd6d9iBhd0UogG{$Lc7RHi1Qp5aP7dDmsj|h%>bHyby zN&Dm#DXj7nTPGUNzk@$-z}?28@H3ztceY|W25`f6>1l8Bb>A5_#$+aJZBYldOU6;)pSbZN0Wu)AOU7mS z3ytlPF%-PzO|iogfPt`RCN9{nX^2`HVlt?y&UeIi3HeMPrb^bO>mmazmy(qXuH{Y7 zgtfV0yJSY#1cjZB4ndTo=aRHI0+OT^8rvn~DN1IO)ZMUMGANe1f9m4s6{*Y1L`|H# 
zVY}Q+Wwovtn@DKqHcy9Ho+NW&{Y;<2+^}6TPNr=s&vjwDWMq~@brN&KcF7n@?%c6m zBWclCT(Dj9AQv{3xna8kS1+_==7#MGOp8631HLFwbisB75*L3;bHjE;Hie|-FMCtX z7bfjFV!Q1NfbHTj`OvQGINh*ahBGdRID@aUtq(4+PtLwrNjqY@44Y!ua6lXxYA)C= z^LWK@;UUP%NRA`6%Q7bf2|q~}0l-5FItOyYb{T$UwI8W(126pGQSF0+8@9{w#)XGK z*lx~-=;Ww>&fjRSx{Dv&uw9N(F+e!A$qaVs)+RFqF6JQNhVAn57Q%$DylW2>ZrCop zUGairyJRM8_|P}TZu@r0I2Z;gEqn3UF1Pbvb@D?%0GLe9ao;X^T)~?=wre0F2ETX;Nrn{$&9cG`eVBsJ(r}#5vZ87yi_8uZA}QL z8@9_c>7TkddPVB;l81?NH*A-isjSu&V-u-;YruBNTgyLeRl;NXVsa=g7T!h;*O%P}ej2nVc@qvzHp>#<#0Q@H!LVz*PZK;yV? zmy?fpK#Q-uYY!A|*zOUxGZ~mfti@lKYv`cKlpW7xAdh44$$;$!gl);Ve0Wsg#g5o6 z8AHLFJGN^eTxy9OuwBy-wKT+J(3-GaLO#<6m#VzC)yhg-msXBUv4jqT+rC{w51XJr zw#(6TNm?8MNo5;)-!2(Xk;zO5ryI6Q2E|erDqM%u#nCHLmzRN=ICsN#xtYppT`@M1 z+PB8c#D;indIUFY*UWQW*e)5FOC+Q+Uco?G2f!y}(GW<%bqx@Q_z}RlCX9oC*w79h!w#)Ixg$LV#Wqq-S z`iybJao;Y-s2Cue8a4)+b!(Fu0vB_TaND=b%UcK&zVg;NLWm%e2F8LZ-Qu|2rT~MF$n^# zElKXqWktiE_pP!XUA@BLaTh!S{qKtB2&y^esu&4U;;%h1f-Fnml4_AIU4E)`ZHJnD zv9_sncafTGQky+ql#%GYjW9U@IiL`W0_kIx(Fk2@c7lyPrlWp2E233yxyB{6kfd`W zb&%&Jt$ewmZVe+q*P2zju1?M)7khl?UJ5e;q#}eE4R?}43cl-;YYU|cHBSH;CbigF z;V-^wk(V|G=?kS6pmcXv0GASCO91JIlaodw@KvAwiW;A;y0L^WqUHL6Px^kEd*#V+ z$wd`w#w4YWujv~j68w-)FU7DdRxv^3=+R5DQ!8y@(pE6OGOrwN;GIDtd%1E6qJ!DQ zm%i+V`s{7$L9aU=qPyTbV|0T&DL~4*&fvM9dp%z~s5Xvu;pOE(o(*-XH|?&dhd6!u z^lc%AXp|gl~ zm%Zh>b#!}sa6XlByoY|9yxQ*Jr1}~KyJa(}j$N{kcUF=mcAmJD&MXBn|1`QNJWc7U z&#*V8C$B2gDO-4=HBRkc+lj#>l2hdwoEdjF`@P|G(qjK6Q{}zg>AgFHPWnJ=y0bm# zwKhh>>3B5gZ1%Q$==P`K#`(@lIke6d5yWAcA=kxn4ExfOY}lJj(Z%fN;>PuJnk|X= zy7Uk1f;Y2VVclgiw=`dzMP$ayDN`n{^wGrU(n=S*XR@BkBv)Q|W-GZ{#@=k#ksgk2 zZW`^DuJO9wa40oVbxpk{@f7&XF1X*zVo?!5Rx@N)dp%2BVzu;fhp0)P%@!6ChwUfD z+)<`@Q3c*jR_-_qSz9oYYk<(}vfYg6Lu%Q2M0^c?#kY1qe5=E+sdmU8c8H;LSY(54 zUhv`!!aY9|whhCwJYOw{dX*cw_5%ds;#5tiKujKYF+uXIb0FpjTnfa!R@TgP+7~Ji z6MbBuKunHwlNqux5aWkk0x=oZF%UD4zo<;uD-gF^wt={`-N3JpL(Y@tMX3dA;(LHV zTpWQRvtMIHOdfX$#N=7$K+F+%QCYE9MSR#c5Ywj4rTitrm)fkM{FOC#{uMEK|5|3q z#z2f8b_v8}SjRxjJpQ6GVXr`Z*g6nj=!zJtFF+tJ-dB;?uQ3pl$6W$3dDb})a|B*g 
zR_qmskIW>fzUtY@~pwKIsDXruP{D(1MX+A&5mxo1#=Z$`Sd zp{`i1^PH{XG%1yk%EdVvzU}0ds!A%C!*%+#t(tB#&qnUGlnV zjUsO>ji1YV+I8dN9cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBME0$r-I}v-_Q79= zm$OziIosXtb-KgN&ScQvC@kv+VpvP-*HN>YTbKUNe(-P1!(L|ecmdmX!uU?2H2Oq4 z@uZ5)C)I48f3Zeo^VY8$nS3L+`NZ1hjU(rKqs$bE&6_D=n>SM=HgBegZQe|g+I#{F zo73~#$Y`&C{q21b^B?>TxF$2S@*70);yox%P5aaHoZ0Prk}*tDhVDj(=Pz_N2HnXd zz7h_^muIIgWgo-`$g1keug}BFvX4E0UMCuj74duNGPLjW9CEwW+wXsH_0aOAaK24; z!d?pJnQ6z3L;7(3qo`LC3{4TzMPp;$4b={@TqPC2??$knpHIAp`T*x}I{!#aJc-QQ z0sA|no$2<@w3D$viQ8~!wQ+4!VWc2%;%Bz!8Z&(Q-X#rEbXx zp|O``y2C6oU4F%Mc?E9)t1AUp{%gPT#=i3EZYXpy^ke$Xf(xdTl zxkcD37A=fQBrRrAVo-AY1XHuQ1XnVaf<=RQz!xLHmNv$nW~VzC|;pZd5HzDfA`X5r^sgr9GP_gx9|{b7HavvZhRV&PF3 z$!?;_qt` zs$}hGsj>L7rBzrMj3yJb>~aWhgx9hu4yVwqUU%4;ynUxT?rnB@y3OKmBGBfBVY#|Gg4^GXhg-o`-L9}SSq<~+=e-G_5tZZYwa za=CU zRZ&iz<*1q$k*s8+BJx>l710N5RYV_P8FEw+t^jOQL}~>~73CkdRZ;$NmZNGaB1zFk zMdY*ADxweAs)#hjKt!nf2(sz!*o}x

aqAM$1YWgbRRV@`%6_~Z;+jpjmq~+RpOwJysbq& z4r~f}QkU7}H}Y8$GBuGoGfY%Z#QEqOfO#I#f)Ld`dq(@+on8|EK|6rH&a=_Xr%W znI05^rhfL(iT29}DQG2m7c+ft>|jo4-eJx)aAMI#r_eI9Jc+F5XO zG%A|Rh8$dkAwb0ANyXjK*ty88$H~!*WNxvFtX<+t$|1@y1c<0Sskp}~b}ljzxjG=@*lxuFGcm7N z#nbU|K!&A&v5LZW?y-s^K`hE4=J9mgMa9#RS(RI?;^?}@DuDYy zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABD-0fV--Vyh{_>Wu~gh+ z6-z}XBDYw@P<4$}90?*Ohgika@o_*?tolR)D$S2u1>r%fA7Bv<6PTl-LxdKFST4DT zgUn23X}{KhN&!Q~c4lz*hL~ni&xazH3-QrFvy+MOBMqn&OgJ%86Gy$!JmWPn-8#OCF^l;Oc^^Kvdv~_@1nv3< z+o-pD=sHn6UFpB#Ec5F7RekU8`&HelgA4e-pZ~!hIJ9SBVSRJueb3yM zTybjB8;ovv-TD4#w$@o+{@^$bHmt?4KPExB8RTUf8uYyRhAl8!J)k`(S@}yffS0nRTaU zd)xhKd@WqPb|Bun{8!|^C~hn_TQ7uN*#~0TUttGgcI@6}c~~8R$Z1lMIP9=@sz03a z+}1Jd??iAH%kHQ@dvZK^qC4FjZ};0<{n51Fexg4a^@r`dM}yg*HypgJKWR^Tqpk6| z?#^g%dOSJT#^T&{`*<{+wQhl>?&h#JoyKiA9A7^iZ?3J|4bgfL>^k2Y?)2kpHsa-G zyol7*IJDeER+FWqadmDmiv4-ImhiN11D2zBp|$wW zZ~CdHSze;n^GdudLStBpp@BL;X@6$*Iys^XGZq`wz>EU?#CpQ z74wRDaY-@z0M?e%FoOQAw*VHW{r*@$#r;!R^kp-$g~s6NAy2?2$aK`$Q`EcM)7~DX~evw-qnnjY_ZC zV0OV)Vsi-wpNQZ_6Bnljy(w2zp>lCNCH2Qmv^G2p*T=80str?$t>=>}>Z*g1Zf(?XFgB z-(O8b^eVLD!!{wB4x(t_)8;+>$A(IM1CK(rsiC8Ta@j@zCYTnUD|!^2L(7Z#a`XGl zhaP)6O*>(DDcbn()4Y|{jT(^nSOeY|!s58zU2NsU*)-3Ty7pUFH}hVi=GytE8)58# zHuRya%7~rVx|#~dDngr&G~siB1V|hE@TMF_lw5P~BQj09lbJmHY6ET$;Lo_h4`Yw0 z#ScRWHTfowr>&ijkl~9FyxdlZTyG&X=Qs*+dCJ5{FmW7afQkp@U z|Nm(2tMcJE7nL}e2P2|;wVc5O_&GBMm-8dKr3r(}A4{AyUKpi^sO0dSrgN6~Km1!W z>_&tnc%Y?_g0QM#3chTWTLeJBh+|}pX|?EN-5*OZ0RC4zwgA6t25XRxN(g~Ph|Dnp z6AHpwK+6*xp!NWN&nn(9IDrYuD1<-)rHRiF=6NC6p0%S(Y2aMh$@T&I$e%tOxbtzd%`5E?pO{Ez~h5wguIyn+n% zkbK$S%WNtm1y-DqrGH~XWkc;8TKo?~2z6#EQ}pV{2p3jJ0r{}b`0SAa8)}>>hY_VW zQb6WyYL%^!0zyn9QcI-3f>Jx(3=qs7O&%Y{E^Uwk!aCQ6aADjuS4q#}6`M+3iww80 zLJA0PVoPEy=E#F)q=0-_Y@eh0xAS+h=6iqg%uo`2Ub7^nJZR62IbPz zMc7q9<{|H|?RFIq8d_fFfEAE=$a%5D3J47yFIy7{@P|&0cA_*RE$^wd63qEPAdbHiisB+tl&ruSOKOvH>>~;XL3=O&M#-A zFp1-VM9Vg>jyGggq3FJlFAhtV|kTHke-x7DtKoD0DNjqNsmtUjy&)3?G3j#S19 zaEqE4_Z5%;k-dIV#F~qM6&!KE3W!|HSOH@L1()-{3XW953aAi@N!qHy3M)7|53GO; 
zGFPmC463Og+RPgWF69ClLf$$D+*d#b((*C~tboiz&afR;KxpWA;To*USb@z57ZWQW z^Q(>(kSBGrv&RZ-s99kJR)p%>S3m~0!V1WTbpo`<3T&uxrfkLvY)E=z1!RyQSOFoX z5ve6sU_r?aE3hKui(QdHj6$^8S3roF8WFE7Rcb}%gB1|oDq{uYK{HlBKB=+6qJHv{ zp}fyAG}Dq^$7){zWlQK(j93AA&}5rogaS5L!O2E5V@GK0>%dDv``D=cT$1PI|xTB zVFgqO#UyQYk{Z%6^-E;;;*%TS(7S++7p!>V;5t3B;6^wacML}sNJi422I&!%?Ba(0 z+yZo*;v0hT$Up~N5>V}(_7_9><%M_s{hxUn!g5uB2sKUE@+<>%O5Dj{y5f2;5l#qf z_N+PTPP9tjCywOrD)>vD+l26+8Uf9oq6+~%3B9@ykeUt7;4NYJT1cT%#GxjW4=7#^ zd+-*cp+AY>_Zsl}P+z2Xp%8jDBNFW9eYUx1fnVXp1*pI@nikzf(RY0dg3 zJ4(w+p~ZsFG~j48^AVaM&2pJUMoBPREh;#FQM*7EnKZ?d+rAJ%r-5nQlIYc^U$ z@J;*tRrRe3+;k_lhL*R(sl*D_E&J{)%oYbbs<041+4=@c^=*i!HQEN<{6L1vX5tBi zL{w<-E5Zr3NHBv@`&_>_>Q3Lflbv$hpG?M+_Hq2v9fQ-S`;-1?vmYNQ{VJ&_u!Or< z0^jgJr)K-2Mg;GO_#?TMJoKjM+KgHE)MVV-+U!kd>G9I-C;NjlXJ_<4-g3vku<-WR z@*SoEZ-93T2OBy~gugrD069Hjd1EO(VjrDg%|IW`*1^(w{7bO7mYzxdt_bdA?bmHG zliuhFa#nFn=CWsFH=4`BS|m(HB^Pu|vd2`i3oxccEXUKEO!)J7m7J9fR1@ABh2iao zg-s<#SYMg{9f1tQ2r;SxGp%aVB@zd1yt8{wh#p1RyV@C~|rT zyq$)Q@0?4o>^PU$eHq0(et)~WH8?lT4uVG4oSYbJJ(b>W0_ai$ z?qo2f0gQIeb+;$|DLTl${W_FRhwTe<)KoS9LE1t6nVmzF_={ay(etENs7>ybP z)JMPC%7IvBvX}6?!@(2%y7eThI{0N4rTXCaBjB!+vJ%{Q)SqUYwX=j;H-GMB%zyRc zZZ7dqrajx5@y{bzt8AE5d-39(gW)jo*1s2pOvZ%^UIj21ZS|j`1(fnl!yj+JO_g~+ z-XHci(JA%#t7Rp~dNqzsKG7iY&{!cSMY4gnH=tixDzvBR=KV}tT8xrEBG49H5{U0| ztF26Mcl!Sm!B*WOU<&fuB*3+53VpXEZ58~hMtuSZ*iY3aIC{F2(2MCBIQYv5HtVvQ zau-({?`3<&?rfG#U3ZV9z21S|uGv3aaZ9PkrOxIX|_G-Z;4#|ucPi`!&o*GRMZ z%wK~=PO9Q=f7A%9n0eG{7fHZA72+p_Z4ZuaO&uVw8EbH=BrHv1U1<%U{BQ#{{9_2d z{L-~z<@YW4DzR``YayVMv#a;>^YBRnOvHkKTDS?mBdoaD2pQu_0j$HJ6Tt?q1D6Ul zE2>BM_Yu6(%Cxh=x~p{;{anebT%AA*ulcFabX_Cm#16DWKp^6y0v?zmhn0k66uTY~ zw8qizLMxavw7s`>98i{jZ1JU!+ge!8O}sn*CgIjKZJf~(F{ENM>M30Wu+#jr0FeE# zt%GE|5D>Py5-JJItb*T)_zhu7L!cE3Dbcf#RadFarP>bI+NG?zRo^Kx4p60Krz ztM_It<#i;t+0XnG)@@StDV!=PEMSi@eofezrJSLFE83-^D(f87ysO7Wvy=Tx+oiOv z!TEUi{r%qXj=_1$MY1g4@^9=Iq-dV+XCwX^EhWKc#~RF+Y(sN~BT?|$F4B^8YPa}_ z6r6B3lv#+@tgkKjYvn5Q%%rz9K&zQkE?`S)JIh!LZ-MXCjgpSny)f;rtNL+Rg)YNqXx{9dQSr$ z4JoPF&NkZRcV;r)LHjrQ+xcFn 
zyqJ@^y+R8*Q-LXC+%E>UExd;aVF}qcNmwNXcXgFm3c2lrr4yK^bXV^`i9(v)vHqF3 ziTU(N@x{=H(T|`c&NiP@@j+I|cW3 z$TqMHJ`glG(lZg}lP7ABZ^!I3D4&EZBZ{@ifX;#xk@UiOwzQa9TQ$l`n{eQRf&DLw zy*T>dj1pQ+byRzb;8ci~&niFKfQQ5BRcwLrYh20w{(Uy>8c23O5uPRjHs0HWQ=!FC zZj_leIcU{~t@M$Ak`WbZE?Tx|=ln3dDLxsda~8E927sbeRgD7&Xar3Td1y&$)fFvR zPE(yc3J=F`sz)apFym(SDcC|?N`uT?%a9>CxJTVyd(PfY5{}cbvz_3e<#y>MK zM6rvYHNF$5lA8_C0t9+(bfsCSR#7&a$TAid~uMi@Vv&yXc<1 zFfwDOS~ziV<4Nigtme$5C_8YFCI-G1{AdWwIW@XWfXDG0UD+4=*Q5)Jtg`bTsh&H* z(Fr)EGiN+Iyg>!Pk?2G-*%lJB-FV$9#R2$K2-Tyv+$E~F+u~h1!=PZRiXjBaJv<+m z%nAjamb81WZ-s)6i(T2@C3u3UZp`D6+X}@yLLg3bO6VK>Rz=`=V!4sn=SYrU3n~Z{ zWbnF#KZ?W(H^7<%B`5U*Ekr~tEm9p*bTEQXgeW8E9P^5&C?f)j6R9XbHlb6+*~B`D zD2fQQVCWB%(K)5xpf@Yf7t6O!=NB99gRpP0B}fYiV7{vs1`7nj+^shf39MXZHIJtmEuq^ zD7LZrLpyXl8f!wJzLaW7c?u+_P--n`Fr5$OGCjQSF@NYtJmpi;VREktjry}E#}jmr z;O2O{-{#Ia&QC=xBvGZa{9qJdlUb=cVYoqNw+nYz+CxaI%=vFO;apuBkmh$)6I!nw z1n4RBiXj%`)}X$t0sbp;iV5PgGs!E|oBrcaKUPIY7H+Thip#<}e4zo4)Mb$8NayY^ z1g)?W|J59`kZE&e-DLcm8bwR*d#M;{WydKeb1~BD^7~w)zHEg|Gi}+B+LJoXzdqvJ zg)i17`KL9SB!#DHOD&nwlKJLOZR<<2kPf{1dWfy?itv|Lc15RLn=PlnR&9F4Fc%FF zT&HZ`iu{in!v%xz)FuUN-Q&8Fv9vlw=#SmeGD>I&zSRK#ZHRjNX0f2!#E+37|NZ$2 zRtQeyYvunds;^$7JJ9P_Xc0NBbJqtNaJDYz5L@;fKzp>d!ZqFM{^r`K=XnACxCz_! 
zicGtcvNmVR`AY`1Nl-M1!d@qI=iXU39dCu%xu*ykIiL19@V_--GH-=*;9PA5Na`?i zRJUzZD(qrJ?P3c~#c!_FE|4(bCo7clW&{qSUO60h7upyW?`3{S<{m{25qcIm2 zeXId*w)FyA(TD+|a)z)Sh!y)=YmRT_=na3;w9m4(EUADi2GpOsfh9d_*;Zq-I7Om} z4VQGz_)HVtQjfQk&C4xU*eZZ^oq4L)C-gh;ra9m|g3mW(_dliEKk+F{tfn<%<6Gx& zRuC@gvo69l#>;X=F0qu~ z@%J*da?qJ8Xmvtc{Nn; zFV_pM|ABMsP3(f>T6BiIwS3Sq)kw!+B%8d^lWHa;*wK_JLL^@klMaU$s;1-eC zgS~Pi)9SRkZ?!gQBzy=Sj34lbgha*?#eK9XyGSg!;U@sPH4Bfp zk|{dDPkZJ*9CG=NAg<7ujKv;CjB4C8>$%KI!j%w}6jGgBeNKbx*yM&3OcFFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i z!ll5n`+FAlG6t9oNI|X8JsCePX3M&k_3p{|(N$^-*OT$%&R;MtQ&IhaRAlJR1nk$ihHDcg1SWWs=~UHkQ90%6f9 zsrLt8Cldx{II6WL>(thh2@I*6+1!(Lo)tZrOpsN2vef`RnWe}JBi3&bn%6oRFFMiX zg7jqk5V-2|?8#N3A(S(Vp>OM!U>oUYL1p}?~zlQA%#3|MBxJ_Gu$lgYR?BtWlq zGJZVL#>`1|p?fkpGqv|*IOMwbWO>N;>dAQ4Rqk$W$27Ei_hhT{=*eUnpe#5vXWl)T zz_lV5`K^-)V=$GTXHO=Rrgu*!%vNQgp%Rje2hW~N%E3G`l8hH?Pxf9Xld^3{NnYz@ z!hoz@`}Jf3VbLjd{ZSE2La(6-lVZ5_-n+P3TTdoTTII~_^V{R;6#TD(vk5)1nJ0RBm;D0JXw{`giCCdYpKUvMfMBbvGMaF%)(DpxE4pUYcLDA%6`l)Tzhq6 zJX6kO;FC=!FQ~9Jh=!kM zpa*;g2^hRT$wmXw(uN=A*bXRB?~NBk@DtisS5s>8o^o9(>X>^OP$Ux--%G zxIkyu<*!7Sk@4A;*yOtl%V<8Y?{cH%T(2hcP+8!e zqEbdo0SP`3!S&?`bE-e|BqD13N4B+osdOnNhri zzb1N)`K(?h#_2HSb_bn-g_QQhV00$k>t4hWU$(D-{YbHct=_EP&Dlld75DbQFGX-j ztIGDIk514mt}N1vK{ZY=_B8I&nXkJ%`ScHZyGuUhXSKT!vD>*z{%MuF5Ms|hcOf&k zbC>+n!Q4XSckpHD)yu5M^N9kNYk3E9+g9&f`)-_l25&=|2>lyRNd~%wW}r44j;}Xl zAeJzH*LoB$qskmt=ZomN^#wj-sJE?UkHEJgcn!-Bj=k(}3>7H(Dw2Bmga%WA#Eg2o?5@LHCm%6diJkFv%!2v}C@ zdZ^PX#=v(PaHl83V&L!3SXCTZv-^C~>|V98A2qv+84@U|HPlD)ufta&cs0v|UC5wZ zsl$b4P+6#O7D6ddTuF+NrL{~V@a2X}wxcYH0m4UINmvUi?Alp;@&dx3JfX{nZ$z-} zVMJm$@R4u;k!YBQ&pS09)RuHLgtH1mgn>VFYBw0Zo(+NEkDQtehDk9LIF!8%HJ2uX zdAwYWwn8|wzXQQHo!SeQHK9K6le0fZ+GSg`FGjMw%7#Np$%g`g7L90U7-zRP?at7> z0^`w-FGjMP7JNCreLR}Z z2%lclFE0Y1Squ1!3OW9QxXli)^l4M0sw(${`BIT*E3aT=(hDFzQR!1NOCWW-+EA>E5XAlM;G*EK1aiy?< z4S0@u?9kI77|-)hq%{C*_c+Ud{prIEybya~ryUaXpW-j-z= zjd2>Ahp{9N)iD3|#Z6`ZJ%Zc3x#E&p;LMZWG`-2~E$~i`@aGM9y*JZhI`bD}Qip#8 
zir4Zk&Je{V7%_Y|azMvRVL(V~{<04wCWAoW85gJqaZmfua!MoJiNm@^PPi2UtofP? zSi^G01(A}|ivTK{RU=K)ne9SF4GFj1$z4?N4HvkEWm60r)nNpdngd+JK3=vaMeVAA zA0YI6DNu_tX4of>48Jt0i^%DeVJ z;fC$f)86Fkz9Ac9GL!+bWmCAmF?Pdt$ut!BCvLn*fOG)>$N-WJ-gRTUWM~C%c~k7L z1YjU6nu!axYZ@XzLrjKf%?sNl17`Z*Dp~JtRkks~%UZ?S+7a93SPqc1IC?Hgiz6UO zTA^ClBEb8o0o<@%o=N}I#nB7HnQ{5V4cp~rDywzH*c2qRUtDb0%rl4Wa#g-!G0|xe zmdrzu>A(%!B~-JZDbuJswreCU8jB0IYaZm{rZP8dSKx|EW@74SU2@sY5!)4*7Sovn zz9>+1!FB}_l9Z-gzDLOF|y6J}Pifjr=&0qF&N^5S|Zl^A67ZcW?GHm~)1%<#3 z+hsW8f{0{+$OU!E`74DaXKct3+hy1k!-fOm$WU{^cA3W|;ezgN@Q}B>imrwagB!NX zGA9HHKS>t>!b30`5jbMI48PLqXcsOoJmwr?;l~Zz<#^-5gR}q28f6dJh=Iis+vOM) z1B3(C$kB6alUeYyv|{w`G}kNedS$ypm4)>>FtX3W4mOii;V4( zX(;ede{7cwpy+t{+DXxBgWJAcGPHs>cWl={xYQCm?AtXB5uhO^L(C1^B?FeRT@G(C zG)J=R zwcVRf2yxuEE3zphHGkR9DXqC-yGQE6b}`|Li0v|*aX};ywwv=OLNKW2&5sq#k5S;; zFddlSwr`hVQw$pph$BPI1>0pFw+{a2J*%w#zXp1_-Byje%y}+GK{nr5q&Ouw7o>LYVNC zckO|~4ck5Hb|!;Mtw)9m))>3(+a=S$Fi2^E!XMiu16%}bmkh1o%^lk{5H7XE4%n_~ zhyV>S8DehOE*UV>2bZcOm8_Rfl~SMg)+tA9mt#3V(&Fg3BrT4Bb<&!5BTQ!Li0zWc zvE%!vE{M(gwI@(=FJh?6_^&&nFGEk zP;|j|1rm~!7J9jojOe&r>9%i|3`)X)kktHTZ+ej=Oxklili{chw#%e2?GD?P3v_{J zpaFp$_w6#AaY4jmj-?Rd4CKl}ha0xbuqlQO2gH$~=7Q}qk4wUZ*S=j$S`itl;lqyD zF3X${B>W^@1ON}))QG@w-!8+ito9@It;d`%9=6Nz#)St-!|cIfxQOA1?Q)EY0m1=m zLcR>Ga~d~nmy=I`eY+gJ+5-i8q2Xke6UDeo;a9s*|`$>e5Z?V0!j~_j9QY@`Snmh1b0BdLq!2$p#b4f)4~`1HSvxH+#N&{LRk_-69E}DbV3oJ zwI#{jxvXgT^S)KqqpMfE)fF@X{qKtB2&y^eVvwYY@#i~yU`t;usB~?IntidhsdRUd znru>=J%;ovd394gsKL7%VR8a;Kp_?d(uXO81e7YEYt2ruvBz}OFK0!x$}Km-q!#kM zw^hc)5WAz});SHivWWVqy_ ziln0P<<%H0_$E!K*dOxgr5KjQDkg{=J$fm2YNahqmso6hdyvRpu3UoXK(qHY+)eH< zklj$9y-hvr_a-BBb9{G#Zh0pKNcnv#c;@F{&0D?drQjaCupG#=(2z1 zJ6!e>?DlQj+rtZ~jN?7@+vH_-4=2^vDA+BVNp-B4eZ0Gx9Af8*OXi^9{C zuKEmnQ+o2!GM%!8CtBmw{f|p23+(Z)?yW&8BVkZ!%Th)0^G1Gwh}hv}Zfp z!+v`cT?;W854&6a?LNA|X|#EvyV{CjUs|?}`qLS@LH&H(c=>{+^}$!6mrA{bqN;I# z&U5|2`**<`Sgx?{GMQVNFU}$|ho6Ic3Z;&W-Gi`_F>Pi2y;&pxx2+%02owi`$f zw{2}1?N(=l&bM&NoIzcUt;45x!F^s9i;4iUnjy2=m$SqrR!bjuh??}-Y+)gB*nU#X 
z9c7B=RN&oW<&MLUwFM)&T83Vo3A^k-Bl@Tq)3e*u;Fo=C2gJ8J{F-Wq{9%U}N{2-@ z=;j5_%^=+OGhy2>TrpynJ2}77A=iF@KwO-v=@f{`<1Qvho^=ky9D&P$xZlp2nQq56 z5Yzo)E>V{V-zDlw+#x_9CP%u-4A~fn@xv~Gm<;O}h?&QqQzq;ch&yfDKum4ep#ejl zk|sf^1#8M*nf3Zt#KjR9GW#`F#N=_8Kun%>4#XUR=adzDRm4Z;5{QZLrDm*OqRwu; z<5#a10t906{=KB{u#SP4dHgwL!d`*+h;<;=b#*SW8LLBT)SB2Fpdv2b zSCQGTF%Xl-T>>$A);SP!1fEk?>=lTQ+6H2s5xW%0L?2Q!HpJ!tftYM=mKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*OXn`23Lu$dgK>YPx@IV=hv0K&PHbLot;=}9+JWwp5k=((4 zywYwnGKtir?+iiZIc3>{vP5ifV@^eu;IRyN(+ARQADmF+7VCE2yLRD=yFhXwyMv)) zKvov3uV>|}G4qr{5@55IhofL7kefWG4E!yu0_+z?I0N6XT;ZdLhVS+(nX9NHw#-f7 z=k`ESW<9g*b34PhiB{UJ7I5{u1x`G!7{j>y%?X(IpX6j1swDCgzfA6Nri=HQ2-CdR zlU!+jo&@Q_5@q;}eltV_gU^wGrorY&(ClIhP~s~`f+qx&XJVb!RAOBo??zg zs`ZU$id|vwtq5ME2q3~Pa>>V-EmDal)P>_hzNcHkn+$6Hr+bt;15r%wu4}XrMeuHF9+pO zwmi@_Hb-PPh|qBOCXUOcauz|o6L7gyhWh}fz4Lwk=;G(WuJgU&PCve4WAF0rcwuAL zx$#zi$i79gyQPZ4zq<$rGm^&<-IJa6tUsELC*4u+Tz^Vly?Ey$8R0LR8-LM6V=SWc zKK5-?L7g_LA+Z}TW#c4|Xcb-ZhG~r=Z!C?U%X``lwQf7+8 z=FJqb&6_C_n>SO$HgBd#Z9ajet=WZbWVBbo{?5LL`49dkT$dSI`3<6Y`EC@aW`o%U z&g}L*&KM>sLwBPCO_sWw!`^fnUkwN1E3-qHvJc_|WL3fY>x=M$>|^(%x0J>cMf^T= z1={y{9=YAx?e{&fwqor^t;=zLn{1i=|99MnA49#GU}%bvE*e`s5&rig*w4==-a~zW zb2y!UBqp9jW;CIobqDP4j(29;JF{*knrYmDm9@sfsLDV1N|dKA0hp|mb_CIs8{lv+ zJS$G~QkMo%ipE}%=?=5Vbomw2eTXdk;>TX8d8|M=^CPK}8;qaksT)6^mnnq) ziitk@eAN-iJ{-)ic?1&p$(+nQ;OI*}Zytd}IP!#f1nlYL^X3tVtC1(Zmy;{rD!}Y2 zF@22e$jBY+`z6%WSh>Vv(bh{cwnD`t0lC3>v)F_U_o z50PRnE@T*{h`{f1m<>b5Yd+{q$PdhnT3_fQH@>)U&oz^s&6W2(bKCd- z@^3H0k6asn=^9vrCOQ!-UV`YUL(z%!>jm^RL0{j7z8-o3pkFSgOE+uT%FX_(qsHRW zuI8TQcsHP9my09(;=RyB`}oj9Z;zsQ4@BpRZ_nr7UIlxZq>X-wmV>h202>urA-oURmu%o5irx^ruzk~-y zKUi>o81|w`^tz|VlXJaUyfmFn(7~F?5~|=||KROw`(Ypc4*mb_uYVAYzc(NME+p;v z0PI79bx-v+pV*#^x9cDOez!V1yQBQlToQ1KY5KUoJ~W>!DAkUr(h^Ab&+RvbD3BKA##ns@_M_?J+tSvPZ3d zf&arP<`2&+gt-^?bato0Y%a&}8c5%aDKV#yTnDnymCFAHXddgIA&;j4FAaQX8pK!E z_5pO3v(Zqr17tPn_CmNF_TD$_ZT8QOhg-qSIdl!|!hVjwB#Y@%7ILR6?ngU0 z#^)Xw4N?zGMVx&s6_~^v7pMLHR@@wnW=Z4V!lA3sr_nDu#c!_T40LT7mO6`9?~fXX 
z4ix<4cKK>F1p@?-hIBkA*)FJut8lOVy7gX#}0oyh*GG8YsF z^G{%p;w*Oz#X2Ms%+fZBq=Tmp?ncRCAG@^*{pK2ks0iC)4(?74z;67xX(!)yfMPn? zfB?h6boTPp&0g{HXeCOeF@$_~i_2&z7FHYWwP>{du)jHruYf&h)5F$aHW-f%LaKq( zMbfYR)DBdxb|36Io-)n(B{I-F{4b}GIpvRy7B2+)Yl+0~f&<4_Xs2Fi@T+D6iN2B!N%oRwqiJuP#(4BAKiKEO(Yt9#D8#gXQ$& zS$51_DNRXRoKGp9-ApGj)8OEt&4KQ3Hm+HH=hrRPf7Th0-Wqk2E9{5 zw8aI*n5=%mSuL%X_VaN{$ap9$rlvaU4Mu4`$%}`GUSm`|fJpShJWV3Vl$l)`+8@FygeY)3Qxh4MG7k0zj;5rt58K0W=C+B;q(qlH* zvb%1g*7ub*3!p-8?LfSD`LD=-5!zzeY`u`MCi^SwK+KNa%WTbm*ho8UBn~_5o$3## z^$+_y!qpW#Hj5FPyf0x-%10R+C9CcZ&Q|!|Z?d!XMWk4|W+Ps1X4_gDhnAbD=U7Mo zd{&{Ea6N{KGd@VE*M#tv2>@K$p$Fk3jSyBmMfo>P z!bu2^=sgvL%wR;s?6oUJwT1K3K?H|VEPksqwCRQrA=0Yx?1g7QqT+ZC3Kuns8j7mm_XR-AbD#3gLSLyN~;g;7d zNWuFXpxIW)6%81nx89wQi-SKP-pS^)i~a5nQF*B!OYGl8pxJH8Nldr}%+i2@#8ZuC*lG#-{ea;yr{^dN= zl6Mh!Bwkue-~L+XYXu!IKix1JRHh2S0qyq3I%>HvLT3?G-m)k0bNE;UuduRH!Pl&W z6M#vbVt>;jXAoc&e@#@qvhKaAQphRTLKe>=vgsW?maTPmvTgC~sq}syKn#7KNg4T4 zLQK{)jBgy}M@uI5YA*P@R#@r4>l)TP!aFU(NQM+;cu``@d@(^=X~Q!Sf5WyUprJL% zAnkkFx@YaX%RV_W2R6Lv8+a6|O%0tDXG{yvr3ppnAi3-8nGZeobjye7H0^}pMc2lo zY2NBJO;$H*K;mN!c%#9Yt@NU8<+)qqy=%B^0tt{d_Tf!Aj40FQo;L43u}9NA;)|GmwE?#W@MqlM zhp|W0;)fv=%pMJK1HKr6aa%e`fn0CFr6j{BX`L%J`d+3{YDR=s9~;O=8(_Npv4{>r zuVF2eB!mqH@)&soVxb)Vu-hn2`zDa2ketwnSW7Mt(+2k7lM&o(4inC;zXKeDM3)Z7 zCwN_7^&icBRZf$0QHj@i%(mon?f}5gnK8JWpL|cMH3pYImN;u%(vlZ;>LES3YwDE# zZ_Th95t876CfF^1EC^pZ4!&%aTLeJBh-0MfsqP?^3>`o$=B;BYn}XB8_`vU)!5TD^ z5<*}RB6E1e1iZ2qP;lvs68t@@c*EcXCMcs2LNUppNCDk{&Z4)~7y7-B0x}0SuKObe zglb#E8G{s*7s;JL05USmkGi|IK?(>(EiVO9z?GPa#!8NQxWNfzdSHVTkPqv4@k0u1 zM!?JdNKmv@=e1=hj>5qPWU^LB0r{{_cJ@et4YhM<@!!~)SrPK>%*Y_heKR&l0U@Fj zpgmGxLya@#FrxHE3dp>JAO(b&Mx>TVfd!>@x)~stJ(`@{3yvY&SRn=E!(1D}g>lnd zB|VD^Qa~osDs?T+NC6=!wj{=4jyz~a3dkolHdxe8USg5ABNobWK?*2aLZ4zSxj;-C z*t0?k@>eg}V$61vfJ^t79|ps@kgnyjK?*QU&P63&=P}!o&((_*khfv8BL(b{0{oa6 zDagr}G+r2`hp6O!sL`+lp8y-AAm>8xKqFFsA8Y9*M6-*#7~l^qkOFc^d7QtSWn5=R zO{X1Jz{RIRtrr0+pto2Ty9q3Uq%Bs!*Z^vq_*W(?ZX}>m0voKLlPMy`dkTzL0Tlur 
z#D0H+&8`Ca9yaUJI;?<+zS@2so!XkeYRSDl-~}p>$y%+%lMid^hju+!0U4R^J{3KC zI9@IuRzMiG!V1WTb$a0%tXOzKz__cxW(0~AWbXyXjTPMn-N3sF$ox|KpiqSsRzRNA z$<7`tu%Tv!6<87S#R|wEURVJkq7$G!R$xPoGi7s|YeO={3Jm3Yku6+0BQgoF*;PP3 ztP!atR$xKN4lA%C6wDqCs<6Te$c(v0q|`dd+I2q380Hdc?*W-E*z77GAFhlQkO$3J z0r{lH1|_{h7$>RxA_3lyVY91%vL$pX#xj^ZIHxdSv#TIKf`~0M>%|<<$@ndGJASYkac;R*)49^WK$hi2^v^o z1xG4j1yl%yDuV7Spy!dXF0I1~YJdR99N5UN#UZTvYeH31Kh%U3kdaw_{Pqvx`RHoIi0`jCzcJ^3-4K*vQz>1J> zZ$<|3!U_lxodE5z0vl?aDVwnZ8BG^AjC8xwZsZ6DA{2JR)m7tqoF^y z!V1WYxkkhV<{^YyV)xLh18A_q3docyV+G_vGgd%8sj!H3h;1z)uL;lP_sp>xnM#K`y<@@PXC7f}D%0SV2A~ue}2~4V~HAUs|7V^K%b6NK2+LIgA~2gml!HrNPC%^R>d!i$ zQ{qkr(-qf)iEu(-vnRgS^cMn$53pu#+Sd@VuRXwjY6LWUiY^57B=qV+Kx%AX=O0{D zJAA$tQm7PhsLA95ikHJ4{Kc#9EF$>52E0C07{Dk*KubffI0U4I?A9O+5?Uhv&;no8 zz5p-D_olYT-xf;Uj|8!ZDuqG#OaqQqGasQD(i{>Xkx>%NR<%8nzo_M{lEM7aD-k=@ zq$zYw)ub8F+)bVZzY)P}ELqbZq`*~8>dq}{Ne)|#^1~Kyg{AK1us5B?N1+q149A;m z>lG%Ko;;>`t(vpjlo`&mOHL?25P}2hIFw)`zMP>$mu)RDQfn* zrKd02?)s0?Y<+{J`buXp*aqGFC5y^tQgG;ph(z!!!U?wc;?Q&b-l#jp7oK$clgW6} zE?<9Az7Zv+Rt3~gRVT&yo}aJ&o||+3Q&$GwXb~otRTxWYOn^@R4%ta1`oKabUH&9#%y+{ zo4sLg(%n4U-+ZDw-PuGJ2B8~)vYWd;BZNF9qbJ694#&M&n_Q!joRnOFDhVP|0cqyN zN~H=Qmbz@ZVToI`Q*v_;x!1tas`#*{9EJ+5utH0yI06JXwyZRt-Pe~pY;9VR??*$( z@Z`==<4B=N1#uPnPz5z~5Ndj*7J9v@nJ)4dH)*Bk zIipJrxRb$@1~A$=*F{&gP0`Ja?d*2M_Jz7N#i*j1|12z|{h6IZl=zDA<&RbUf?wXO zB1WSI(?0suRu056lP#8ahl3~jb?Zr1b)bbB8}-5ON5HkJR)QOk`qPZFc9u}<=C53z zu)Jw18~Cq&+|4B(%Cu)&GyXZc6R@&jQtidr6{(50{=Fz;3D(>FZG-N8mD*Si@ zZmP_Cel=u%MWkgV$a-}adJ~^$ka%dUkdsO(Z*M@qvQ%hK)6M&twzSqFf6TVxUb;^I zeMZ0sE=i1gEJQ3kMQ15RUOlVcUbFTT=(fYsMO!DhW%|SXWxZ zCqLYP4gVN|FTZrHSowVmzDg|I(pm`U);RiI zXtTx)ZSSoe2b3Wcw)oP=Z7nS4Cf=QYlW^;rHqK~?7*esBRe=@(>@+_oK-NbQAGURn zj28mJR#!qLfvLb9sv7vMh~E&VGz40qkPQiN+RSa(R z-mIm(j^sA`nV-VCO{zYHQzd=1#~x$+ny@iTIYR+gv`a-8OROlk>T!`elgiuRU)nCE zZ4J)HyYKJ!hIb6kTP~7i`IdiU#~?-Xd_NoU*JvpTK0DT6zGNGkGaM-<;I&<(CF#^| z@e?UH;ch6i5Up8XTkzM)RpyyVZ)<>7GpAg@mPAa;SPO4~@70Zxj@P{~?XKiStmIV~ z`^`GA^!R{Okhk1xIv!2+^yO6Ve0Q;GUe=7&8vk-lm2g$~Z%NQ@dVOX@uh 
zcr>J>W;@$xm*1JmcxSu2)!)wdIwi*?K&!hcV09!9)IhYm3cuKttp>Tz7K>zaJep0$ z!&E5!5xU@P^8$0+fdt8OA1{~AV?m`O>b0<}KHbaz2s{)&SlY|)8$mHU%2MouO*kFi zfXmuK&cA#&(%Wn>yO7ipT#|Wd>s@!A#f-sQg{|3$)&fRbOpDdlZm(eFt%Rk%lWRTin%DJlvkBE|@Qy#e=x zGdI-BV&piyn3KA_LKVI2&}#U_z_x|=5Fsof`z8siq~NZu6duKt_QBE#%oEzP2LB`q zX?Dl@XW}OMzbc4}amMi72#$wyjXYYQ6hZN!fI3Z?=U!>U(N@x{>dUJlF#J%X4(>Yz z_jSm&nhdCIDxxHOAZT!;XCllePt+ja4mA&z_oVqGWEoMcMFw;hq==*!&a8c23O5uPRj zHs0HWQ=!FCZj_leIcU{~t@M$Ak`WbJc<2`GkROIO#V5mb$fA~&W*Y>6qNAxA2M*8( znjG@blFUa}r>U+Ug@@xe)uSs7nDMg_Jml?F*`gnYGx8Wf-oV96vF>m5d^SYhC{Hh~ z+ptH@A*3Yr3ApfiyOa_HC1S*xsh2FUxoi1D4Y)6)5uO+f`{=Sq^#6%}XkgeSM&q9u z7^2ukOx1e}EdriErQU3SRw$mdnF)M z{jqRX%6N8og9?Bn@ggoHX1noP;^vRz=`=V!4sn=SYrU3n~Z{WbnF# zKZ?W(H^7<%B`5U*Ekr~tEmECabTEQXgeW8E9P^5&C?f)j6R9XbHlb6^+`EtvU+mlF zmUO|;A10%7O20vGR-iAIZ?ly0ND|nQ3jtY+04g@GvOIOGhJ;2>={YnxaChpLp1+1r4SX%$W|e?T}JcDGmjL zVjG)3v_r?Eu_grSOR1KWM?s!iP+vMOS?cE8P?bDW=wT13ez5W@$f0GrH8)d|B5GP@isiCL-ThmcaOR+;nP zZo)a1meYPR#hj)2UDbrvYX<>(3I&rS-#YA^BUaXgcQwF&MNTn6e0C;zg?iI}9O~by z=qP_VW1Zc;sXR~uzR-Y2>N3c4q;vNdf>zjx|7wm|$hN%&v9bvMO^u?Z_q|k%w6f!r zleriv4VSipgwHkV%T~xV)0PdXJ*m_D>m#0C_+l%`!X*E+Mw6uQRBfpxQ(7|L{7Gf! 
z*e1i*Lu`dtgulGvD>~)cY&iwCYSU{NnCpMk7%muur#2~I>mJvYjHT6K0QBcu4e;NF zsJCwx3#v{07#Z^4pRZtr)U7lPa(Iw+D2M+Q)mN|49q9Edvd!ZqFM{^r`K6LjW}o3LH4$h12tYjdWYzhqFG1Vw|ecF@AGCv=P7SvVbUh1ue# zh%Q9o|E&p=c`KX)=V~iJQiqwNx@njmQ7vx4srb#cY7y~hZD;V46-s$C0*6tr9FDsS zZ48U|GCw49k0J=u*t^x6^}46WlXJaUyo4h1>mR&bv*jqFo(CUmz?*Hoz*aP3K&YG{ zYzJb+{??k~TRD2epET{WtSw6_;EDnD=Wbw0&sw(C*ep(wpa$8x2&uq6kbI^IZ>h&y z%I4*kD{K|Oy3QQbt3H6dh(#@*{Ncbie7+&O|0&)6iBDl-HLanKZ=J(gLAa>Tx(L^h z&ozbVtHOi-RezPP^IJy;8SF*tF{A1Bc-rr#*8~=NgkNpg@4D77UY09zi6#8o#P$lr zxoMz?V4P@Dv6}BX=N`!}U4);ifQ4BKQIVHrmyHE*g5*shvWCnE;L}Yw!#byKWw4TL z%2AlfyOO)&LWs$}wXMX8EVrJe1^VOgX#8-c0^M?OtS{S)x(=Ue!eCCSQ;l>CMzYBpJqeekneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPv zd)uS|?jd+Ee!wHZ5z&?PVeQHA(WdMovEYj1(voK-R0re~CR`aro1rJcw;oH;qn)<|+)YcFvxx--6WtJd|wzpijRt$dbcK~`z% zj)66)z%ncL84w-CYJHTIE!X9in@fP!2j`l{Bi2`81G|~1=!k|!iOGLH z$LgBrMVN)3_RM`av{Q!{HScc-z|-k^PZvLkI(%@MFClWD-A6{F(u z?#TqMMHdLtlL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-k)d1NFRFZK@X+mlJzHl!r4 zo=g~!wQIkgOdu>erDit(laoUCWWuBvZoPZ5Bkgh}6Q(BNC*wsYx?G+I{dzKf2yF76J(-Lodau~GC*#Sg zye3=tq7gid^KkP9}`O zRCAs^nM|7AJ()0Dm4${%NHQKgdon2pOh%#`uR|vq887w@?Awz`*{-uE69#1M+OH=Q z2#Zcx1dPyqmoPBHk**K)?#Vi}^<)A=DrYwLWF4+2qaMU=u zY;`U@*+)G0F3JSS^kgqJ?9dF*ky#47FyZR$m!zBX?#Ot-i4GT}Bjbm_HJ|4ynT%wB zj*KU(@|kc6u-wLYF15%Qm{0yIvtpnBd^<83*M{`x)sgYzktSwNstetb$(b>BWIs{i zS{%u)!7Sh^`!Tz4?bVI(Y^z+oh}O0X%oM5w!{p+vUYcQ6*nDxyAcCs({bXXnx zyAAs*_+-<`i!E#oiVXCC&#PWanI`vokuATk1i#d@KSa(_7Uf9BB%r`{kUZ=q5UnN) z8|%@Riynjr;^V$A7fE7?eeQX`-wS`dX{o{pw?U(p=uoTTtlk(qLzVg%JQ6?TtyIpI zn0^dC>6v-Tl?vUNXnkCuv+MF#qRYszyAlJefHliaLj>>jd+k8dv@*Tcb!_$C>-m-AZ%tQRu`gr5wx$R_+3mQ-j{r?xKZX ziQq-rDaUZKHyQL!4f`iMRwUD~Jw4l-^tZaB-no9|BqD13N4B+osdOnNhrizb1N)`K(?h#_2HSb_X4*g0%C*V04B%oy(i; zYhXVzi~%}vvfs_wMJHT65B7J*JG1Sb8UE!oUW8N(zZAhCtt#7-J~~0OxUxtuh9VsL zYELy`wYzk5?!sIHtrUjDr=EQJ2fcj?)b5f`nRAzUdxDjJYTSj0-OgR|PpjO85PSBy z3z@l{yX2p)-(9{ey?UATcs?WGaxL#bZrkdeYu}Bt&){t+6QO_ODak-DA{pqKjTmL1 zWmGR`8LhR5?n_^2-9j@_8xF_U8!{0)E^m?QmjiJA{8j|7Vfn$am;H@_L(cm=W?jlR 
zSCf1LYlU#QKxE)ek`>{b4Y-44t}?rA!BPE$C%Yn+ot{DVW$P+j!b27E9B3hJAuGdo zBG_QrsV;3H9{5Q+h;R&IfIo;patk*r1%pz%t7Wy4pi#;EYl|@O#}T}i<*2e=QTM~v zxRRvpVZPxb34EskcX~1`1`b~V%C4?ur|4j1+QQmHAx`lG{%7PFs*xiJ;lo$va|s~7 z6M^I&a+7M)_Oq?8IG#@`j;l8Iql#lOMd5-f!uYdW-vREDZbZ}HB-a41_R_7$37CW+*LdEX)M!i>7TPETTI z4zsulU@+S1Kh>RGNY^A*nqEc;f9lfG^7>GxCKFSV{@2UHO0}whrDdXoKXYknd6vaQ zRlULD={YpDJON3vqI*ERL@G~t7K#;XSafY?bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{*8*zZTOt)2cnUm8j5$D44R<;AM0=WSW0(HN((c^FIbP!02M<5WfiPuNtY z*%E={`hSn$HgB%DWF|`=Y#C=f_dtfkc*Pnq!@-|7;Pu{2i|Nc?j7c5-5hz{-?XrLMgI6fbfcU&C^^)(l;hUJV4BBIN- zL;%%#VzOxBE)V#I3tYppDTa;eFak@>0j^;mFI!XSWu1Jq>L?I=+Xb-Um=l78pQMWb z;ch(|0AoD`FsiQwv;3m(OZDz05*)tn*f7(uSIZhOvCVM72EOgsCNqqR0m7+aW9YfH z$qa$ARR!?{c}1Z~F_O@fwuw#+GRvM2CVb@`L^v8Tr0+aBV7v6RH~G5n92;XY6E=m{ zf$frU6!<4@yhwlyi0zVbS^h#}yJQRnZ+TN(znQpTyN0>v4Y8q;F1##E!%QaSi0zWc zGktKCEVuu_#8e<`m&^uL&BDHXS#$1)?UF&T2?{$M9fBxF&n0Pb1SCmIo^UKwBA~K` z>A9d1WR~ED?UKi_)csQzM=uO#M$E|#+vR2|t938w6y}EQnt84Z+a)90B{4T_myD6w zvP`4y*shVZXe=(+u6dA)vP?;)1#Z|bc|6z3vn`pqVY>p;Vh`qkFA5Z0uw8+KB&DHx zp_b&NfNKo765Oy|kxe0~`OAJzY0VAW?OX(G7mvw@c3sEmhV3$(aX~~<W^@1PBi)==gQRb{T%9 z)lvR>df+1!FB}_ zmwrlf!*)eBg{0;$`#GgGH*ELFMZk9Pm~8TjwA&8duw8~TE{Fuec5@~~2*!OEup_q1 zuqi?}4u~T|%>~#^Ms4j#gx9QW;V@(~Yc z@s+nOO9tWD4ck3>$(+eRX2RAM`o`GpOa?MeRe4KW$i*buv&$w0_w`ruL(mm}xY4wKnfw!>~`GLS*A2@1`yLlEWYxg;%) zK-u3VHD(cxU9e0tp5rDbw|%>0P%L$!!u6GUAbA+9+!5R5IG5GBVr(L{Zw=TknF|}@ z^(o8^+a=><+LrQsQL$YzJx6SpJg(%<9osdM7LCOP+cghzaZ{NawkvSOB{K;u;}tne zz>Ms@64uF#hdwxBy8_c<59WX`3KU(iU4g`2RCe&3^XGD-R=Z@GeFK;1C_{zJe6g-5ZNqK2tJXz&LG43`v?V-CQ zT5#Pkz5*yc!7a)U5&T8Pb2lUdX5FJYf*58XD>D3L zP^vW~eN6=KY{Cx&QMLfSRXWK;CDEiNJCt~O;Z^>Um)9Z&BY<}_;O-!r5aLY`O&Hpj zD4_V#Ghrlm=dz;V&-+$ckFH+1v?9>|u6T~1nqw{oSw(Dmr8`(kZV z>Fy#m*`zjmyd)#hyBlG00&+kh76sBrz-VI%Z0WUTC)n6yI_j6RB3k8^Yg|$bNjfJ| z2l?KGY6)^g>9uB+uB(&t$i*Jpv1Jj(m+w;C6Ht~2zUz}~3#AG*PXHMvwb+_A%J@oK zUhSwGj0jM=yDNZ839%)B^ozLSD-<33U-jv)sPXBl8*AsQFZiVIr@2?243}I~5gnN? 
zUNl0VN0@;h^68}*mc=S2h#Wn7DRye5*HbUC*z)!uk-c2G1ku5K;>$pGLw)u(^|0TY zjL^;T-3hwoofIHku&{p_3!eG;S3?h8SPtabP^bE{-m2#ir%#`Ltzn3c=Md>d?XU?i zR&deZo(_iN5gDX>u;!DWy}rW-A%z6t6T9G;Vw9cjEu@bw`&YihWpBA|-?qIyypYN` z-b24lUS{`jQhkkr-Ljcf$1d5&yQ|3|cAmJD&MXBn|0KF7JWc7U&#*V8Coe72DO-4= zHBRkcJBh(0l2hdwoSF2t2K~`&+GhVIQ{_Fq**!bMZu&rbwzEC#w>QV5*k=7sKRIkYYm5yS!f&2`a=VPE=^@2EeWp^Mqi$BmaSXxborUHbcX!5dhvuFX{C$ZGg(h%lB>@?vz6Q}V{f(_NDsGdZ5i!WW?<^#Y+Y;T zt{1Pur+2}9UKWdr0J54Pv)Y%l#3fcsA9skF^x14-A#vD#Qp_D?isw||-D2gA!;rNF zBe@0$y)N6$h(0O?UwpCAFZ12IS7av<)vvu38-u?@s@D~LI7>Jq2pHn966^J`++dv$$9(L&akh7(EG(aFOj=+%F zudyN~kGlk7@~m?p<_J8etk|m}K4Ke)Wky^*gcbmXOFSn!3=oLP``0oM!1E`gW~>llcc$DdOs>=lTQS_fjC4ZBphSRGOe);Z4k$j%j=%%O5*o=J?8htZHY1ZrP5RCdRGw3oJt#}W1~=wZWC_VCWc-mBs4oS@~+rJf)BX*sSH@D3}T4CeJAYe+#Ps`-KtC zz&9*c_;m5xcki6cd(=TZf}h(1NtyM`w$JSh<0e{Zw_3o}?-n@mxMB?B@;4`7-hYyl zVW^VGPy8~u$C)nPYa&eZUQcqR`FRqg3rm#YH~P&G5ez;@0-6S!BSEu^EkKE{90{Hf zY>ot>2{=bmY6qJmX?Th`7O0eO@KSC6`c?!lQUnlT7rEqP%oeFc6Y9coA>Y%j;7wAx zz(3ui+!=^sa(7i5mARc>x~SYqE=T17+9`IvH<`Ap@5l-s@x)yQ$>pf*qL}XGsGMzZ z4HwT8kt=;k;d``E8AYlk9G??KT+V})ayMxf?!s~69hskAhUdvyju(%O@7+#btg57P zIlM&wchpi{4$7B<@+ey#Xd9a&vKvHbID8YwqaKu z$ZbBcwt3^o`Ccb8MPl=2irD7O6p787DPo&9Q=~SZz|z+2!ZtG6t6+a;U&Q;h+Y`yOWulT@a=TQ8!gE?o@=;w!ULm$DDy17uaf`|FGFg6w1W zqt}VX6Gi+!bOqY?c^*2cbG+{%deO&ui`CWP4doW$g{DW zj`@!OnJUL_hk_bbC2k^e*(ucN`g)#%6SW>C`|%_AlEByz%9yG0$gA=aYey6lQ}1-m z-j6L~;EdWGHnil^ga0frc+{FfQd)_+J6h<@VO}{^g^((0?+*p$UPYO+uLni;UZvH9 z%l0^upA|f-4FkWsCu;CU%B0CeNhvF`c%z>NaZ{+^##4hgQRa*qr1iJR-c1F2F6tt5 z6$~2GC9$5858h0fG-@z!9YhvymJCyH?c$brAO+7FrGn^#w@~Jc8l?S>$lfiAc~qBu zAru!=AQOcDLP|A~?pG+6Miu70+5s`quPB(*2s^me@!S9%jgQMM!d|gxVN@b%F_RL5 zs|p6^twl0J_(BBO(k8gm?DmGke9a>?WE{2WLjft85=4%f9%rS85(B zP|o~Fs^kXar+Mne&*udNp}%6Hk3L^@1hNkY^J^Y~WDqhZGY>fWlFyq*Ac2WIVIBc{ zI{CbL1mZO0iSOm)inj_dyGl$SBRev32m5{rH8oZ)u~@YAk_`EV0JM-%wV0mUgtxkn2QS;hAATOyBuc2knx%idKfPoGSYFF4@;()j~gL| zH6o$At4j#Yf+1AQ$w6m%6MS8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`5ljpY{{`a9Ra)mg6Wof`Jj zU3P=?n`!**tKKtb+fUd*`}hgFt+@Yge#M=k&ov* 
zt(kf9R1pd7HYy^YCCnIAL?5tK5q*HQIY$-YG-{(Fl5p+f0ZH?=D$1#|998ool38q2 zL_TY+BKm->is%C@LyjuKS=>fNB;Q-ADF3*vit>-M992^hNo+PMBA>NZ5q-c`Mf3rd zAx9PABEm*Rq};GnQT}mT73CjiIVx22%?LEHh-->zLOQArt&#_bvMN56bNBt##$uV2 zs0}T1XwnEb`8-K=EXU7sN#40sdM4+SJG0n?rLkO6RUUd7EN5k4cf1`Za8R{@`p|cd z!JeW@{OYmzE5|NZiF7YCFZ;_?Vt<&Oca6&T%T?lVfV{0uJubb+*jUJuy38iOk`z80$>g=u}UcVnW#R{0B-Tg%F%ebnMJ+6 zvYCrSh`0h!$^|Al8jzi(Br=ueBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz= zW;%9a=zHf(C(!Y6K!zo0ICm2jMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvc zu}Y-m9;-x3GLgB(DzbKoD=CL4!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z z0>&x|+quUojs&qNhnUCHaTgU&M`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{- zs~7@AR1UFgmiPshgrO|j||4X89fZWV+Ft$u(-I80!UiVhK47-G5P-V|hJGE4il z22=_dDz-C&yC=jni+Vm3xm<`32b!Hsj2~`5rC`E|k(xN_h3_l6FvPr_mz|kJe23OxX*ixv(OVfSa09%utfX`O-l#i$>rQXd V-|F@!lkudTFNwbIdEVac+1c5DS1aw6gqF+0WP&lYSoKct>NOms(MU+hMh~>B6?!(kGrg;p zdb@}2o|Tq7j1vb4u@g?5R^2@A_}OF8kc_XFk6N-U?T3jfaDc^WAg3 z$+SH_HSJH%^=53pAIoC1m6X)I1dFTLBk+j`US~I?j;phSQS8stm4v7L>#!8XyIKqX z{Q94KhUFz{y`aR)0yI{4h2o?4bX|PhWXHz`8*q=mKJLqm?tW}@@g>}kNhmAk8G30+ zG5Y~lm(nnT{;jnD7N-6FX52WmtF;HB6DSZheH;JaB?upGgs|f2_NlmeeEU=q z!XtW52O%>UsZ5?oG4hZid@_jO&TbFm=3U#vB(&*<5FyfP2vM~R1`fa6fLpvhH*Vgt zxfz0Mq>>+V7(B z76hf!1E8qYsQ3jj6Ss#~~8D)bt!M}^(YDNaJV~%<`>-K7HT?zY8(C&59hP5|~ z7vqJyU;n_Zwx~g&T6l*=7&*}!JsvNl|DV`0U)YW%kg*2OMsTM=wB41e?fYwJh+ctq zeAp&L(?JvseA>LH|Jd-RZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-CHCqn1s4FkXr_K0Gg$ zjr>O&@Wv1p$Mx=FD<96LiBC0KUJ_%wa@K|;O&75y;sfwZBaBVchCYmkpq4%iA=Fgm z5J==FZWS8#nLq-hjeU4i4kJpgx%aYMbw!`qC;VCiZVlkixWNx&MySOPLnxR%nuPd; z2wrI`M6S0Gnrj>~xHS#K&}qFTcK=@XB6cY7kp{GFc@vv`YoR0|Y*5>LcnL+`fEZMh zcAF-Uq$pv8M#Nfjfg-MnmybtqgV_qQev@4#;#z+PR0_!_r5U98|1EQ0l@G_cDCdou zo>S(D9vq8)*b*y!asnHX~%4SDf2=;*4$(1DRhse6jFvgA|Y_b+WTZ3T&vI zwLuE32%*kQ(c=?ox*-T;aIirN$cJ?Tv_}eTsBxxjZgOo%rbvOIcn>5}8>GNu#+FEd z1*LYn8Ni4XSP??@I3FR}AO&Q`TpJRo&nL7|JE33auMS06Aq8Z7u_ZAUbL2rYQb0bb zacgaV;(`>AF)MElz#yCrQb2|fmn^L%7bXmt`~+-}f=<@ea|-fC3NX#NAq99i&PC^q z6yzM2JlP2;$W6&w))Poe{TvSC0@;TA95f8p}SjQh& 
zAO)SafD{-tywbMA3b^=G#2VWMl?MjAu!4>QRzT!p+%eH*Y@pzB9#}yqQ$)U}z=#!4 zAvo-Bu-R2`bRJj%8Dy?l0U4A_PwHI-T+~-JH7+#;nTNayeQyUH@K6X50TtP=wX40xQnQYXYIH%G{gTObcz`DdMb` zh!xIhg%w!Ml)Nrr+*M#hjWgviqO2V&AbeTvDj*-$__HNeU_r?aE3hI|H&#F<0XA3x z`LMbXG2t44fCVxUJ7B>ID6Tt(k zT?M&Hs$vEBF*8<>ldm5u$Ymh+LyZosb`|7Y2p(u`xAQr9VFfu29S1g8!O_ZC0d7$f z_OS#0tnjI$pR2t1?z#Gr~^pDD_KWDwQv1S_n75YY+HeqVtNHO`dHSb+`66f2-bh)bJ81~Ih~ zR#*WcrV*(nR$xKN4lA%C6wDq?U7-zDKxWJ}B9eB(OWR^Y(ei%9!3rxNyj8{u$b)9A zfP7M8gTb)^GG>kY3dj(Z_Z5%_=M*Mv_7xm6U@$!2K@BI5eZHXU*<*EP?7`-XaKn$QWDeE_-r^KBMrYo)o6XArwW>0*t89+8j z1@yfGzLe4T91(l;A^fLCK(nXlLO@SKuPy|n#`bmoo~hcH@YRq)rHDgKCLd6|9QNQZ zUV|OO?=|4{p~3)0Ap%+&dc`3iHDtF2X^_wo`G*$xiuMI~Npj`d9)HD`Iv5ESAsm}* zl1{nBf=@NzST*wznjy_{nM6iOFk4kLGZrFH9;@iBGx&T2oodn)x~6K<3~25q&w}5K z;I)>l=?_x4i59WPpc5adQpBoGQ;4b^r5L5_ewx>+IlE2APk zmtul|YDDRw%eEGnsY&qWDpE=O?4dATU#V2x0+Fdkhyv+3e@Cj=x8S%{1Q(8TjrqYh z?DJREw<>Vco!A;$-VUb{v)#|1si?q8RBu#JNv7HQ8cX%9i>Ec(2HpHXhRS9_XNgvh zeGYMq;8%qcY>|WpqxRW;Z`7T>Wji}Yxj&hVC+*|-r`ranPxmMN(MCT$So&2`QD6yo zu>`*1!Cs#P=YzLL{E^&B9(q%BZN{v7YBKI^ZuF+J^mys^Q~kl2$7l3F-g0leYuDRu z<~u=EgYqun6$Gb=@OMWrxuMxQR62Zr5f)a{6Rr=ELl&0S7t@pY<6ZF12ySQX*KIPB z-snkkR&h)Q*}J|64YF%B5)QCb$psyg>@k(>0*omU%klIk6aGA2C1)iA)tpY#D17Pw zERE5fDfvAr&GdXz?y3OgcvAbyr>_@egEg?Ga=HCcxZho+$lhmz4Rn!7Z`hl3Hy-bA zpj%D0H#Yj{Vv>cs$MR(4>O6TBrzqvftn8ZVt{)vvZ!&`%EVWn@?w#myk50bV_uo0e3Q( z(f~%=XS-XI{uCW#-+moRr^EL7Icln!{~+id^=EbtQQ|A!ov~NdFZkunDq>XWhQ{=? 
zkA97n1F_6xFX4BGgD3lS>q!>E_~W6K>Vw~pfV)o0N^s**f0}XD&Jt?f{FUnyHneLh z8~Cq&+|4B(%Cu)&Gkzz6)yjrRwHGhkJ{S%YZ~c2w$Ye}it#ui|V6@qPnifzh?}Q&~ z!1a}RKi(hqH_&nQ_^V|l$a*!7O+L{e@z7WyCsnolwg&VoONI6{-MpV^ON&wR$80NZ z-A?b`yZqk>HtQAvQ;^T6P*Ui-C26bRUp49zK)`;gHo>W3C{3UC7ZGgKWi{n4t~TDw z_F&l^q9y@dR#My&YE=kW1`cEMSeH29$D43p0Iq1tBn6KbkWd%5$;_^iX7ic92Hjwh zKWYS4%sgtf9TKolh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiHVBJ54;L9&vD^`Br zg0B*bGSXTI=;Z9`{ro(9Qn81Sz(jnfsHqI#+ro;QjgT>}6u>$hIuUH(s=Ck$m4bgC z!K_9sN1R_2v;DIS}SV>5#j@1q+w8zo!Kr5Itw7s`>98i{j zZ1JUEwzaUFn|M$DO~S2f+Bl;nVo1eSL_viUrHcS|nx7LO>!XMd**ZwZ3jtxPE1{CW zl&(T>bNKCu-w>uW1X`hx5mITtHQ=CNFYI8oM=ubfaDMApHoNvb9}Bt{Js9-yk&-`#*mLP~13y@hu9 zotccc(FTwHR=(FMIW7TOebtfNUjxzZ3jAVIwi@IN=MXdVOf3pKET89VEjPoeE?s-$Ly$l9zM{7)8P%ctS#jH zg}!PDF3G$!c7|%~QFu6huqKUlSCl&YhY{T4tus`4;S|emsqv_W{!zr=E$(Eu{G=%U z=P|7>G@u(!R_kOl_8oaKdCTO?G`>>$>m;;}67Fc~Ao6W>N}IG_1}|N`B;L35m*l@l z_>qLJCZOaOa7uZbSM+<5O%?7@U6nLGhu0I!&49UTMS8R?@5H!aRS!ojsd?2v@Wic5?ADmG_ ztErA^k47oGtnwobcqp7+#TFR9#+BUX-)GaVfn@g+;b|gZg8%vSGq2VJTFlQ>j zOtxjGE%a->75ft*RFB?r=aw6=>g~38m(DOKm=I3C&{+rs$vr$D7t9I;otCtFu5X2c zj*Io_?-D#gR5#{%sp+y*0dEh1IMFGgZ}3|cf#ZqgMq-~MIetB;AW)FO>k|Gb5-Z#Q zYZ8>4)DN@}5wWyLb#l?62tF2~jG%MOE1sf^2q;dZq5#>1P8DYp+mum=lIsf61w((B zjLs?j2EAEAr&**|~TB($IdK)h6QQs-nLUd3+qLy^{l z!+^YWmbrT(fzoC@pinba1ps;JbTbz%5@uw%bQlEWrIQdb!Ehii9fI`00YF}nCIWds z90KH}bXDMCP>`37MkEseIHWX1g%%;M7Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6 zYeJyDl&V^JwV=LqT*{3SqeqALJ?ak~iKl!@I!x|0p;3SK)Odmp65JSX_1oMz$N8zK zg?U%$EI$wh*ko3!P8e>G+2vq~>=054-Bp?M-)_R$x-=lo@2V!WUONcTQ|J}r(w7n| zYr;Dl;J+fLm>@nole|K`;Xe-bZ&h@Zznrng!nEp>;XdDhhwC!Pv!rwP7lKyUiT`Sj zS;*Agt(%O0Q=@3Rwt?W4EWG+TpU4EZw)R(Q0X{Ie3QhQRT`PWB0z3|1>5=r2T z;7@BbNeWNZmRd5UCG*XnRCa27O5YuSEyPxMMfl4rzM@mE&6ZPOt2Vucfw}%?jp2ep zcxsaZw(fCV$yiz)20(wl*#Q4-h8ZjVL&JeZ(v0{Hq z&GD@qz2Q%q_F2}JB^7YRfckScu%u@#+iGkUr$`hwd_m`oPc`9*dc37Tx(L^h&ozbVtHOiRRex1s z#x-=x!9KJ(bu`@?Py1cPoqrMhTEl+VwTAJsT#-vG=6C$PLaiKh<_el!eg)#(G*Cn^ ziyLlvAqsaXo=Wdbf}gB_g;@$wk(XtcjRkOmR-2Ex{_?lQJBfQ 
zlDpzUh{?XSt;C8fw?0k_^vB?l_@PP#y5-#LT?cZqGdpI2BY zlwgTQuEC#GICxMlA+8c&3OK(1w0_e^K1eEZUJa^Can)_*8Wj)x1LxM8*agS6=nQ#l z`G8}pk&eMgHhH5b;j%Q-{do555fcS z{T>01h_0*;`&9LjrtBiI;ELnYl=Z=q7IJ3odQJ))Pz843(fHv23gj$@;Yicyt)*v_ zc=|xO45gjOJe)Z>%+^S9U9*?C*VWnm>k7xy%4b;?WR<4wXghV_WF8_nR6LSiSk}hT zSO)K^a8!v|bc#Iao36^*sc^8XdDc1k75GqvJ?v((^`}6ITB=C|hElL=faa*htW$WF!OhWIS1w*Mv)f zK%zt`iq1QSYPgJG0aO-6Jc%+S~S9PXnLEG)nJ)4|a z@16{YT=$+V57}Nl8PA%^ZVG3xbsz7ZY-JujnM?x(uRG9_30y03k>5I*Fa}fUdG=&7 zX?pi$!faI*8Y&^lc<}7Wq#Q6AiH5DviAKiD{CYAe+jaJ2!hoz@`}Jf3VbLjzfDyVU z69#5DsB(Mh*r6Gq zBeN7Z&wTbaSz*Xcf5u9MNjs5Gd*=sPCy_ogQ19q}6_;l#R{%*tm3O?C%@?r~H zgJ}492D;zpRWGGXlY6|#mfu%`UuxPPB4;U!awKCCP+&Vq9`X{1R+EK|_2`R755WEL zao-n@B(cOk_q@;Vg+Jc3RAGbz18a#6^HJOxs<=nt;rKys#c}px+jD)~GxL-y6}mIg z`nW)6*X6H7myz+=mDuFF3*{Lg@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR z{HzEu>Q1VOt|_#9KMa1j0dHctshZ1KQ&S{1zYN3?~EP z3zgH@v2uw#=#|r0f}##>(@e84%+R@yogV%ke!=hkJf$n*&+EI~XgM=0NsTJ2 z3j>xiqG_YDI0WyH;OcUOIn|%_R?wlNPxUA5!DuiW^oHGYz2P>x^Yqs7XgX`%L=*8b z=)^}#iI|U|+)X8?2ED1|qJ`6^Pv2~J(Za7r@DlBmV>sEH40@-A{gWNNClt>!bxgHY-c2?}SOs9i0qj(X2 zP4pb|S-nh*(_!lhIDnKn*zC>v-JBH!qpI#rMzh~WCuSiPKRFnk;SpC)Gk{-;U|Flm z_N0$a&@8Sj(u+aK1NAxI39H?uqjQ&iuzPcM9$gvSIHDG`=iDSd_0%&zcr&QoC7&|q zE@-CS)*amEH10ygZs#ufr&aDki1F@HqXSfPMUA_VncKNb{^|PNBy0nFO;3w^(iE!u1Sdjif1d?00St%Hl+FdED zl>`lsdF$flJlBY>j&XMTnLxgu%BPaJwhNV&L!4+Z-=i(u;yVzVmR=TZ~&2Ln1|0fH6GNK zbTx#t3PXf}KXht07{0hbVC$L^1gqsN24lp4KXPg^7$(I~;86B5)Lfbj=J9eh8o>60 zgoAH5wHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8|JS+_Us&d|LA;|XarUPEhz z6==tYZE6KN6c0HxvwESiytIbkhV&m>Gc+}ugB$RLNOseLFUQ#eKoCB!Bz4jLbwG8| zu9l=GQlQfCqfLi4k&0MV=`Gwo7!GCY#J)lk%p{Q^Xo@FsnRd29(dJau3 zPe7Nfyi_7jSZbgIt4LDAqH8;=Bai}$V9Si}6=JDbY>M+CQc zbHyby3E<*oPT}(#e5V1g_hwp5XZ~VL>hO=sIISuf6&;I@I^nyK13F#`142^smwg!S zOvw?Rb%AOS_p}c!r!>-?aDiRZ8@eo!<}+xiQ62cI3s}Q)#sv{;W+c(ZVcm=HSBf(;`V+;supasg~O=7b>OCut4%z?@|7 z!7lpJ{G#tm_3i}{9KPn*Fw?LntN|PSl<#*9){F28cE6%WfP`;3w#f{mVt{aJ z*cf_lZ8AfkY*muwmJlvqJ@%w+qEmy+vL}QIUwH?SgvMgicOD(EU3%J^eBC!>V@!tH 
zVQiO7LxF$d#)||pD(1Y?tB8R1tgdSBeTE zWNXi+8@9`^DTWOP#F3%qg6%SoR}2>(Dv;N|I)ie;W20RdXBxvMq9eA;$w%D8?67J+h}-LPGHyJG#=E*WZvv0X9^1^(%e?UDg@0NW))D|mCq zb`69}EwRJCUDFV_AvSf6URwys6v_>=?xE*ID}Ad=IzG&}HB9e;O1%k<^U5YBs z*pTDCU4~6DY&al}3^f;Qmw8+gE-Hht!C@ObKgWH$EOSDT@RM{AAUq6FcWl={ zxYQCmV7sOv0yM;A2#1E)?MwzTV5Sc)RdHpxp*@gf6elQ#%*GA~cEoncAlL-`v0aXy zOVZ*9NRpQB{W6!PkBm20Y?lm*rS6})IC@2m>m?6^l{;d)+)QP)t{9t0?d!$M&SJY} zo;hsSi_`h<+ck3Mj_n#ri^k%D?V1PK*;M9+?Fw9R$;`{ZN}ZQ%-z{G>86t@!UpnsF z6_^%#Fb8~5py-0_3M3>cO%B#rNKH88GECriJd;6WQ%Gw5vNtV)1mfNe+da19*e;$y zcx;#9j0+-MMXW)@B;M_WUFZCj!jA|7J7T*Gn_}2-KpYurF4!*fxOKQNs6ZJiTtGNH zKSykrWljhZev&Q%fICk**vApuW%!lVyQIGLnDfqIyBu%Njqu=hCIiQ)7$6+5Mvk6a zo283dkyOP%>)dG#mJ!KLqScq85&U_@b2lXODMN0O#bbZ2OXY*4dx@%aFA=<>2|o}- z*#h`h=_C`CM3b8AFsC@^Nc|Mr{Xz+YL2-oCVaS%wL{*bOqZW3UE85%U#x8^-Cd+6o785H zPPlzkZ~TIHHNxZsI40qN5%ny+hq@ zhz)qDf{XsvbTAx`G=qHl>W}t`KQoD;b}@&eTKa$J$ZSVPT9f}t#NAq z+DQy1k(?^e;LN1AIp~jO(>D7znJVw<&F6zr8UY%_ievceB6MM;ADa zHqLif%7JoM5kVY~@mv?J81|>=*Qh_8p^Mqi#f?|)(kxGBm!`w}cEcN3uCVShnOmAK z<47<&-HCSNdq;b7`fE-7{HFWs)n;J+qbEEn{!C>qrl`Y;GFumM-5o-!&;SQFTeN zCh-*b12IS7Vj%9fvu38-*{MKG^dU84{nF%GfIv)+ zbdwpfF%aX2T>>!~)-ez>k3X+W*eeis+B+49u{xv{TyG#Qj=+%FudyN~kGlk7@~m?p z<_J8mtk|m}K583?Wi6Q@UP{`s0>E&oqKOUz1Y+|3wak!>ffzsR5{SvLj)9nY{CQ=< zUV-@Njs;?@jvWZZ#rrBU`!xn)^0-SNCeJztVvfM`%8I=L@v-MpASOBt5Qxdp)BqVuxjnE|z`Os4FQ(;A^|#{xTM0x2nNyg3dW&*j%^UA=VU=?7$Fv1zQEutZ72G!7mWWzE8MSJd6 zgZJ!(q|AC|+h@0jaTBeyTP@(~cMF_&OfiOW`I{3k??1`OFjPt8CpA=Zmor_w*F>1+ zJ)Y!B^YbJ~7nUf)Z}giXA{cy*1T+mcM*>|JTYwT@ITAb}*c=H$6L5~C)DAXB(!3ON zEKn)WVAN9Wl>24`FHr;#VHdgNW6Tz*L=);H6>d1jNeJ-F61+gpa=dW&_@1rgrK(CQm&42Se@89V#h`pK zD37w`fwr+ZBD+C^hQl{;Tr8Ed2+?q!zW{ch>kYU2@g?i~miENE z)_0#BZ}x}ms}XyYwee>c;7~^LIHG&9)1LK5)A6J`>YeRR$*UL4n_qi0#~kf%y<>q) z|d_}PpwyjY}a4T22>vUD%$CF(_%*6ff_%TH^1w~RX)WxD$_maNTtQ@#;`Y?#+Spv_|oi9rtE|G09jQD{j~*n zQTDO>(Cb9wi6VY4UxN01o)jC+z>(F)p{U9~Z9w^Ql&3BM zn5>j`1ksQ%ipe>@;a63Ul6U+f_@cnrQOcO9^2m$r5^G1bs&Lq?k`Gkb zF7Ru&|5;%0m^Fi>v=Vi9OfeI+2gxK~7=VLF@~Oce3d~(gnX|74MfR@M>d$4*G0D#g 
zx#>N74MYuIN0~I4C@Ez{7O(RYe*W@65dRZ3cs*s#s6kqPi|kz=T!Un&KpMP(GHKLc z-a3dZ-k@dC#iQ^*3I;W$DDN0-vOU6=gZBQ(H zJ_2lM6WnQbd&6PAW|6nFSiQ*s+~5*rCztX+9}$1CB0SL%em*Mvd`$THTH)vGgrBb$ ze!fBY`9^s6<*+aq4Q4qzN4X^y9)*$YCYnwMXGY1jwhxhIU;NmsHIEhGWqu@8a*grR zJayye^ICw=KVqVfK3{bNvJVIIYaW553o<7&4>XXZhTm=!a(yWQu;P%CD4 zYbAQD_c4=toez;>E-qvkrij4raF`84#%n(4VZ3a}NXKD5ESX|HZiE=th=lH}E+I4v zhEOpl2c6|j1YV8Joy3JT5=4=xi}|<_Vp#K$Q4yxOIl0J)#BWbKV#xT(sK^h@j9l!9 zBUH@EMMfk8F*9;eKSQRNj~k&tc2uNF){d4NOE0?l@BG|*KKaVVuJpg3|Lb4azjyuV zJD#}q7oNT4`+wzkmf(l4jK6pVtU?o=h!rnF^z?FcBK>+EeNE8Ux1z7hF9P(-g>>m= zHCwqkaCy{NSlr#*yA{-W$m$9Rz z+@~1?bH9iOMn70^e;D?mN%Xp>$CIjq)LBE-#zsbHyf3tigm4DJW(p+7Hg-)Zn7jA&v+0tNgDG<@^&Q>Xx(ZSq!H^?I1 z%CUS2;J;v3fOXjYy0>iih9$SzjojuCEPoFjaP@yliDXg&1F^y{hg3FNQHMmD!M(&tknN7egi zx-~{8IQFRZ@8bV(iuuC}3SsVpy`4R&Fq=y;ycW_u*d^xlk?UXEUH1I@epXUOAe zz)J%kng;RJ)%^gSrED}5?EqO#y1fu?g?;x9dmH`7$HUG3q-;C9sVrXsyRo0+FUdl> z%!S zg2m3lExa-#k6 z;)(39Dsw?`F#iPhD9&=nP^?2D!7Of}NIH1>&>oa5_On~7&~L6lh>EZ+=FpzxAnd`P zn|AVT2PmeK4G1tCOlPl5-RxDbjE+R9G=`AxZgCk6#lmW%-Hb--5BnRl_!8KQHa%<( zW`ptQ5TqJNT_pY5PwhbEYWKqK<0;deUm^p|!~aqmnN$AQXz^m8zm`b+4mfxm#j{C& z^R~h1)BOoLzO}D343&qZPKBz80AkWpX~Ryx@jb3LWj8sI0P9riA)Zozw}0r6JBV5D3Zx4z*1*1 z_d7 zDZt6zWY9Y`L|a@?jLGU3oYm5L=>Q+6gp7y6Vrr_h-e8pGle}1H#JiSXh}PO4Pc0|g z?#TWLc~)z2<(iewHAgxxJcqwIJlW_RK6>~}@4C$!PM_}ekK7o4=JR{tt#B0!zl={! z`;&9MROvArY}vgQQS19k8wpUMw|X$%xAd3fzX@z)1mA@K!M!d<}PrthqB;A4t<2`Agm&;r1f z7%IZZ@>$bEQ0d^fkV6FF!;KJDJVp68O~OeCkLW!egv?+>q!sx{OSOgblR*TBQY?O} zGqmZ35FyfP5D^*-9Dcb0++p2Lo)xE%QV6b*N`jC<_Ge%`gmB?=5tx?a^VG&&3RJom zOt=*lhaN7Jo4Ao&ALOvepHCLv*8t77Lau1Q2)*_0gnR`4fOsdH3!%Wu>-tzTrDLii z_;(R#cAIh%6K(;sG@!ZI^T#5%&XmTp1uV8yd@H(=gTD17xrl2mzv9?=<0a<^nw=)_ zA0qxDA(>sJ)90MgmU_C6g>8ILFVu2osT2VSjuJ1frf+|(!A~{J29>Eoa6r5Lv5s0U ze9oEkAiMZJ8o{fq>{ReIE8zrSQfH+9(IRIMU=@E&RK2q9y{b~kDcC}W%p$VsWjdDa zWVW%kc=l9!5f31SzR#qLd?_I&YZ}Hkj`E`=6MHom{9P%mbl`OjYaZbp7GWeqiZZ+? 
zv1PuPpslpw*@(YkTN2RFnqYUr#u)3)%> ze(=#}T0Ya{dqq_@5{#FkjW2iTh?kq@z{^J)@J53(Tj@pH%5%5Id)YMcsiwDRXjjhb zTwQl(7u~S<06fzOW7D*u591-Ir4K_0HB|~1AZm!b)Bv9eBtY8Ohd1RgqD-56lV#BZ zXMF+~{8|HU4dBnX!4G3bsKpOMNZaIN_Y5P%FGOJ6mQFGo*ISgze4)|DlVlQdWHbv& z7vg0gA8CN;^2Z|J3%!Q5P?8WfSh-op(k(aZfua>k`zDa2ketwnSW7O+Z15HR8xX?vP{3EJ9=skC=d0)&dGHeNlqHXBBT4oWKNS6hbH_85Aj? z`_EbQw)#T97XU!!z=lRG3Sos55UOnrXZ0WjWMq~f<-&V7UTQ%Ks_|lj6cCC!z4##o zHX~G8_Aj;d#2MWn1~R|YJ}AUsg%pq{b+WTZ3T&vIwLuE32(iomF5L+kM1?n4Aq9kp zPJs4Efekgzl+8`94M}brfR`u@c!L#EU@>D$q`-nwJKYRmL<+13)r}O8Nq`MfKt9a1 zAv=KqkcqTPjf@PY02`!$kQ7@IV=+e_G$RG%lNz_yb{k$!BwxKWxt0x5K-m)d6l=+m zJmrSLlHr3O1^KI&Y%#`d65xzo*EMQ(6Sy>ucwS(G6yT4lT~s$~2}I{U+p=?tD1#MJ zK;DMUjufy*3h-lQq#!3>(zw9oTNNPuW(SV35bfQk-mumXCEb+MbkB1qa|1&j@#ebW3ZlNC1-6l~dG z1)WS0G2T;P#0sbothY53v$5G#K;Oe=U3$(~0ht5aTl-@LgsP@~XxD=kkdaw_{ICK- zQOip$SOFQ?ju#uOfKb%&!Zld2@PdGGSAoq4JBbyL`BldX$dfwR*<%GZ)U2=qD?+|l z0U5*#D=B?9BF0?L-qsTj*(@?apQ4eVL%D#(u@V#`bpD+mk$;NkeHMb~uMUOYRNt-wJT0)n3XbM`?Cr4v+@hNI6_7B4u>!gWFDz(ag%uoi*jGU0Vtl)T+umXA>8SBz>#tO(B*g)%#6%eYL`k^MQfQ-!Yb4MkSvHxS5> zI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot*rV*(nR$xKN z4lA%C6wDqCPHwfYfXtX{L|j(82_d^)zgn~^uZ>#mD$;<87s)imo#1&r3+S&Gc5N*jfQ1DX!8a_&V}HCMyw#8ChaUeSk*9P zg%upDj1}M()r=L8FthXf3XVBo1w<}ptbnnB!g%L_6&$OC6;L5KU;+{QE!s z%mv3)0V0&~0W{&8{3p!kWCc0_=#;pV!F0v-U?Q9l*z9@s^p#V@0sthkG{cCPA`IO86+xrT;TzpqD{~FJ5s^cvW7mUcgpBGQMSIuQhlYf7;J-X{*pyyGbxPJ4-tvrSA`R7 zkrW1__St@K)ScoBPrCidWISn?uRkf@h>}zkSi)T_fp2&yShGCKA%eF@{E^&B9#9_W zPtc`Y@Ffp-aqG&)U$zdM4d`kJjnr3*dK zjYX^J!MX>@eOycHi%aOx<#-poGlJV$`*oWPLYbZ*{3qg||AnyMZU z&@ss#Q^_tGn-Z}cPcQaKxbt|GoRw_0Fs=j4bwB9JGfe<(r01J*R|P1?liF84eZ3$X zc7l8q?sr$|bcQaB+2~F;dc)qNyYYB`9wA#ll+9|oYi`;AAXjOdJQw~FgR#>5Rh(j=H%S!Xv zeSNvZ)}|FMD5=Ys5O7RbuyXq|l^-xC(tJ0(%M>rdMjk zX{10Eb8(YadY&`7)POq~Olbh4?Xz8URofKZ%-GIuM{J+3TT`-ZhH!mGflBl|?a%BS zqQqB>FMq7+7yR;O6)_q$2&j*Kjg?&cB?WnN}mGkyo%30T=MsrKURiqyng|6UX_85b^i z8F%k$8Zs4O6@IJ%*H`8}zZx>XBGR%FWW72Iy@^jWNIW!F$VnxYw>6+&St_)r>E``R 
zTUu+8KW1BT>vno3sA)W0*1P=Q2sY~$0aK9IRwy}lO#iA;p8x{(Q?&_>?%}D@r~O3) z8+BPtxr?ifcg}4TL84W*tVViuCAu|wIM_JfMfc24+g62uW#G^>X}Ec;OC0dyO}H-r zS2Sgkg2xL;sEgZVX4gow`OIH~I`pH+SAr|TA2k9iW*)WL4hfj?X=TGDt?%DmDcdd-)g|Re++NgsrZGN&=&zR_OwMJK{HlDGh;ED5ONsLRMX+wgh+J zGmUb!6@I5WM{~)eJ~$*=#o$)&&05OqNN%&A`6;a1r0P>RRZ>{M9%KBfurW(HLjhN` zOGOwf0HBJq2rAdvg)geY9iL5%uLPL`$RKG#B$gv!veLfJZ_~YPP+FcKMx|jJLPCoBgePuTyed0<^lD0#-+I ze+@*tEAWd=*=mscY_Ui-#-rI}JWPesAE67*HqJARyit)qmy@H&WmNf>IjI;Gv(FN2q^UJ~zH`b+X(B>V^{`30O(-sTnko@7&ndsJ6t zk!qbHZRu5&({D83u5jjtdRdGdhZl2Fw^tC;QuZ`j*cyH@ux;TzLVO?w^U9=>Mu9F2#&huJPRnj)!xNJX)X>LGhu0I!&49-UYyT|=8N5GeaHMA<%qLIOAm0u(4`r!`@=3@tqF9Rz=qyMP zNiUpdON*)16{D>5`Q!tE{V$8jIQrm>5?W0)R(ny)F01@V10D*eSFr`ocjDdW-)GaV zfn@g+;b|gZVE&eOk zWAqmPLflwtEH9<+<7P)ba(=DbZ23$GI3bl8JJrI8gBwp$mtZvqszpe+%mFN%L&HZx zV9r#4$J28W;@#O7``4t4U?c4A*S!(|NcDGzvr@*h!#E*pPyuixUc|QNyhdg3rox6# zgit+t%bi~;vMK@j3Bv(=i`D|p`g=}cF*;#P|$I)KK)&SCy44shlL4Zx=na{ z2*im_34Md#st6oUEH@GtM{?M{ML`4+!LJ7u1PU^EUBVwlVuc%EO@fk>`hgZAqQX+0 zTy!XckA)~B=p6Hkrzj%=iW8|QKsKRM&D^_?5LX*mCz*7?&>tqFb4tHKZ&si$mT$9^ z@<%ViWtJ>=~f`37MkEseIHWX1g%;*j3&=~y&3UoUEkRIUI=o78C>Rvm*!-a#Iv$NRAy8jR zRjs^QP+vMO51YnV+`}PM_{i(3a4?q{HOib{X|&PmL$&Ai<6CR=>?1 zOq-vIszVYA(-+_e_&^k3lUb=cVYoqNmxCoSD>d;DQej>?X5eo(;VetbX+N1_&eHs@ zYC`L^g8)5+f=MCYx^1C*X9N6Kq^Gb>X6;w=LQ48Hyhx;4N-63EEZIo_%Sl%zdv8Wicq(T zn*nV@IsEUazIu)BK(Ak+G$mOlF+ls6AFspF+a80+mzqvN*d0v1&Zo*c* zBGc}qtj(En{*pm$5)=)hu-6IQx%W7njyJ>X+*1UNoKJfn`EN~_%v<3cI9FQ%k~+*z zsCz`!y$Pq{H`l6e#G}2cf1*MuZ${uS>XpNBcc6`7@jm8l?>9f%eCTWXGP<>(E6(zMUAwk)ZDD+bh`yMZM=YuQ#~vp7Yfu;B|j zXMCy&Pt@ZrW%F{&6}AdsU1y%^^~n(qylEB~BKTZGcK=hl-4UO{#A;eIHokQZX9eM+ zKI{WlITl$#2wcOu&F}j$?Hr=kluQlv&re?e4+`1 zIjNFMsMc>URN}k4QUR&3q%3-EQ7Sz_)%j+4EPkXub)r36rrW3gafSDjc*fCDS<`;^ zSUbJ37ALv%yR5HTBHtyp;eK9W!P>y7w&2ey96TtO5LXE>1svahTEFQdAEc}Xv}V3< zvp4H?Pmd>Od$V{E^>eR(;8sm(wN`Nb51dJg%TU^h%)^6Vr0Lz0 z3A0sMXsDh{#)D^1Cgorr8A-;AeMa)_$)s!>Qj%9sCJe~hwO>yr5Eh+M*B=$ZBy_(f zOp4*wyC*x^E>|*PdNN_sDrYwLWJlZ2g`P|%$SOTqCqPeTDe}TlOMR_rv2Bg-*|Nnp 
z@1Beoo#=9T9`x(U_#v>#d-h~9lIXo+-=2&otMZy~DX{GRo`t=P0VV_57L|pOhHl>Y z31a@edorG=N^RkKGJd>Vi7WN0j>2J~doDRMrXJ9{C&MAvy(h~jv(v$I`6J0JyPsR@sq$iV+4A7JD zWK~`hE(Hef$z%*L8PK+plfB%qLo+}}W-0J;XS%aH7kR;n z4i}^&T-MPq*u2NgL zRWg1&(xl8ub)h>hIWwjX5PrPEwK$SpgIT~;_G5P8+N&Gm*;d(1QE*syV}BCC2{zl< zzy6xN-D25`S>HThC;Ng=hfeJ8Htet9lT9Ztwy-rQGSK}#uX-tEn%v_>R`MFsFE#BC zk+YOVxsYcPP+&Vq9`X{1R+EK|_2`R755WELao-n@B(cOk_q@;Vg+Jc3RAGcmJ+(xK z`6%uTRotWSaQvXR;y8P;?YTbgnR&`Z7u}g?eO#ck>+)Bk%gC_161hd2>?Nn~@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR{7fSq-EvnGo%bQ#g80J? zcoWM_)m+Y+nj*3JWgvC{{JkHrDAg?di>B)_WVobZXQ@(MsGP=*l}qG7ubjpb6m?kG zTj-R=j*7pxO=(T|1;6+AI2%x@i_2&}ukUiBdp58hhO=pse5>B5!eRJz3nxBtBCq7cj&wOCz zE-*PY=uPb|TKLrnUZS0H3@3Y&LGRSCf3nlM6!xYCY;Q2?PvS|BqD13N4BTc*>&nNhrmzb1N)`K(?h z#_2HSXB!==g0%DGV00!si;5bZCu6NE-~cj|0Xh++-_6C0j=EA^-t|inENfNSp7hZP zn#GkxdNCB?(3cI>gw^iSspc+EJ@bP*?k@S1pVRI_#BS#<`KMLxLWn*0+=a~C&Rz0P z*Y7T0l3u;cdOV*IaJiOuAh&Gx&bIHw*=O)pl!?&4@swmB?q>BUUP2`_&YuhDa`j!U z8)+tL!{PX9LndO!<&%&hTf;XacrD8hj=k(}3>L)ze6|wB}46-jMkkxg9s$IaI;b{D7Cv%R_g^8H2ye(n^}%3>lJlBY>n&f*Rm4VU5BNb8@}Cu+dUZ; z1Bb5wWmng-Q*^MhBAiAVw)keADRiDJa249ZgH7pNyA%k+og^N;M za4D$^7FRJ>Tuq>vmMm_#$^yRBaLIO*MKM75h${(eL51DB3P;gb8&o57`SA4!);x?z z3W@^YA&R#)I0Du7=prFz|;??FPda7YKI3%}d1r`b7@N<ws#aT`gP>`AnO~UsOQsk2W3JL@F9prMGbVU^tYm6Z;BHFq1@b zz`XAecwvSD`Ii9Yx+;a$n3>oRuT78e`;lyGr$5hJ5@V9Ja;Z|X;*IjNXM+B*CLCvZ zv1;mhTb5}w#%XLG#*#c(!~EMjo67!21h;r|#U(RY`e1!Px9&2Z3!-wueyLWEN5I039~kwlQ*LXz*2L7YuLxj)>PQJ zyDbEPeBfIyfDOl-5G4F0tpPTelkDAvmI4^n*MeDo(f6f#mjs7eeBRz`zUJ64)37J3 z0b9qqImZ$BmSdaDFe(NJr-qH8=hh}O1j<&Wtts4HpnBW@)FwJL$Siw8nDCW%kO-e4KW$iROdTlyM%nE53Z86f>(J{v49)JE&47fBbhU}VY}pUY=Xj0 zM~5KF(Q`>!9D#~S%SV2Sv}MquBeu&k>7TkddPVB;l81?NH*A-isjSu&V-pGOHDJ4B zF09|`QfqF+ZEXqlA6El z=akmmu-(pujP2sdIbypEXX+rLX>?@)#06Zy^C*Xp(pEe-Y?onEgl-%VM~0dUw#z&& z2^VrlH>g0lv%y^#j@T~CoDd}ZBwYjucb;_kup_q1@GGnRNPQa^+r^`@m2ITu%?;b- zc;mu@hYCu)w&>#4TuEY&Beu&iDh3FrHkrXL-P&Y^fFwxJn!-a7Ap(sfw#&;~2ot{Y z);U6mAOddKF1=lG$FW^96V}o6jjfbEjGupwTb 
z!rZW3GESy#DbICbyJTdRLv<2!!*4J?TwtLL&Ookd7V=@!A zw$L}mZrCmv2g4xq!*aFtTLC7fpspBHLZ#hV2SWi#?bF zz9>+1!FB}_l9YyEGUc{9FI~wSJcBwMv0af(A*uPxeokr44ck4o1K2JelXo=@I_HS( zGMsTigv(NzVI|(}gw^ESmcow+A~|Ba44Y!ua6lXxYA)C=^SC5j&Ji z3PH*d+hv&(f`p%>ivZyv7>x)Vv0a8=S?x#a+rZdvu4h{>1vhM$?_;xNoOh^54Rz$1ZawAM?AxY;%>LA~{ zP%Yc12+*}=m9DFk^T@>>NeAW2juyv8>r3y7q02wB=*jnK) zzB-VXHX2b&0vw=pcSiu15@JgL>1$f6D3}c4D?a@dH9lQ+b0NeOw1>gH@?^Mt4n>t8 z^68}*mc=S2h#Wn7DRye5Elk1uLb0o%Vr+R^kjP%HT!QEzoUAlbfr0FX`s{7$VZS#S zp_}8o6LiZvDL~2xZNalY`x+itP+jKJgBOI40qN5%nyQrNj zfmB>`m#(%gc1i7M^I0 zQ~TFWVlav1RCxwxCcVu;e>9u6*}utDc~@_C*Y>cRKG2?RZw>qHjqzwU84tUg{jEN_ z{b{suzPnO{3x_44T=1IK@>7$9yrIjvr&tyH7Nv=Hi%vSP3v%T4_BR$-*xoNc9vi%gM?0k`` z%#hV-a1B1W8}9Y8SX2a%)eM={zLF&_v0D1LL)4_tW(y06!}gP6?kH0{uLAETD|Z}* ztSuPH)iU(TtW6JGua#TfU4_}Z?1UmZgkSNk9T4B@@N23a@`oK_C><8rpqm#wKZ9`J z&xCEmu-8@_Y%$XEK@)W)J`f-f7pH1E1!D5JiwTlvodYpP;9?-|x3gxZ+p!JA)QDXo zC=tHYjH`$90D+hs=_WH|V<5&4y98n~tYaW%9)Dh$uvZ}NwC56tvFxQ5Joi9c9DyOT zUt>i~9(M`EzX>3*i7^xwQNId4iJdR``0oLV_liyzkgO%ZRqRe2)?izBp0$f7`hrpzM7S<#>`U+Nr25-9*%;UKyLE9GVmu@ z1=uf)a0Xr((eT|+C^Ket5RYU*{5^XiDYKs0_Sx-W+(awwRtvcL-2x{bQ;cC;{^kVC z`%iK*3{?{ONez|U>^uRM(rx&Duk2Wf!NVSOLbE1gLd9YIMCe6ZKow2rB)Q4x5;01D) z=w2jRX*$pBz9KMO;VyT?HBH*1k%B3>g z3oz}S>+{#ry#RKf>kYU2@g?i~miENE)_0#BZ}x}ms}Xxzif|5}U4TOw$>WIb$xeIL zA5F)T?x=URKP9hTykmik@E6VXKX0Nj7SM?y``4?WPV3c>*!7pQags;0iY|HGv__FP zmd4NJJ?*-2@s6~rW{S82HB%(sVy1}uMKeWGaF{9L?#@h+SR!vyYN%*G{55znYgN<7 zdt3c(Z?xH+4hI{BWihV^?a{mdy1#WbvB2f^mr?uMwJ!Z1e0>4-F{8(e*tQeKcM_%1 zC)$Z8Rct<~X7l`uH7c98e%;9A8@bIV);4b(8Eq^~Q;C@(v3WB^Z1ZM{#OBQuvCW$) zQkzd;adUQl3mNTYaGWxTK+xg_&rCaR9MOhf_)*lW35KQ!>7ub!3EcMS<@X{uz|SY%OMQTIIGukaCZ0rQ zG@+n%8yx74w`W`1vu-AuY21M$tBpfZm4EQ%C{JAiFj*<>2%;fjCTe-M?22wzT?a^( zj3Pj2>?N7*FpEr=Uol-?##_MZO1aZ#PLA+j`<0)+VOsq~rpmF~p`eCUiJORAcC=hN zzu_lmPOaKC_@cnrQOcO9^2n?55^F~l2BsFV;+0uO?GAff^69~U78pEc%^)eQMBN=z zFsKo;R@)BDCQT+vN?DP` z>xy9NBK-V)g38V&5o}3=*Hh+<8l?5N$lmpCf|g9Lkduqv$plLpyn!-l)L`B^h%DZq zove#e@jwd6XpcX(X5OKOYf)u_8Rt5q>@@{CrIK`C8%U>x7@L7k<7$`1wY7 
z_vNrK7!77QJ4d-C79NF>>?WE{2WLjfwYCqDWncW*t2K`mC}(~oRdS8-(>!(K=kr>C z&_80Lk3L^@1hNkY^J^Y~1b#9nGY>fWlFyq*AmNNWVIBc{I{CbL1mb<%b4^e|pFWTfLTAC^opA2&h_YeYhKR+kW(1w*KqlY`Fk zCIYX<=1$_m8VRCE)Wv+<2r;br$fyX@+?-rwMB=xn9WiA5WK`q_W=1Y{#1SgyiBGTdo(ybo}67hazg~qL}2QrvS^L? z(E!P8&coc;eK_ak1`{t?zF;|dD>@W!nlXT`PHPz+M;41kjhHZ<@kc*PVx0X0IWw=b zoeg)mxI{I-VKz22+f+0b#+HAQaAh(!^6{LfRWnbXDk5pYMn&Yagc+lX=mWMYq7Se( z=cpo_f^AeplC@ntASHsWigM~KN7cNDWF;FFk-M992^hNs2ZqBA>NZ5q-c`Mf3rdAx9PA0>nl|q*Sp~QT}mT73CjiIVx22 zjR-Wch-;{6LOP}nt&#_bvMN56bNBt##$uV2s0}T0Y0?Nc`8-K=EXU7sN#6BTdM4+S zJMWmUjY`wvvM^QU@+)8|D+9aZtvG>0s?FES-@Y667FFU`?vB5F_r)ra?xW^qf3ZsJ z53{qfQTcwcN*oT5x3#IqflVP#>N1=BMm|eIrY16HhKcH_ydksAZ}b5}ptLFEFII_Y zEySoITJ+hfh&~WNMHj0?8x_%_&sIhBfdDGHSS9u+Ba~$F+Ua7Ih_>sNE52Y=BK(Z> zMMP$W4kapXuby%V&+w^y%3Qr&rZqro(SzO)!ItQqp4B6o>E$bf>gfqfhl=TuPwA&e zJ|#}?pEQ80)GirGi7N4vfji;Mg)axsoxk!YFD*&ZjV3K16*;z^=Q)x~zkvTaaqD-yU zDbuk8h^VSNAVev0U|0-D(Rw{A|@vX zWMhnv1G03&iHhRa?y-s^;UOxXj=QLMIx5;EEV@y#Zr-p$SqbeR9#~gM}mmSAy)Bpd>qget3K9%O7r7ZL3q&W z2Uvu|1m>vd5TS)3mP_tUL1reiv|n#PrGTMgJ2SYuLQJ!$=R=Xph4@gQ*~!HCp$1e6 zCY%_liKAZVPUlHdD#QXqkjh1OGQg~4V)Jrd%5Y|T?*7KmA)*UI%-ea{nMuU=S3mom GxBoxSeuAX{ literal 0 HcmV?d00001 diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local new file mode 100644 index 0000000000000000000000000000000000000000..7c59cdfdcadd549ead313ea8df07aae86ba79bb9 GIT binary patch literal 267629 zcmeIb3zTF>eIGoxdv6z)> zZPeR6boZ>Z%9qbLaexpz;lw#Uew~vqb~v%)*g)Vwz(m0h9AZd}V}rmXfDMM653o&~ z93M;^e|2x&s#{gJUbm`l?@Hf_v&^gSzv}<~|9@4t>fjvy?-zdfhnDut&8EqvuvJc+7`&)PJ`REsqXP;a8%;$H*GjQehXwct0 z*E!oAPg*eE+-|0F2cfU_6U3;g4fv%spINQe;E7obOqsQ-#RQt@!azK zzq;Y4pJjQ8mS0@rWgZ%F4-U1(Jz-;l*&GJMMQk2E7w)ACGtu?AaU*r>M~CjpGJN zHkXkKiH%xZ7BDced;s>M^gixwb%xzDsFsT7;JpnvWXD(Qw#lS_Y8WrzuZg8N5`UXZ z=e;xObx`^7WPf-nZd@G&ahS_tKPplBTit077m^uegAn02Be=#=V&h(SD_*=Cm0r{S 
z^qj54g79YI;$sn9Z{lL3-<@zp6)G1;6KVaSuHe>|Z=glwA-E=fwN(+B4!!&mQf^(k zz8{raX+5~MJfHR%@E;?1rOos?XDpk)``k+2`6A+-xNu$Liw})_;Aa|UgUVDPIG~jP z5=Sj-ea=~BA6Wn<_7HtJ?a{1QH-^?8BRK7*VFpy~(n2v?=nkZ1{}^+#bN6af2Vmj8KaohEOnj zG^hf;5W&^9LgadjQa>Pc>o|s3cI)`Vl6J+%7vaMVXxZ{6Hv85>NkZ5liS>RSu`KU4 z3??9He9~^y1duH#dPq<1hZ+sbeDL3!VK*Wq z!2>OY6ogd`Q}AW0+#&!1MjRt+OshpF>;9O>093F56&(D58LUA%Dj@_GAu`7ZOehFz z0n47?K*82Ow2C(jPGEvE3L)65f)t5XNI`oRNC6pSrbq!9v__1vVpOn^)v3`Cig)y%L}z^VNP!JC&Xmnft_?|U8-SO!8af;+q`+dvmPmmG zrB=EbAdF!LvqzHMMF-r|N*r zL*9fxn-&em;+N3S@={<0=ZP1)T?ID0a1B=FT?IBHV61=>l%eJi2P}~JrT9znl7P*w z0`jEJ4(xXo*if^=3akkE_GV-dFRXwN(FxEVE3l!)nXka-3lu>S`{l`qNDaL0jIv2%*FsJSoPP0mZpAadA@-_hETu& ze5O+158-&30al=A*bXZoG<1658m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fMgivp$ zaDgJUxm4d|aIo1|Kt8M!pgmS#Lya?KGge?jGQ|o^Jq|5WyhLh)6A z6_6n+V+G{FFp<@0;Wk*o(HgJqqD#Yj#k17s1S-tT4M!hgUY??m&oqJXEVN`dk!5hSn8$98;&fHjHCer(jzL_n5ac003E0JhG0B0(1AaP<@XZa`Hz0q@&tphTooWfOaSe(cVcU3c{`j+tcX~aeRt+Y#Rd!2c?^KE z^|hAjTNh7jv<oM4Lt(;v3Z^t!{&crl8ORLxQiw54G-1z32%@1Be|74bSLQAjA>_MJnC+3b|=&H zco%EjclabZt2ice*|WYI&1G&i5+#^rbdY`Pbts(-%zxXUT+qi?oyAeNczCH&5y|77n%*3%aw;I5Oh65MFmn`E4| zvV>YUf9_?>fAiyRCh<__WwtfrcOzJ>Y?xGg@%)|r!65P0zZZo}#zj)Q6rexc>OEZ) zSE|VQ6AiebGVjNFgWe`Or5=B^tOQxF#<9sK8YCVXE99h-%G(;yt1K1T({%EFrX?*# z$se<=xEHR||DOo9>J|Z0kk?iyId@F|rcs{&0`^n22~Jm3(t=Fr;oz?$*sRNH%3WM- zyqAr}vO7dg0=jynxFyu85U>m!#^$juallVD;eh~L(UeIF9xouFE^d>lT_errGk*;h zIZ1FS4X)gPKWPM3%q(iP^CV!VV^%g?3a|q{E^K>nbZhDWdCgdZQzc<(;=0lnBPooy zRDil_Gz6D<;7Tns(Cp|g4dOtr4pESTkJT$7Q4B$J$ikpp)F|HKA zIvhF?p>a|)1X_3s{zC+>u`=y!u+GYbTl6DN>_9sN1R_2v;DIS}SV>5QcRA_A8b`ki ztzb^k_TCE`2mGk5h2`ADyYp`nZe7#H87&b*DmJ5@(pP@$H2*vRWIt%@AQ>+NgsrZG zN&+*h;CCZ_LzvPKXoW&b^ekl6RcfQ%BKg7em6uk3uR2FF$)i41CR)YdR`1PP%IipO zv!D4XtlOmOQ$ngNm3XQZ0Qj1)F-ti^0avt3MOD^0sCie9i)JVL72Bn>t^V0~_XEA| z;Ew)T%SEy*-|}zl7^G;P@24aF8Z9Nk*{#7idkuP?5x6cnf^LZj^Mq&bdkFa9+eJI%akP z%5EFp6Twxw!P145Jj9t`5HQnXzp zevcFca-W?Lg{wJ;QJ)F-vPU>c7J60m3-B)+aJ;N3Upb*Rh^&KElT=M|NQ@dVOX}SX 
zcr2u(raRkcm*1)JXa{ZZ=xyhFoswe`pw(9$$%8cz?X18rH)X3q?z6=r*&Ge0cvCv|%TF%xA^qhBfb<-oRu_Yfg0A^Ro?tEAwrUI=d=EF8ykZyau_ss{MyQAo49 zw|6RTqW`OcxD+#1H5K2B;8-}<$fE^H5fmQ^sMC~r?v*wiZ6&=bE~8Y6p;Cb#Y1F}e zyWqYK*#?%u`+^2XdM3hb@a?Vrq58C@XzF zd4FL4%VIK)J~*R@j7u(|J%_&@{h4{3zQ`-2|3>=FHc;vX6q zc8SsWX9k8Sb`iA3cN|r6(>_{&K<|~JW|M{pRY&)W5j+$`YALMKk~;3o$+nlv*JJb+ z|9sq7Y%DFN@8ecG*%95d7e;36R0}5#Zahg{f-SVlJFNQrqX@}7f)9tloKvIA1b8ex z7a`u2eX)N{x|n;Fo&QMn+zF0Oz$u+M=D6vfBOO6CqTO-g1|y z-foL`=?numLkN<4cs`013isR-WQBrGOWHlxw?aY3#jfn{59SM-Zx4Yu z(J7&C@LLsu9ek-UTP>{jv68E8GBU5|o_O53~>wv9w5aOwqvzJ{F>k zpmWSCo}!EhC{CoJ0NI326=xG#5%RTzZIVeB4EBif`GhqmbrT(fzoC@pinba1ps;J zbTb!K11&{(BA#S-A)R#^$XF3e(OR1_9hk`+|jm;n0q2tk569V<6R7=VuX=k|>)R&G+xly9N z=%vHQ{h=fAlut>A$-O2t>`k8(3cX^8gxwm{cQ(L(MNTn6e0C;zg?iI} z9O~by=qP_VV~yRusoV|*pKrjUbs6Lt(z*K!K`ZRUe>KM}WE#Y+`>uaiqiE@UFBK!L z>^S9QCPrFaexGU7m#vU#rY##%ds3(Q*GHVY@Wt9B|GY+%r0`U2sU=fdGTZ#AZGA}= z(t%fB53vCsd$K8cChQ)iCACj3zk;07>*BGhMw+KGcfH&KEfvsr7fKWL@*bc;s{jD{}w{rA` zKWo}&SzDG=z!d}P&)mS0uC;8dv00oVQP}W#NCozR8rwn(^Y@y{!6sg zbU4`_O?sUTbW=}pk)5qetf4m&7$Vv2fAAX(`(4)>#>;X=F0qi`@%JjVa?qJ8h@bol z#F=S;7ep$}xgw~k_b#ucdzblVQz1t1(-p8VOCc)qvh1?408Ws+DMZ$g83BB<38z^7 z>o!bRl1(`ZGkI5XS6m1&*|)ZpSdrz{)3iW;0v?MWsZ^j_4vzI@9X{X_P3X@^m0Uu# zetV%3-<_2TNQEV3(Q8c?&KFVVEmYlah9~03>Qgt`mGsXlyr0A~&W3B8q%?y7Zna3luL-K1egMj??0{I^pOveikw#iS~Hj4j;n4f*Qj{l zA3L|+#4b3lMQ6xc%ZD6OjdTn~vdJ4g374gr?%$JKt7DT%q_b63giPWJZV`z+*ef?O ztxl=&EN{{P_AopYKj;y_i0I1tuuoMVZptna3$8dWO<5l-X(4CUu4kmcK2=}`9*-Xl zpg_)Y7>+cJ-dcJ_iKh>g%TU^h%)*(ILu`#C*EM^IyKPb`Hy(5RulTMI{%wWhY2~vl z3$jX6cchg%a54*#7g0#xyh%D>p`XioL3me%qe{%8Q{+M4bXC?)g@awqv(Cw{zy~Yr zVKYCq$d`*6tRJA$}EW0r?(>uslI)>Q6pUCGhV?%k6eX{qXwfIXQ^ z1C#}a=FGb%6S!97BEOzY7=x+wJbN;kG`)K=VYVs@4b_v$c<}7Wq#RsO8A--8`1WK{ zwhbxCt0xl%WbN9oCld&ZPFVzu&^?(jFvC%;J=u{~Ik$)D$pnT}&TQ_mD4 z?8$@yS-bY@$ppfpQx$pnT}&TQ_<+RuxgOeV-GJ=sctp3G9@ zrecS0S+l*3{IB?}lHTiNyy!%i3(}MEL*S~)w;)A=|4b<5^d^ zdr@}S7LeXO*~%<>GMNSnUe7~MCUC9DMSkmK!Wc}Y=h>6Vr0Lz03A0sMXsCoF`_ABbuua2hLq&BP9_Y<+O=O#CJ+{#vIrQVdop2QhND`0vX$C; 
zGJzqLGn;#|m6`NpAM)J0C=(>plfBZgLo+}}W-0K(gfEO$GG1_^!v*Qc_#uLHWHOQg zIx?QD%4fnQz;buuS=P%Km`(mGvtpnBe0MI&xHhCeua1l#FHlFu6BQk?2pge0GJagb zjIks8$qLuvNOlco0aw|N*@bJbZj5JJ@!dI%njkNLiMB#9;Vx#zupFZ}VQr3xe528~*x!)z3HiYo4Lcr_sl%yN`>xBv_3A-*>(9V(Pd~6aX)Hlehqh^kxePUQ?qjEizlUG) zdw-9!0hPMAjOO$DE;m}v%t}(D%Id;^rHp9Ws4Nb_`y#le9AP$k)9wm7bo8m-xYZx_ zr~U4rbGAF!L3f_sJ{nG@RyQIn-$JwXQE119OWB$av)pwi8~yG?avQ_RlP7P5UytDB z+9}6yqC4()HwL{EZM&JG4dIj1-EnWLGwhz}#f^B*d-5-WJ!#?G?GJn7j3Bzh8;yak zjcu&X0SfF~kBI|2EB4zalm4k;ynw$ZdXD+5UM9xrFipri=nO2Rk0<-XQ~b$ZzB;=c z_9H{-Z*`}=PR=emI81c|^RGs*q*Y~W+(Rd57FQPO#ZaVQUp7<|R=Z1E=Ptg}xq@6H zy>A^BBV?WPPd)p?w_4e@#$EC$bM7*0Pq5=b?Jh*@cJ7jYTIDW;81F8XOuA0z0=2u4 zncKNb{%Mt426H3$vh?a@*5mmcfXlVK1G#Ohd!}_a&OZHTP$okEMiY{OZlcMh1&88m z49SHhyv$w79>t5OAjd`dJi3>Cj<-ld9S`4%;I%A2IQFu?F>q+|J`X-iDd;kif?%~^ z8s(*}>A5s79gd9U!Z#am2g_V#c3Xm@`Uy{VMJzi#gY3)JrFCBqRmfwA$+*4~!8*%M zb!iLnz)#vmT=0;DiRV9xKynK=D+Pm6J1b?iUSL7vPb0XM<*2e=QTM~vxZZx9n!Q*6 zd+1mUJn)?c-08`%7&v?dD7(6rouY%41@RB3NFfPZ@V_F@P>mc(2p_&OH>H;l0)*d> zKynYcNwsOi+0s`WT%yz+Z+HVOO%A~|@vE&$ldR!=2`Te0UEhz&{P_$i%!(5DN(48v zJlKT{$`uzbtQnC+1!vyGn1HSW%lqMl)r?X2a>FItQ5MAj;UlgjtOZHjvoLjMiOM5| z#xUU<5v+L_kr)nqBpiT$9jBjlYCNbd>1qgPeW|^hZ#i^){jpQK!SMBb2n2uP)MPMB zilM-v>}9CAG#Sj}r#OJaI5!pCkKX_n#w)BqhbyI+@+=E^`TBpYRn)TT-LpMv0tiHg$5h5{1+}wEzh!; zsH!(uJUxe|mM0)dR$(kkUaVRuR#>S5bZuvK1X5s9NL#*AFA|rR)ft2V2o01RZCojA zAUTu(byW)U;)R3QOPPtSceUvez8J~2cKY*tVI;AiXu>g;7ptb8w`G||W1PljVJykR zHO#+tepA{1h~PGFuDE38A#9;{|C%ha2tofP?Si^G01rZT1Xz*2L7YuLxj)}#m*28j!GYvC;JqK9w005%+RLXhy2 zbO@d_;K*7EU{qfVX8A?mm+D;-92kfu(#>MoU16Fi0$+D*m}%G()_|?qku2(Xa41HD z^0Lmi9ouAvQ87R`HEaw$w>Fs}P`0Wdz96qlss);~O>}CIS@wi5;VbVT!qJEU+^}7G z+M9gcH)LZ>hB835Y);oV#%|aynT7)Y#ElmTkS<*d89=gkJB4jo{*-vJBeqM1R`8ZL z#STjV2EwA5xL~`cAp$hSWQgTiVY_6&Odnh&E6G4!wpPkPGPssEJrmaEhV61J2S{2R zJ(r}#5s)M;qzPr6>ZKBSCNoLh4cq0J^iN$Jy&`pa$-~6C8@9{MR95SXv5ACsrdZUb z8K}QnL+_2rY z4Yteqou)T*f^YB)bnuNEw##tF1rbR_?1IVU(=J5?XXw}w+hy1k!-fOm$WU{^cA3X3 
zh6@iB$m?H?nL1*-EOSDT@RM{AAUvd?l+cs*OZpVGQWEu+mlgDPgYUI<+Qb7775^5!)qH6}-7)y9UCgme^t6u4#w> z4KW#FX4oznu#D}x;4Oq2T00cZOz{SCC3_#h4cjG?#3m@b#qJP9IeIQhiz6VZY~=~Z z07OcXP7&aJbnCcpmuFI_aDAm7hBMPPDaU=g9OtrHSBy=h_Vv+HuoULDZ`aH-hwYxX zB<6XqqOFR?H)Pr*e;%d4!&{2b{WpNAi`xSO`|I*Rh_WwoWD}65kX)_ zY?onE3>yxJBSXyv+hrb?gbUs71{YK_m#n<7)@ard+hv&(f`p%>wYy#E>wfMMa@@De z@GGl#NquYWPwoupf`c2j%kjpAhd@g|xosM~o<*Uw&^WkZyBwopfN*Nq7--h5O=bv4 zf<$F+*0#v*Hs^@#^70nKgs;4dO2VBlU9fT7w|ms>Oa_GA9HHKS>t>!ks5A zWaGAPm*H1d?~?k~+Mhg_Q=jbOcqRkK+w&tlxM8~-qhf$?z#2JvZf&w2+x1$@z_SZ- z_=_X9%gHCezFm%<&dEarku)%#sB)qhcmJFA&|PV?(0A(5@)|v8_j))Qw+#pFN+!cY z!1&sr^f8K2PqR~66QkPK0V+$42_KO?2`LDH&3d`V@HF1(`&KNLjS!p6!6+LFRwO)ec#1aEJ^-9a=VlqEqlQ3Mu$@t8~!y*D7aJC_v=f8MvsdUW-QKNlP* zR|+cCobZ2FJV#K?vAT*eNIi84@ggCAAz1~i^wok&*LJAc7i*hJcNeM2CbijPNWXFl zJCaZc?`nj}3CID3SQJPfA*E*U3qaSJ9cN>Y>8M}MifENvZiGoKUMmtv<@e#aV#T6q+iugt5*mbV3o?B&WOhz^3q z5M7{xzU+qj>}~2nuR9*1o8voUbjv#_K+0?G@Z2xl%oh)e7mc9{FDnP~Y^aUiw7Wuv z;M`V0OY+|F$&)8{|(MnlaYpT4GTC#sL_f_oLC>}+o# zeRSBr@*OUF33ltY?d`$2RL1ch`fc(myN8qNYZUC3&7?Y3%s$>(NtW1o;!--Z6vX^f z=%Vm6rK>)}-jtravP`FJ;fdBbwSR3V29roO$}>1M?r!ya!|9~O{!OOJd%Dwmb_SjF zf!1_qd(dlbj)v3mXwcc}ZTHXxPQ%S}ot1LgHdjOthlP|}7t1m1OC$EMH<_Y~+0Vv} ztLHTD!e%$D!+UqZ8(6Ne?lPHMnlH{GGUMfxDHB)vXyS8erHkD&Sx;q>E6+c(mE0|3 zZ?@}554UY?8SR!X-#A~nDKk-ZOR*;L6!_#WxZlfSQ4v5^Gh|kKHA`G#we)d^s7asA z78VkR?I*?DQKoo71>P-I?l=rtTQHKVW$2X|pdKoc8DSBwUG_8)9m22q)((hob@(;a z4*A0lF_aFAY|zaMUYJ3+=V!vUVYt+p`IQg3Hl^i*CN>8M#KozaPJx&_?qY)ES?55^ z5x5wLd#$XQ>9n6uftctpKp-YZy2%XL7>M!1E`gW~>llcc$6rt;>=lUHt@9O#u{zE} zATEx;klC-XA|{W!1Y+{6b0Fpjyr8Vut0F#P8;Et)giCcx^dU84L#Z7g5R><>Wrl1F z#Q0&CKum^p48+XiFDMiC3dBdwTOh{jkXmrP6>;&tip+kEftWn*5{SvO&ViUC@Pe{p zuRwg%HV{*E%_Zs*;Y)4S5OrlJn?E!`HaE)**%*lN!!CiC4C@$(na5vHChQf6k6H&} zYQql87#%EosRe7IuFQJBz6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d*pFA*ZAK=Mn)IC^ zsJx&oyI+=w4Q|Y-$Pzr(mCF8`YO6!j5qx16NG@b|FmyGHd<`pKjhUwuk^q~vJRAix zf!yQ;W#DgN6=1(G!Wp~4c^MeuS(01eg&C@9(yE#%;tteIk$8)lBCbr#6iFLsrii;cGeu&Fyh*7sr+x6Z;bp84d=GLYEvmg8$^XNQ<)J?EW#|!vt!uU?2H2Oq4@uZ5)C)I48 
zf3Zeo^VY8$nS3L+`NZ1hjU%Itg=s1=QzSNTrig9cOp(~UnIg7%Gev6i2`p?)&ut^4 zy%hGh_eIQq@OR+K%+Shj5XFmkqc}C~PtS2?x9ZwB?7T=tHwM5yqG0)t1b86>5ZsJo+H4CYi73~E#* zWA2Xy=B}g6+1G<2d)G+@DmZr8F-P*Vf@if-(=#U5N*cVLGHEhVQp$=fUaw`*!D{n? z6bx#V3Ze#Ypv)OHNb7Hry&DwsFbG;J6c@dd36?Z?BW2R4!Mt@4S-ep@Sr-lBffSOf zqQRReb4Cr)en({QCO?NGbQRoGw*&aEq*Np6{xIdzsKUHgJ0K?d!;0x?gdJS#cy54> z#>eFrVXs)UFe;I>m`RC2rj&7UA3|6mCI>BkJ_2lMW87(Wx`RQ!W|6nFnEzxyZg7b- zk{9tm9~OVHB0SL+em)}nd{p@PI^pN*g`aN_e!fxo`6hVxWia0#_NO^JN4O;x9)*$Y zCYnt8r-sROwhxhIU;Nl>G>;W1XMQAAa;@>xJayye^MZoVKWw6pK3{bNvJVIIYaW3F zeljOB4>ujy%Igv`s0h>CoLpo?;55h~{7A|sN4m>IdKpCMDs$Bj@RJ1SBo zYe!3s#g{(*-CckA$*UW4>3_fYe|=%!o?p4_p=Yl9rRQ$@!C(8mMflMx;xAnetI$Lz zV#Ny(J-rkiPrsf+Ula888T57OrGS1ppDx|3W-B-QFN+%U3%iEr#&#Az6FE6P0$1ou#CP#6Ii;cnD`ZNS^GfLXwL6iSlqLB-zEDG#FxGJ%B3s$ zb3f-dMx#N3j*?qCC>$?G{{l}#zL_dzcHBFO$4Y0KZ5~J`fqux*ZTj~mB#c~$OQ^d- zAMI!7C1&t?x*zu8@6i9>{`!Z|_ zw;Rnpa3k!>mIjj-0TJDmSchG&d+SbjP;#4H$ZZb7 z()ZB;S1*USeQDt}`}SV48{&5(jXnYMclJ>O7{mNl|IB0w`smoir#q>uu_{JZO(usp zVJzttz&??=D97DvVfS6=I7)OvqH30+Z(|qPv;P3{4!9QR;%hpc{tzwLq(W6p2ex4W zzg(7%) zHWy=fEu?S8l$g^;t^---qSS-Ko{VL!)TlKFI*3%S!J51^eKqcaZ<`>6+}BF;XR3QS^- z^OIh0D{l6O)1+~5Zs}6=Y4nSB@tZ3-16^H&h4%bq`=iFvfr6jh?%#=+zXf(b(CZHF z=$|D5NWWS{ek?zDG<_cPg$yEh5+pcsYW~u~@$9cEb3t)1{{;3Z&T>aktV1HfENr7l z+JE}sZj>zcv0JOqZ!SlOim)x_;O^uA?8cv)wDVO$6w}FiAq@JH>D8&5z543taFj}8 z0Qv3~m(frxtTx)MXtds-w>gb3fjwx`!&ZOV9}N#es)5u+(yzVL4pgpoKkPb|GR^rV zGSEEyFQ$<><&TXPF9Z5(iNx=M1IJK28~3*E=$|~<8>8b}drHGlc}VJ1sG2yov!Q%c zj5s(h<~H?XQTlM^3MAUZFM$JSQYdsF33X6_O!qx4%^&Y?JuCaVC8?S+&F6dqP#G5vU!9dlPoQ_>daQ;KId(@6|C zea2V99<=j%+DB*WqTFzte$VsG=Y!%?5`HECC%WT)cVmFIxS$x5)h{@!rS;N&K28Z4 z4~50lRHxnkFwG}RB_nYi&We!JgYBs<$_-pK%+5XqY(qa=eCu`{6vcs}bjxxX}B6uCEL^rO^^yxwA z3{R12+ppxn_kVT6Pd|GB@X>vm=%#$CJeuMy9#iC9P_?J-0}Z&(Umtvh#1~8ncL9r= zzO%|9WeC?&sIcO>Q!nx0?G6CAEK48E&jV?Ch!#H72w}w&lz-DCoP_X*?$bfY^oK-1 
zk%xd(jtHL&A~=*{@mrmtO*e!Hkye9<&|uT>(FSmbbvt=hoI*+=xJD`oLI&BNf$&49EGTZFqBwiKBt|8ek4Kg61QIS}&F@ZVfM#1ES2SRR-gHcqf~yyiBj!vKGV=elr5iZc|QT!YyEy1~e0U{#XRpo6>l;fW?-I&!8(g=vz;c z`?c2at;NgEyDYidOtV`B{$s>nBqXz|bo!h#O8m=trX}wp@JPI{n!f$D#@8DZ#wIWtH&;37JJC_h>Kw#*k3v=uu%7x6c2O9C2NldRajbN4{gt~~9?v##7_ z>l~Q_8{V|gKpLt|4V@Kd{w@4-A9(!PWgn))@lv$$T*^~4-4Zwg@p6kFFCS^Z8x78E zr59~0&)pjDWz(#|MVb;R+u6P>?caS=pAJv41bDU)#-?dQAI3vaOCN?1+thnWO<~aR znLq-hjeU4i4kOC6xmWjn*jPz^_4DW7Xu$0O{24d+Vay1%_+bbIvqys};0qBLx22O5 zXzDGz9H-P@2;Doj94>j7c>8bzOqV|v0bl4ftc8+fb7rS&2{jmj!;%n^aXK&fPv*WV|0w68C6{t;*;D%rfYcFw ze%_41@MWvqA^-wL93yQ{ zbqA?r=m26dZyi(F6zyqPpaA~B4A!8Tln?@o5ShaxCg7E|0Ms<`+aN1~15`@z53S-2 zgAE2lKWEWf-9mp2NCBAx+qn571%zr#!x@7Vlo!dJK>#u`%a3vc zffZ6fC~A2rkOC<$ZXf^|*^U<*q<~P=@#2RR*o;7z{mHQ{Geio={Hh}blt__j&4qnLpu@*=`bW>7?**eAS}CAFPl9 zOp|lbl1rU;qySG6Y>6jI@T z9(?II_#+FXfLu}@=kI12*V$3iX@?bX@hMenu^*tD?FX=3)aIWR_7d1&1@so{VmEIoWHVFm3>5i#CVV8jZj5Q<6KD!^)20eugfb?F&n1!N9vWcSAk2vtq} z(5eS3AS1K<_+bTvqL!ChumUo&9WORm0imelg=?^4;RONXt^%78Fjhb}_q){KWSCSm zbaMbNCqu!=Zd9<^RX~Q+$<7`tu%Tv!6<87S?ajy_D#XDGD?$CFLP$K(rG3oV68&&lgea`Af?Oq4u>$;<87s)imo#1& zrH2IMPN&hZ#Q(ttE6BMJJkW?0;Kzcns$mLju!1A`9(#MN0Jo^-eFY@UWLT!I-{=2U z4M(urS8&8(UjdPe@$Cw7>tSjGg+^zA6&$IA6;L4*lMIR#(DTSxm!2_JK<2=pH7l&ZijZ$_Mh5Z13J4LM0PV2?8)}>>o3R2LlHOPW86*f+K!|BXYKawC zP_n}ctOy0OM^h(dv#)^6Slx&iMoFu&5U>E52lg*gJ!IN zd{SeBZp;6Y4x=c5V^({wwb@eKs>;mZ38$b%*a55$WNR*)aX%a)mSVg-0OzG~4m zT{c(&rm1#O4Skt=x^&7pTP5do!U}SgRK*JLV`i)%CtuQdVRRl6m>E`(b5Rv5z>fuC zRby6GSi#ZCSOIQP%~$~mGv^;GIO>2E5V@GK0>%akjm`oqI9drSph74nX^j;i(CJ?F zOJw)qD;}qWzj7RM@gLnR;pM5qgXKMT) zELS=Z0daN9thhp_#GMGHE3O9<;e^0u&zBtfsy$*i3lOJmh7oZAEc};7K(nXlLO@SK zuPy|n#`bmo!A0>Z1bi)|P$}Y2lgS4ZZ;CybjGCT);4OamTmxPoDhyy0BA}(AR~!OT zLv}ku!G2ZSeoFE^*Y=e94+%IBRf~iY{KDUSKGlGu)yzj|hBPg@L`F$4TP4XL3Xlxs zt&+*skUn&7)f?@t-_eRJE@z(DBUBhd99kW+msp3vwbI7iVOlWAw&fY8xqJQ zJ#^XD0y8xU-dsg0iJv`afkR&*%RO2n%k-ST!xd~z1RSdft_WSeZ0Z>LrhWbr&R;NY z^T17aVryuDk70;X%PHQ^$lkF4l&!C|RA1>V2HT*UzhqI_ObUJbAtDj{x^RLml0tvj 
zI@9Y8I}?22NvAg+kH)R?^(W;UQId)ROSp?A@C^^PXE{Vv2JrTXKayL?1Ih!vF}jqC z+>@1F_0f8&*FSZ7N)O~MpARrM_qJR4M4)O=-X$E;=rj?{-4O>%7kZ!@i&oQxq66eU zuEq6*MRag;JO}TL;7-BI~h(mzmvX$nu`}%T+txYSo z=&#~J8w5emr8qR8L*VT+l-_!kUVW9=eHg;J^8CHxWmO1R1`bV=hMUK_!~s9qga-m}MN=jzc)Wmwy0}fIc8xTf z&-^u5v+zj+OvHDJn#usaBdoaD2pQu_0j$HJ z6VW$LR;U#GhX`I{W!l+bos|o>=trE`fp!Q8M0`}h15@O%l8}mos4CiZ$^mN}{Vudw zV~V!-UeGw;M{O-E=O*5rf0J*W~TK&E19L*$;`c#={6@y#7H)|=cBe~6f z=BKc3ld4bYQzc!aDVKg<6EWyGTpYsomlyQgFiE zP-Y=ov%a?AZ@fkcJnU$xD=>VDuADj__uPfIt$U4d5~5Tibm6J=?+&$Wd<%HTGw@^}(91^1jRAx>k2=8vd zV<9Cq-PuOF{7#KWJKLSD-gds%DLEzqTHQ?nt0Q@^2BMu6_~oW-HOPIoSR|XH;dDG2 zq(bQp(FJFl=ic4?FL zRq%?{OX9tYe?$I@gda)rYo^IB;FR(lA4WP*qO9-GF<-nH%b5 zF>)MU%t_r|p^9F1`&Z$Y1KSqfLxiw|?3*O4l7hRsQh3w=_rbz(%u~8I?Vm>>&FZtbQ*+VI_A8WuP;q)rD!1+$R2mJeN+BJ~uemp!)1Z=#g2^*osQErr( zHaTe3hpqI{fRYgvs)8V3&02$~%7(2~@uD;f$; zQk^^okHl}PM<*IEJzZp=lMn@2uj3=GgB{FU~|{<2O4mHNFzMnAN0^=kLdps|Iom&ON_=pGcZK4 zi=Z{WtKM#lcll%t8kz)U>nsF<tqFb4tHKZ&si$mT$9^@<a=F)LzuTmTe2E{fue`tq}M`KM0)R$5%DNhju3Z>S92GjXaF4LC|ANOy{@tL1@^iQ7b zjnS6So}|O%-gX)GrcaH==pez((RQ!JopYR@idsmbN@w~0D8MGOQgy;`gUl`mOJs+T z>TogIDhT|*-)q8|x-=lo@2V!WUONcTQ|J{#EXJ)teP;vwSL75E#Aj!cSEx7r$Dw|# zijMM^GuGG_YSjCKKHq>x>oUkQq;vNdf>zjx|7wm|$hJ*CvCIhmU5%op_q|k%w6f!r zlbIMP4T-jbgwHhU%T~xV)0PdXJ*m_D>m$xxHft@?Kd;dwDLhqMYRQzA%r<{|gI8Y< zu@zns{_@JM=#*=-T+N6N3dsJ64mR5%d{h@JL3Git50K&H# z;J*z~Z{I8yRGauQGUUHMU%`q2C$f&kP-fZgywxSUOLgfr$I}j`O zx7Hlr%F!GCtZAQRZCO$QR}82>a|27d*0Qa}W^szd^CV|{stIqY$6Lzg<(4aK6~MaA zEY<6iBOG|s1wr_1Lw5gDy4?|)TthzB6sE5V4^CJ8akun! 
z(fz1%wkdTDy@(*(@W9 zZ-yu0$Ldoj+OuW2Ya9Mqh4+(q#?cX3=#p-HV=Yc{$GgMRSzonCzOxSZvkD7^5-c&f zHTa7P2M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Eh|Jb?pCU(JbEjmNqT0Z2MYNTT@ zl1<*|Nw_S{bpM{@S{<8ABAu%I#C-cHr^&(E!Tjtcu|l&>1D3K2R=0 zX(uuZXHE{WHIiJ{fiH2FwRrGvD;!TNpJiE)RhqgZt<-^&S%|zaRIW?2Y3wATn}c^% zII6@fIz=AzO;=^@R5;kxJnNkN3Vg7_9(FU?`ct4pF7{j4Z5fbtY~eFtnHBpCh>l{l zKFZ3L>vGG@B|ym)SXDA^nOoGN%|>-T$K#rXM_kF!n61Ijc;-GFa`}!RuF#m}A=`S} zshM?^yB86AWsJy&;GJ$JFF7oTigfW>@j0ex2Ov=G5GLnoJ`;6q0YrmdMAS^m%5gbGJUBbW&N1DaA zHSeD6NUNON!}MeVLn>!B_hd&}&xf8&Cdev1Svx>aW+`%hjcp@av7~!)tG8+1JsB@L z(dBY%(yu4uhrlN9*^|jgqW6k@dorG^%4@=ZCah<;{0S$h^enM?x( zujgT%OyF8{fgtN-!Wc}Y=h>6Vr0Lz03A0sMXsCoFZA`tYqqt4z zo=nb6?L8R|x$Zq#9D`kFvsGDWsDvcr!Luima=>IH8rDK58W}Ivo(#S{nUw81dop1_)~@|} zGJ&w@ltsV@-IECeGaS|0ldaU&lL-u|oY~xyt#CaV^&qY}tJZ+=O-a_+!G}EeF3JSS z^klCz?9dF*ky#47FyWE}U0O5mj*J(a=x{+gGJc349hr<|fR2nOtMZv}2{3p^CSzbW z`LE21eg5<9$Yfj_(x2BV89!d2j*KTNI${wvLU&~RxP%#FNA{ByuEmk;8q5N&vLCYx z*IwNi&$i0dt!>-WMRZ(6D_!hwOcM2A`xM~nVbR3DGSqw&Mu`rz!vw&(h|XXYtaDs*R} z^>KmDuFGGEE+Z3oS7KAvZN10uwF61h%Jf=Sve|F6G3kxZcBlQ((EbJ(nOHbeQTbV! 
z*W%0${%@u?#hT~{kTv+x2E2*orfM!{O-+&5{4x+b0RG+&Sd?lO{&my!7&2Vau(MRD zE>uoq$I2!0uvboF35q%_%BIkniX9byZ<{`w@JoL0?{PMuQWuxed|uz>M$5TgNoJw4 zz!;Yp58thPNt|z--UnP zw+@R@Jhwc5^5n@|mv5o@`6#sG!=?Pp2UhL^lZ}3NVt3KPuSf85?UZ9U(H-}@8-w17 zwiU@VG*3=<$Gxr2uzRK#HxNUw26nE;#DSd^ z`)!j+|I{#Ez+V$R$9z^V6XSGP1PxyvFNgie3i{}H$zCVt4egOpUDAVuyo1hiK}PXp ze|Re0P6NLh!ID;$t#J>Xpjlj5q!&Xt&J;X=+Fjb!+~uiffB3w+OFrf2wYw0p+qp~r zX_dPWV$VN!Av3pgm;BT9yUUlQS1+?3&u0W&uH_xbZCl+lt-Epd=|6)q5&AcpkPP&4 zl7TK?k5L9%MD=o((U#}Yo#}JSH_=SgfFC<4hX+^iG~O6{za)p~&i zjX#axR+gj6dPUt2TjP5BwX6X4Z~!szz;_yOrzgW=;P4fo?CM%}iVjvLap52?;}lQe ze?^|58aa{>K73_v>PACI3w}QW$vxyI)u!!dOJ8wtiBfln*-J=ia_RbhRGQ4EP+ne^ z+(65cLvT&}YOAs&of>>4f}2?$>_P_RiVGK#D#LYN%c85JU*v7>!YXFmR&4O)hD)}i zEQ$fbM_fr*3tE(qEqThi4KHF!7Q za%ju~{@AJAVEEz!fvs!G$^%y;G)D~h6Q?GFVNwhQ4rMPx&85j;9xqp;{`MUtpujhs z+6$I7p+4}Fa{#gETnFuoku0yW;ZRcYp+KNTBU%~k+38L?Q*==EXiORn#RlC*ARq}{#(A8)(b1piVxY;3v|lxg-CYOf-lF}0s!H&ybge7?Kf>c7^;BS zA8$IeiBvSIN^kzo{$LD$IF*l5x>98yOVM*ERQxXli)^x@6@e zEJeaHS!=lp26Sy_bp%piQb=3AQrEP#g46kk5D*$DIoh~V*g$e90qUw0R%2#jL%cRU z!WSdi)=qz(FN`Gi6HPeA@?zE0^R_J0XpGa?EQ}?2xQ6++a4Ms_{=%jzlefbEh~PGF zuDE1&!R4+}69(UH!0Wx47Sox(7?V2uqi8gI;&Hf?_Pxjf9WR9eA*uPx-sC7@#0by1 zKsAVa+J}}?8tG2B@=((oE^9Cyhh>m_a_}`5u!iM~3nHb8*coY+!8smme!~T>Vc8VJ zMs*m0rRD(Fu#cCmsjzc*At{1|&L$@CP!(77!8~Cuc|R-stRrMS_igErtFCAlE)SJCvLn*fDDN3l5yF_ zU%62j+^}6ThJv@eDRx)_Fc22a#0A?m4N*%&Oa?X8`QV1_67rcoxJvedX@|)a%k8im zwoB+?6BKqjIs{RUo=ei=2uPBaJmFMFWs3ms(ovIHI%2!zaV&NJ)Wy*=IY%JQ-LPG5 zrm|XBj7=o8Ul1wG4cj&I%wf9>r^+ZC7=doTxlQK0C8?Fu9$DUGfg zcnLOCVgxsAS7cL2YW}hhleSFB(GlBi+hDtaAzM5q-=F9@PB(0q;fxC+fxw%biGbjZ z8@9`^DTWOP#F3%qg6%SoOTtANWi4F81wYGGAdeq9gL1=mS>}Ww;V0=JQqb}1hV3%^ zN~@#%_4M-`9Ne&7jyEnma5aWTsG$~iaw9gz5l3v7V^je4KW$iSfRUNyM%nE4|S@VQ9Dd#W7!V7VY_4y zY=ZvSE=SKLX>kN3Nz1DdCd3u{7AzDizRT{mZL`+hV2sa z8RxQESBy;r+pS^WE}07(;B~iV!LDvC3o)Fu9388 zEH2otc@Uh+Ou>N$rZD?@$+R5z?UKiHtvuh7ncKcyfoZV^bHEn`iZ0l$K;ptrX>QoA z$fl6g{AE9*wC1*N_sDs`cJY{eXxE@9j{9~Q&bT0AL{Y3K77E_MGX*znmtj*38xDvg 
zL(K)-Wggds3m5bOlUCHf8taijKj4P#vdjrV!cWpgfbbBEMg)%gb{T$U^)9Jzt-;X1 zATXXDxM8~-Z(MlrP(iJc#Sz=(7!?DA1J=mVb8D0J*e$FnA-oauOMmkW>z95&>ZCo(z5eY@mw1#j-yu7Pl=C3e7e zO+(az?UF%_z>^!cOUP&X;8K+fN=B&JVY7TGYyuL>yC%R5+a;64Cg_jta`ar17Du4$ z??qhpQi(imSx7w$w#zdqRJabQi=!7vL#l-#*vyGGKYvAAHn=0VPHDs$VnD{#dnvr_XU+d9jej3F4v z;2EFz?6_}NU|Q_K9PmYfq6@Ywkht(unj5w&vMD4rf7zRAzA$Oeao_II^MLK*F?m1L zK~rw~b{WpNAi}j;nqg(3!3A%~xh=&m@hAvKY?onE3>yxJBSXyv+hrb?gp1Joc5}(f zOJa>?9kE@OIUz{+NxBFC9`>c<*KOY}!>_FNBlT@y$TnB1t(Q7tyBu#^c<@kx(Z#Jp zo+S1-V!IrpVt{aJ*cfQmtxaYKT*yJf?Mwz<-a?r0m3MvncJZ{+z<8p{iDKMsaN0w6 zNwnbnT3?(;{9z3EzM%AQ#wb6Se~lirdp#VD+lGU7C3A(ps(9{(ghm;1D}@0`mlE-z zz$Az=YPRgp{5zWPLqU`+fNzyfGEqr1smTr{OT9(<^_Hn(>Di!OczXlx4x$Mm-UQKv zp?!%0iZ6X{Kyr63D;oa1ZXi#C0{!ob=Lo7f=Bk)F0*2LcK()Y@EDgoM zl_$d`7gcN{S`9IyT=<9)4SvL@mtt5JtC%2i^ysD7sg+(&Ra7uKtJ6WizRvZwAd$UX zxdhQcQMY)qsS8xlm)%gGy-hvnb;m<=g?nd=E`BEkNcm+fc=&*m~J6!gb>(*`C+k+!uN;2cNe^Yj5+t!ws-8x@CDYII2m9Yk&+y(c0Su837$ZCenYOiLAORSbY?hrNUv)RHz z;;{Xsm^;c8FQ~w~#mXIrA!`dpat#oAUG|0%eN+s-*)?SFYreGu;#(blO|?V*utN-` z!y+4W^MV&<5bpVzaELJMP(I|^lv;4Tfw(wT($6ZX2JnI~YIRY00aj%s%Go7|= zAf~VRxkOzee5uXq7i8BY)pvkEOpbJu8L}}D~teDW0u96^H6cR469IFq$z6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d z*pFA*ZAK=Mn)IC^sJx&oyI+=w4Q|Y-$PzrJWrqJ=a+#y(V{#;M7Wl$0kX*>_VCZTX z`5IQf8Z%ERBmp*Sc{mDY0=dZx%D~^kD!_hWgfnnkM1xwm??x7xeXA3J)V6VnNyv8Pidq;f+F|6-4FXCR8nofU0V=5~7NqH;UA z7?t~Ir`XxE|uYafJygkk3YKj#jxvacd*loFInHaxI3O(-*slR)f=#{ zM(kD<+3*MRa4;iz4ADK&ZcTf`$!OdecF*)C|3t_PpwyjY}a4O22>vUD%$CF(_%*6ff_%TH^1w~RX)3g=GLYE_%b*UzbHFMDf=KkKvq>ke|;Wantkj6^g7XK ztcc%Bm!N&0XOY{j-hTgstA{T}>CraX3H$%=C~Y56e;oB{f}trwx@c_HwL_Iu0H2Fs zKR=&%5A^}g;dK6yn0OMIxdZlhMmy8(ooOdye-gLh@M_~=ROO#GpnN6DQbkgcnELk#&QZ)9GOm~a$|EnfOROEyvgWYWBp*m&w`$Gd z*KYq%VDP9lgQT<)b$7IIMF(zjstRs2s*;Em{#an{I?9}VJt(qwon)YbW0&o5BtI*7 zR%;NyyC-V!ddj59L`f+tvUt6gMHi*!?-Nw_KT(4>Q09ypr1iJR-VIvzTr^0A3Z%gs zDU(JG=B|#P+as*CJ6tPlxig1 zAEsOyRhai`2gF2wSUX)89paG`veW4h9gUC6Ey7;0Xkk<$X)%)$gDZ-e%vy_NhVc0a zu%(T0r`hQa2Kkyr-qK?Jll{2CCDKS<#Q%I){Kbm!L|gdzi171K;pgjwpRX5wzCrl; 
zM&ak1;N6$Oe1F)V=Ik8dmRNWcMzWh|GU=ZhCfC_MM3#N=W3SOXR)ClJkyOdG#!vIq zji1j83PS&|i9Y&#)e*=(9L%qI1d=YuoXkAn=u19t9)ScV@`QN=92>~z%_9(}Ay0fi zCs({xfZ0`I`WV@fkvrHAN~ozJaEZmDt(RoTHw2)Cj3S?z2Q^|=%-rsEpBqE1nBDD_ z=&|0%OzL$$M2fk%kYSi20>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322 z#he^;mNyZ2H8yt=7uHA+MWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9 zF((%pkqpGl$VL4OnPNU}gaX-7kt$g`T52r5bm>*Fn5D|jXgf~eplajH(s%BKJ!lVJ zzq>I&OEh1-H~z}K@VgP1itpUXR}aS;)DdR^!Yi&}Y(JhJr?+~zeY-k0bJW?hJ;!LF zbabY(Gwh!njnAwezA=JlBQSMKSv*GkXrN*?t6^^JKAiJ%qluR+Be0x2gU+~{WDGnP z!Ls3TWRqCbhzZjve)O{>irGJqGxIvz*>J~)OH}jQW@AIMO+{m2Z22b%S0-a4AJ2JO zHS^@DA`-my^; z`K+~y=mWMYq7SePIjRWfcpDXwYQR!O`NwTllz*J%sG5pM+Ots+`K+~y=mWMYq7SeP zIjRVk7&a;*#fYVf@{il9DE~OiQK6!5MxcpBT+2)o(ouD2l{`R{Rq?5uyC1AJ7R#hW zZDJpuMnOpkm@KRxm(aeDtx12`REhSb{UD%SKR zPc;2I*pF8xKpe%BO3wZp{pKPuG5Ey>R6hZhM-Y&Z?(~`%s`7F#^QMyvO7cEBJ44iDR5WgmMn%(EadR|=0I?tk zM`NkDI~q$xW<5@hhAfeE=h%X(I|xsLh{>Cdv!e;~k%{Vk4d7q}D@Wt$W)}7O%4RMS zA>s-^DHoXJNEmjOlE_q=lT2hz4u~jIt95cfmH-h|bqD0AI6ELmMJ6Iw2c#m(Y|6=T z7!pKG-gJB%keyFBQBkxaubdoMgZZ0wrxy34GZmzM4BS9?6$&uL@ z#!801=f#tYWF;VseO8EESoE++r0&)iqXeB#4+CViixv#{uh!RTRH=k5wEA4^i=S z+(pIHky({ntm5dp#wvjb5tl=(5-GXIDv^>*WNxvFtW!A0Duw_Nl|!szskp}~mWoV7 zZn27?>Kdy!5=2Z6v5KeTn#3CFfFh@m)2rUe;Tyk#;GBcT_ z{Z<1i1q>D2nZex?Vwwmb$=jY#ZC5@>cZF@CTS;-+T=Zr$xXN$My>d?85XqB{{_ zRx+`9IWJ{6vpsizW9Sgkg(2qcyzI;*;#*pUg~4btLHq3v!?p05vXaj9y2H-otvlUu zZ>!TAk4NKHz9hQz_b-RbvKW_c*h^2(%XjX5`?C1|z3k%7J$ibT1nJJvXuM 0.0, "your penetration rate should be above zero" +inflow.add( + veh_type="human", + edge="119257914", + vehs_per_hour=8378 * pen_rate, + # probability=1.0, + departLane="random", + departSpeed=20) +# on ramp +# inflow.add( +# veh_type="human", +# edge="27414345", +# vehs_per_hour=321 * pen_rate, +# departLane="random", +# departSpeed=20) +# inflow.add( +# veh_type="human", +# edge="27414342#0", +# vehs_per_hour=421 * pen_rate, +# departLane="random", +# departSpeed=20) + +# Now add the AVs +# main highway +inflow.add( + veh_type="av", + edge="119257914", + 
vehs_per_hour=int(8378 * pen_rate), + # probability=1.0, + departLane="random", + departSpeed=20) +# # on ramp +# inflow.add( +# veh_type="av", +# edge="27414345", +# vehs_per_hour=int(321 * pen_rate), +# departLane="random", +# departSpeed=20) +# inflow.add( +# veh_type="av", +# edge="27414342#0", +# vehs_per_hour=int(421 * pen_rate), +# departLane="random", +# departSpeed=20) + +NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") + +flow_params = dict( + # name of the experiment + exp_tag='I_210_subnetwork', + + # name of the flow environment the experiment is running on + env_name=I210MultiEnv, + + # name of the network class the experiment is running on + network=I210SubNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # simulation-related parameters + sim=SumoParams( + sim_step=0.8, + render=True, + color_by_speed=True, + restart_instance=True, + emission_path="/Users/akashvelu/Documents/data3" + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + sims_per_step=1, + additional_params=additional_env_params, + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + template=NET_TEMPLATE + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + edges_distribution=EDGES_DISTRIBUTION, + ), +) + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = 
test_env.action_space + +POLICY_GRAPHS = {'av': (PPOTFPolicy, obs_space, act_space, {})} + +POLICIES_TO_TRAIN = ['av'] + + +def policy_mapping_fn(_): + """Map a policy in RLlib.""" + return 'av' diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index a3f6864ae..e14ab5850 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -7,7 +7,7 @@ class ImitatingController(BaseController): """ - Controller which learns to imitate another given expert controller. + Controller which uses a given neural net to imitate an expert. Subclasses BaseController """ # Implementation in Tensorflow @@ -16,11 +16,30 @@ def __init__(self, veh_id, action_network, multiagent, car_following_params=None BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) self.action_network = action_network self.multiagent = multiagent + self.veh_id = veh_id def get_accel(self, env): + """ + Get acceleration for vehicle in the env + """ + if self.multiagent: observation = env.get_state()[self.veh_id] else: observation = env.get_state() - return self.action_network.get_accel_from_observation(observation) + action = self.action_network.get_accel_from_observation(observation) + + if not self.multiagent: + if self.action_network.action_dim > 1: + # TODO: fill in + try: + rl_ids = env.get_sorted_rl_ids() + except: + print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") + rl_ids = env.get_rl_ids() + + assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" 
+ + ind = list.index(self.veh_id) + return action[ind] diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 8c7d35b27..5098f0314 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -10,9 +10,8 @@ class ImitatingNetwork(): """ Neural network which learns to imitate another given expert controller. """ - # Implementation in Tensorflow - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, inject_noise=0, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): self.sess = sess self.action_dim = action_dim @@ -21,7 +20,7 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.size = size self.learning_rate = learning_rate self.training = training - self.inject_noise=inject_noise + self.stochastic=stochastic self.noise_variance = noise_variance if load_existing: @@ -58,12 +57,20 @@ def load_network(self, path): loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') loader.restore(self.sess, path+'model.ckpt') - self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0') - self.action_predictions = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') - - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) - + # print([n.name for n in tf.get_default_graph().as_graph_def().node]) + self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/observation:0') + network_output = 
tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') + + if self.stochastic: + # determine mean and (diagonal) covariance matrix for action distribution + mean = network_output[:self.action_dim] + cov_diag = network_output[self.action_dim:] + # set up action distribution (parameterized by network output) + dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) + # action is a sample from this distribution + self.action_predictions = dist.sample() + else: + self.action_predictions = network_output def define_placeholders(self): """ @@ -80,53 +87,89 @@ def define_forward_pass(self): """ Build network and initialize proper action prediction op """ - pred_action = build_neural_net(self.obs_placeholder, output_size=self.action_dim, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.action_predictions = pred_action + self.stochastic = False + if self.stochastic: + output_size = 2 * self.action_dim + else: + output_size = self.action_dim + + network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) + self.network_output = network_output + + # TODO: add this as a class variable + if self.stochastic: + # determine mean and (diagonal) covariance matrix for action distribution + mean = network_output[:self.action_dim] + cov_diag = network_output[self.action_dim:] + # set up action distribution (parameterized by network output) + self.dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) + # action is a sample from this distribution + self.action_predictions = dist.sample() + + else: + self.dist = None + self.action_predictions = network_output - if self.inject_noise == 1: - self.action_predictions = self.action_predictions + tf.random_normal(tf.shape(self.action_predictions), 0, self.noise_variance) def define_train_op(self): """ - Defines training operations for network + Defines 
training operations for network (loss function and optimizer) """ true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions + network_prediction = self.network_output + + + if self.stochastic: + # negative log likelihood loss for stochastic policy + log_likelihood = self.dist.log_prob(true_actions) + self.loss = -tf.reduce_mean(log_likelihood) + else: + # MSE loss for deterministic policy + self.loss = tf.losses.mean_squared_error(true_actions, network_prediction) - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): """ - Executes one training step for the given batch of observation and action data + Executes one training step for the given batch of observation and action data """ action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) def get_accel_from_observation(self, observation): + """ + Gets the network's acceleration prediction based on given observation/state + """ + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - return ret_val def get_accel(self, env): + """ + Get network's acceleration prediction based on given env + """ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) + def add_to_replay_buffer(self, rollout_list): """ Add rollouts to replay buffer """ self.replay_buffer.add_rollouts(rollout_list) + def sample_data(self, batch_size): """ Sample a 
batch of data from replay buffer """ return self.replay_buffer.sample_batch(batch_size) def save_network(self, save_path): + """ Save network to given path and to tensorboard """ + self.saver.save(self.sess, save_path) # tensorboard writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) diff --git a/flow/controllers/imitation_learning/multiagent_ring_env.py b/flow/controllers/imitation_learning/multiagent_ring_env.py index 538679ed0..4fa72addc 100644 --- a/flow/controllers/imitation_learning/multiagent_ring_env.py +++ b/flow/controllers/imitation_learning/multiagent_ring_env.py @@ -63,7 +63,7 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( sim_step=0.1, - render=True, + render=False, restart_instance=False ), diff --git a/flow/controllers/imitation_learning/replay_script.py b/flow/controllers/imitation_learning/replay_script.py index 5e3984e0d..9d41afea8 100644 --- a/flow/controllers/imitation_learning/replay_script.py +++ b/flow/controllers/imitation_learning/replay_script.py @@ -20,7 +20,7 @@ def run_experiment(): action_dim = (1,)[0] sess = create_tf_session() - action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/') + action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models8_vdes14/') def get_rl_actions(state): rl_actions = {} diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 2b7e823cc..5ab94b425 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -29,13 +29,13 @@ def save_controller_network(self): def main(): import argparse parser = argparse.ArgumentParser() - parser.add_argument('--ep_len', type=int, default=3000) + parser.add_argument('--ep_len', type=int, default=5000) 
parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) parser.add_argument('--n_iter', type=int, default=5) - parser.add_argument('--batch_size', type=int, default=1000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=3000) + parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=4000) parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step @@ -47,10 +47,11 @@ def main(): parser.add_argument('--save_path', type=str, default='') parser.add_argument('--save_model', type=int, default=0) parser.add_argument('--num_eval_episodes', type=int, default=30) - parser.add_argument('--inject_noise', type=int, default=0) + parser.add_argument('--stochastic', type=bool, default=False) parser.add_argument('--noise_variance',type=float, default=0.5) parser.add_argument('--vehicle_id', type=str, default='rl_0') parser.add_argument('--multiagent', type=bool, default=False) + parser.add_argument('--v_des', type=float, default=15) args = parser.parse_args() diff --git a/flow/controllers/imitation_learning/singleagent_straight_road.py b/flow/controllers/imitation_learning/singleagent_straight_road.py new file mode 100644 index 000000000..bcebad140 --- /dev/null +++ b/flow/controllers/imitation_learning/singleagent_straight_road.py @@ -0,0 +1,163 @@ +"""Multi-agent highway with ramps example. +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. 
+""" +from flow.controllers import RLController, IDMController +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.networks import HighwayNetwork +from flow.envs.straightroad_env import SingleStraightRoad +from flow.networks.highway import ADDITIONAL_NET_PARAMS +from flow.utils.registry import make_create_env +from ray.tune.registry import register_env + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + + +# SET UP PARAMETERS FOR THE ENVIRONMENT + +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + 'target_velocity': 14.0, + 'local_reward': True, + 'lead_obs': True, + "terminate_on_wave": False, + # the environment is not allowed to terminate below this horizon length + 'wave_termination_horizon': 1000, + # the speed below which we consider a wave to have occured + 'wave_termination_speed': 10.0, + # whether the vehicle continues to acquire reward after it exits the system. 
This causes it to have incentive + # to leave the network in a good state after it leaves + 'reward_after_exit': True +}) + + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +# autonomous vehicles +vehicles.add( + veh_id='rl', + acceleration_controller=(RLController, {})) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="idm_highway_inflow") + +# add autonomous vehicles on the highway +# they will stay on the highway, i.e. they won't exit through the off-ramps +inflows.add( + veh_type="rl", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="rl_highway_inflow") + +# SET UP FLOW PARAMETERS +done_at_exit = True +if additional_env_params['reward_after_exit']: + done_at_exit = False + +flow_params = dict( + # name of the experiment + exp_tag='singleagent_highway', + + # name of the flow environment the experiment is running on + env_name=SingleStraightRoad, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, # do not put more than one + done_at_exit=done_at_exit, + additional_params=additional_env_params, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + use_ballistic=True, + restart_instance=True + ), + + # network-related parameters (see 
flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 937ab4793..a390502d4 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -34,33 +34,32 @@ def __init__(self, params): # vehicle setup self.multiagent = params['multiagent'] - # TODO: remove print - print("MULTI: ", self.multiagent) - self.vehicle_ids = self.env.k.vehicle.get_rl_ids() # neural net setup obs_dim = self.env.observation_space.shape[0] - action_dim = (1,)[0] + action_dim = self.env.action_space.shape[0] + self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], inject_noise=self.params['inject_noise'], noise_variance=self.params['noise_variance']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], 
noise_variance=self.params['noise_variance']) tf.global_variables_initializer().run(session=self.sess) # controllers setup + v_des = self.params['v_des'] car_following_params = SumoCarFollowingParams() self.controllers = dict() for vehicle_id in self.vehicle_ids: - expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) imitator = ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params) self.controllers[vehicle_id] = (imitator, expert) def run_training_loop(self, n_iter): """ - Trains imitator for n_iter iterations + Trains imitator for n_iter iterations (each iter runs optimizer once on given batch of dat) Args: param n_iter: number of iterations to execute training @@ -68,14 +67,13 @@ def run_training_loop(self, n_iter): # init vars at beginning of training self.total_envsteps = 0 - self.start_time = time.time() for itr in range(n_iter): print("\n\n********** Iteration %i ************"%itr) # collect trajectories, to be used for training if itr == 0: - # first iteration is standard behavioral cloning + # first iteration is behavioral cloning training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) else: training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) @@ -102,7 +100,7 @@ def collect_training_trajectories(self, itr, batch_size): """ print("\nCollecting data to be used for training...") - trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0) + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des']) return trajectories, envsteps_this_batch @@ -126,9 +124,8 @@ 
def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") - trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False) + trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False, v_des=self.params['v_des']) - average_imitator_reward = 0 total_imitator_steps = 0 average_imitator_reward_per_rollout = 0 @@ -137,9 +134,10 @@ def evaluate_controller(self, num_trajs = 10): average_action_imitator = 0 # compare actions taken in each step of trajectories - for traj_pair in trajectories: - traj = traj_pair[0] - traj_len = traj_pair[1] + for traj_tuple in trajectories: + traj = traj_tuple[0] + traj_len = traj_tuple[1] + imitator_actions = traj['actions'] expert_actions = traj['expert_actions'] @@ -149,45 +147,45 @@ def evaluate_controller(self, num_trajs = 10): action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) action_errors = np.append(action_errors, action_error) - average_imitator_reward += np.sum(traj['rewards']) total_imitator_steps += traj_len average_imitator_reward_per_rollout += np.sum(traj['rewards']) - average_imitator_reward = average_imitator_reward / total_imitator_steps average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) average_action_expert = average_action_expert / total_imitator_steps average_action_imitator = average_action_imitator / total_imitator_steps - expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True) + expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True, v_des=self.params['v_des']) average_expert_reward = 0 total_expert_steps = 0 average_expert_reward_per_rollout = 0 # 
compare reward accumulated in trajectories collected via expert vs. via imitator - for traj_pair in expert_trajectories: - traj = traj_pair[0] - traj_len = traj_pair[1] + for traj_tuple in expert_trajectories: + traj = traj_tuple[0] + traj_len = traj_tuple[1] average_expert_reward += np.sum(traj['rewards']) total_expert_steps += traj_len average_expert_reward_per_rollout += np.sum(traj['rewards']) average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps - print("\nAVERAGE REWARD PER STEP EXPERT: ", average_expert_reward) - print("AVERAGE REWARD PER STEP IMITATOR: ", average_imitator_reward) - print("AVERAGE REWARD PER STEP DIFFERENCE: ", np.abs(average_expert_reward - average_imitator_reward), "\n") + average_expert_reward = average_expert_reward / total_expert_steps print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + print("MEAN EXPERT ACTION: ", average_action_expert) print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + def save_controller_network(self): + """ + Saves a tensorflow model to the specified path given in the command line params. 
Path must end with .ckpt + """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 499e06f1d..a1334066f 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -7,10 +7,11 @@ from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.rewards import * """ Class agnostic helper functions """ -def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert): +def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. Args: @@ -24,8 +25,6 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto """ vehicle_ids = env.k.vehicle.get_rl_ids() - print("VEHICLE IDS: ", vehicle_ids) - assert len(vehicle_ids) <= 1, "Not single-agent" observation = env.reset() if len(vehicle_ids) == 1: @@ -38,7 +37,7 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto while True: - # update vehicle ids and make sure it is single agent + # update vehicle ids vehicle_ids = env.k.vehicle.get_rl_ids() if len(vehicle_ids) == 0: observation, reward, done, _ = env.step(None) @@ -46,43 +45,65 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto break continue - assert len(vehicle_ids) == 1, "Not single agent" + # init controllers if any of vehicle ids are new + # there could be multiple vehicle ids if they all share one state but have different actions + for vehicle_id in vehicle_ids: + if vehicle_id not in set(controllers.get_keys()): + expert = 
FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des)
+                imitator = ImitatingController(vehicle_id, action_network, False, car_following_params=car_following_params)
+                controllers[vehicle_id] = (imitator, expert)
 
-        # init controllers if vehicle id is new
-        vehicle_id = vehicle_ids[0]
-        if vehicle_id not in set(controllers.get_keys()):
-            expert = FollowerStopper(vehicle_id, car_following_params=car_following_params)
-            imitator = ImitatingController(vehicle_id, action_network, false, car_following_params=car_following_params)
-            controllers[vehicle_id] = (imitator, expert)
+        print("CONTROLLING CONTROLLER: ", controller)
+        print("EXPERT CONTROLLER: ", expert_controller)
 
-        # decide which controller to use to collect trajectory
-        expert_controller = controllers[vehicle_id][1]
-        if use_expert:
-            controller = expert_controller
-        else:
-            controller = controllers[vehicle_id][0]
+        # get the actions
+        action_dim = env.action_space.shape[0]
+        rl_actions = []
+        expert_actions = []
+        invalid_expert_action = False
+        for i in range(action_dim):
+            # if max number of RL vehicles is not reached, insert dummy values
+            if i >= len(vehicle_ids):
+                rl_actions.append(0.0)
+                expert_actions.append(0.0)
+            else:
+                imitator = controllers[vehicle_ids[i]][0]
+                expert = controllers[vehicle_ids[i]][1]
 
-        print("COLLECTING CONTROLLER: ", controller)
-        print("EXPERT CONTROLLER: ", expert_controller)
+                expert_action = expert.get_action(env)
+                # catch invalid expert actions
+                if (expert_action is None or math.isnan(expert_action)):
+                    invalid_expert_action = True
+
+                expert_actions.append(expert_action)
 
-        action = controller.get_action(env)
-        if type(action) == np.ndarray:
-            action = action.flatten()[0]
+                if use_expert:
+                    rl_actions.append(expert_action)
+                else:
+                    rl_actions.append(imitator.get_action(env))
+
+
+        # don't add invalid expert actions to replay buffer if any are invalid
+        if invalid_expert_action:
+            if use_expert:
+                observation, reward, done, _ = 
env.step(None)
+            else:
+                observation, reward, done, _ = env.step(rl_actions)
 
-        expert_action = expert_controller.get_action(env)
-        if (expert_action is None or math.isnan(expert_action)):
-            observation, reward, done, _ = env.step(action)
             terminate_rollout = traj_length == max_trajectory_length or done
+
             if terminate_rollout:
                 break
+            # skip to next step
             continue
 
+        # update collected data
         observations.append(observation)
-        actions.append(action)
-        expert_actions.append(expert_action)
-        observation, reward, done, _ = env.step(action)
+        actions.append(rl_actions)
+        expert_actions.append(expert_actions)
+        observation, reward, done, _ = env.step(rl_actions)
 
         traj_length += 1
         next_observations.append(observation)
@@ -93,10 +114,10 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto
         if terminate_rollout:
             break
 
-    return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length
+    return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length
 
 
-def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert):
+def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des):
     """
     Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller.
@@ -117,9 +138,10 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector traj_length = 0 while True: + vehicle_ids = env.k.vehicle.get_rl_ids() + # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: - print("NO RL VEHICLES") observation_dict, reward, done, _ = env.step(None) print(env.k.vehicle.get_rl_ids()) if done['__all__']: @@ -138,7 +160,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector if vehicle_id not in set(controllers.keys()): car_following_params = SumoCarFollowingParams() - expert = FollowerStopper(vehicle_id, car_following_params=car_following_params) + expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) imitator = ImitatingController(vehicle_id, action_network, True, car_following_params=car_following_params) controllers[vehicle_id] = (imitator, expert) @@ -193,7 +215,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert): +def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -204,6 +226,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m expert_controller: subclass of BaseController, "expert" for imitation learning min_batch_timesteps: minimum number of environment steps to collect max_trajectory_length: maximum steps in a trajectory + v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) Returns: List of rollout dictionaries, total steps taken by environment @@ -214,9 +237,9 @@ def sample_trajectories(env, controllers, 
action_network, min_batch_timesteps, m while total_envsteps < min_batch_timesteps: if multiagent: - trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) trajectories.append(trajectory) @@ -224,7 +247,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m return trajectories, total_envsteps -def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert): +def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15): """ Collects a fixed number of trajectories. 
@@ -235,6 +258,8 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le expert_controller: subclass of BaseController, "expert" for imitation learning n: number of trajectories to collect max_trajectory_length: maximum steps in a trajectory + v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) + Returns: List of rollout dictionaries @@ -244,9 +269,9 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le for _ in range(n): if multiagent: - trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert) + trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) trajectories.append((trajectory, length)) diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 57000323f..1636da035 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -2,7 +2,7 @@ import tensorflow as tf -# Below are tensorflow related functions +""" Class agnostic helper functions related to tensorflow""" def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ From f924d9cbc75de9da430785734f3b080f8fb94167 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 3 May 2020 23:34:33 -0700 Subject: [PATCH 037/335] Bug fixes to stochastic policies and singlagent with multiple RL vehicles --- .../imitation_learning/bottleneck_env.py | 150 -------------- .../imitation_learning/i210_multiagent.py | 193 ------------------ 
.../i210_multiagent_ghost.py | 181 ---------------- .../imitating_controller.py | 50 +++-- .../imitation_learning/imitating_network.py | 93 ++++++--- .../imitation_learning/multiagent_ring_env.py | 99 --------- .../imitation_learning/replay_buffer.py | 2 + .../imitation_learning/ring_env.py | 85 -------- flow/controllers/imitation_learning/run.py | 16 +- .../singleagent_straight_road.py | 163 --------------- .../controllers/imitation_learning/trainer.py | 87 +++++--- flow/controllers/imitation_learning/utils.py | 75 ++++--- 12 files changed, 211 insertions(+), 983 deletions(-) delete mode 100644 flow/controllers/imitation_learning/bottleneck_env.py delete mode 100644 flow/controllers/imitation_learning/i210_multiagent.py delete mode 100644 flow/controllers/imitation_learning/i210_multiagent_ghost.py delete mode 100644 flow/controllers/imitation_learning/multiagent_ring_env.py delete mode 100644 flow/controllers/imitation_learning/ring_env.py delete mode 100644 flow/controllers/imitation_learning/singleagent_straight_road.py diff --git a/flow/controllers/imitation_learning/bottleneck_env.py b/flow/controllers/imitation_learning/bottleneck_env.py deleted file mode 100644 index 820244a87..000000000 --- a/flow/controllers/imitation_learning/bottleneck_env.py +++ /dev/null @@ -1,150 +0,0 @@ -"""Bottleneck example. 
-Bottleneck in which the actions are specifying a desired velocity -in a segment of space -""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ - InFlows, SumoCarFollowingParams, SumoLaneChangeParams -from flow.core.params import TrafficLightParams -from flow.core.params import VehicleParams -from flow.controllers import RLController, ContinuousRouter, \ - SimLaneChangeController -from flow.envs import BottleneckDesiredVelocityEnv -from flow.networks import BottleneckNetwork - -# time horizon of a single rollout -HORIZON = 1000 -# number of parallel workers -N_CPUS = 2 -# number of rollouts per training iteration -N_ROLLOUTS = N_CPUS * 4 - -SCALING = 1 -NUM_LANES = 4 * SCALING # number of lanes in the widest highway -DISABLE_TB = True -DISABLE_RAMP_METER = True -AV_FRAC = 0.10 - -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - lane_change_controller=(SimLaneChangeController, {}), - routing_controller=(ContinuousRouter, {}), - car_following_params=SumoCarFollowingParams( - speed_mode="all_checks", - ), - lane_change_params=SumoLaneChangeParams( - lane_change_mode=0, - ), - num_vehicles=1 * SCALING) -vehicles.add( - veh_id="followerstopper", - acceleration_controller=(RLController, {}), - lane_change_controller=(SimLaneChangeController, {}), - routing_controller=(ContinuousRouter, {}), - car_following_params=SumoCarFollowingParams( - speed_mode=9, - ), - lane_change_params=SumoLaneChangeParams( - lane_change_mode=0, - ), - num_vehicles=1 * SCALING) - -controlled_segments = [("1", 1, False), ("2", 2, True), ("3", 2, True), - ("4", 2, True), ("5", 1, False)] -num_observed_segments = [("1", 1), ("2", 3), ("3", 3), ("4", 3), ("5", 1)] -additional_env_params = { - "target_velocity": 40, - "disable_tb": True, - "disable_ramp_metering": True, - "controlled_segments": controlled_segments, - "symmetric": False, - "observed_segments": num_observed_segments, - "reset_inflow": False, - "lane_change_duration": 5, - "max_accel": 3, - 
"max_decel": 3, - "inflow_range": [1000, 2000] -} - -# flow rate -flow_rate = 2300 * SCALING - -# percentage of flow coming out of each lane -inflow = InFlows() -inflow.add( - veh_type="human", - edge="1", - vehs_per_hour=flow_rate * (1 - AV_FRAC), - departLane="random", - departSpeed=10) -inflow.add( - veh_type="followerstopper", - edge="1", - vehs_per_hour=flow_rate * AV_FRAC, - departLane="random", - departSpeed=10) - -traffic_lights = TrafficLightParams() -if not DISABLE_TB: - traffic_lights.add(node_id="2") -if not DISABLE_RAMP_METER: - traffic_lights.add(node_id="3") - -additional_net_params = {"scaling": SCALING, "speed_limit": 23} -net_params = NetParams( - inflows=inflow, - additional_params=additional_net_params) - -flow_params = dict( - # name of the experiment - exp_tag="DesiredVelocity", - - # name of the flow environment the experiment is running on - env_name=BottleneckDesiredVelocityEnv, - - # name of the network class the experiment is running on - network=BottleneckNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.5, - render=False, - print_warnings=False, - restart_instance=True, - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - warmup_steps=40, - sims_per_step=1, - horizon=HORIZON, - additional_params=additional_env_params, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - additional_params=additional_net_params, - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - spacing="uniform", - min_gap=5, - lanes_distribution=float("inf"), 
- edges_distribution=["2", "3", "4", "5"], - ), - - # traffic lights to be introduced to specific nodes (see - # flow.core.params.TrafficLightParams) - tls=traffic_lights, -) diff --git a/flow/controllers/imitation_learning/i210_multiagent.py b/flow/controllers/imitation_learning/i210_multiagent.py deleted file mode 100644 index dcb1135f3..000000000 --- a/flow/controllers/imitation_learning/i210_multiagent.py +++ /dev/null @@ -1,193 +0,0 @@ -"""Multi-agent I-210 example. -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -import os -import numpy as np - -from ray.tune.registry import register_env - -from flow.controllers import RLController -from flow.controllers.car_following_models import IDMController -import flow.config as config -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -from flow.core.params import VehicleParams -from flow.core.params import SumoParams -from flow.core.params import SumoLaneChangeParams -from flow.core.rewards import energy_consumption -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS -from flow.utils.registry import make_create_env - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of steps per rollout -HORIZON = 4000 - -VEH_PER_HOUR_BASE_119257914 = 10800 -VEH_PER_HOUR_BASE_27414345 = 321 -VEH_PER_HOUR_BASE_27414342 = 421 - -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - -# SET UP PARAMETERS FOR THE ENVIRONMENT -additional_env_params = ADDITIONAL_ENV_PARAMS.copy() -additional_env_params.update({ - 'max_accel': 2.6, - 'max_decel': 4.5, - # configure the observation space. Look at the I210MultiEnv class for more info. 
- 'lead_obs': True, - # whether to add in a reward for the speed of nearby vehicles - "local_reward": True -}) - -# CREATE VEHICLE TYPES AND INFLOWS -# no vehicles in the network -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), -) -vehicles.add( - "av", - acceleration_controller=(RLController, {}), - num_vehicles=0, -) - -inflow = InFlows() -# main highway -pen_rate = PENETRATION_RATE / 100 -assert pen_rate < 1.0, "your penetration rate is over 100%" -assert pen_rate > 0.0, "your penetration rate should be above zero" -inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), - # probability=1.0, - depart_lane="random", - departSpeed=20) -# # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321 * pen_rate, -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421 * pen_rate, -# depart_lane="random", -# depart_speed=20) - -# Now add the AVs -# main highway -inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), - # probability=1.0, - depart_lane="random", - depart_speed=20) -# # on ramp -# inflow.add( -# veh_type="av", -# edge="27414345", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="av", -# edge="27414342#0", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), -# depart_lane="random", -# depart_speed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") - -flow_params = dict( - # name of the experiment - exp_tag='I_210_subnetwork', - - # name of the flow environment the experiment is running on - env_name=I210MultiEnv, - - # name 
of the network class the experiment is running on - network=I210SubNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # simulation-related parameters - sim=SumoParams( - sim_step=0.5, - render=False, - color_by_speed=False, - restart_instance=True, - use_ballistic=True, - emission_path="/Users/akashvelu/Documents/data14_2" - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - sims_per_step=1, - warmup_steps=0, - additional_params=additional_env_params, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - template=NET_TEMPLATE - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, - ), -) - -# SET UP RLLIB MULTI-AGENT FEATURES - -create_env, env_name = make_create_env(params=flow_params, version=0) - -# register as rllib env -register_env(env_name, create_env) - -# multiagent configuration -test_env = create_env() -obs_space = test_env.observation_space -act_space = test_env.action_space - -POLICY_GRAPHS = {'av': (None, obs_space, act_space, {})} - -POLICIES_TO_TRAIN = ['av'] - - -def policy_mapping_fn(_): - """Map a policy in RLlib.""" - return 'av' - - -custom_callables = { - "avg_speed": lambda env: np.mean([speed for speed in - env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), - "avg_outflow": lambda env: np.nan_to_num( - env.k.vehicle.get_outflow_rate(120)), - "avg_energy": lambda env: -1*energy_consumption(env, 0.1) -} diff --git a/flow/controllers/imitation_learning/i210_multiagent_ghost.py b/flow/controllers/imitation_learning/i210_multiagent_ghost.py deleted file mode 
100644 index f3357f94b..000000000 --- a/flow/controllers/imitation_learning/i210_multiagent_ghost.py +++ /dev/null @@ -1,181 +0,0 @@ -"""Multi-agent I-210 example. -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -import os - -from ray.rllib.agents.ppo.ppo_policy import PPOTFPolicy -from ray.tune.registry import register_env - -import flow.config as config -from flow.controllers.rlcontroller import RLController -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -from flow.core.params import VehicleParams -from flow.core.params import SumoParams -from flow.core.params import SumoLaneChangeParams -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS -from flow.utils.registry import make_create_env - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of training iterations -N_TRAINING_ITERATIONS = 200 -# number of rollouts per training iteration -N_ROLLOUTS = 2 -# number of steps per rollout -HORIZON = 4000 -# number of parallel workers -N_CPUS = 1 - -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - -# SET UP PARAMETERS FOR THE ENVIRONMENT -additional_env_params = ADDITIONAL_ENV_PARAMS.copy() -additional_env_params.update({ - 'max_accel': 1, - 'max_decel': 1, - # configure the observation space. Look at the I210MultiEnv class for more info. 
- 'lead_obs': True, -}) - -# CREATE VEHICLE TYPES AND INFLOWS -# no vehicles in the network -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ) -) -vehicles.add( - "av", - acceleration_controller=(RLController, {}), - num_vehicles=0, -) - -inflow = InFlows() -# main highway -pen_rate = PENETRATION_RATE / 100 -assert pen_rate < 1.0, "your penetration rate is over 100%" -assert pen_rate > 0.0, "your penetration rate should be above zero" -inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=8378 * pen_rate, - # probability=1.0, - departLane="random", - departSpeed=20) -# on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321 * pen_rate, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421 * pen_rate, -# departLane="random", -# departSpeed=20) - -# Now add the AVs -# main highway -inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(8378 * pen_rate), - # probability=1.0, - departLane="random", - departSpeed=20) -# # on ramp -# inflow.add( -# veh_type="av", -# edge="27414345", -# vehs_per_hour=int(321 * pen_rate), -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="av", -# edge="27414342#0", -# vehs_per_hour=int(421 * pen_rate), -# departLane="random", -# departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") - -flow_params = dict( - # name of the experiment - exp_tag='I_210_subnetwork', - - # name of the flow environment the experiment is running on - env_name=I210MultiEnv, - - # name of the network class the experiment is running on - network=I210SubNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # simulation-related parameters - sim=SumoParams( - sim_step=0.8, - render=True, - color_by_speed=True, - 
restart_instance=True, - emission_path="/Users/akashvelu/Documents/data3" - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - sims_per_step=1, - additional_params=additional_env_params, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - template=NET_TEMPLATE - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, - ), -) - -# SET UP RLLIB MULTI-AGENT FEATURES - -create_env, env_name = make_create_env(params=flow_params, version=0) - -# register as rllib env -register_env(env_name, create_env) - -# multiagent configuration -test_env = create_env() -obs_space = test_env.observation_space -act_space = test_env.action_space - -POLICY_GRAPHS = {'av': (PPOTFPolicy, obs_space, act_space, {})} - -POLICIES_TO_TRAIN = ['av'] - - -def policy_mapping_fn(_): - """Map a policy in RLlib.""" - return 'av' diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index e14ab5850..70c483596 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -12,34 +12,50 @@ class ImitatingController(BaseController): # Implementation in Tensorflow def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): + """ + Args: + veh_id: ID of vehicle to control + action_network: Instance of imitating_network class; neural net that gives action given state + multiagent: boolean indicating if env is multiagent or singleagent + """ 
BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.action_network = action_network - self.multiagent = multiagent - self.veh_id = veh_id + self.action_network = action_network # neural network which specifies action to take + self.multiagent = multiagent # whether env is multiagent or singleagent + self.veh_id = veh_id # vehicle id that controller is controlling def get_accel(self, env): """ - Get acceleration for vehicle in the env - """ + Args: + env: instance of environment being used + Get acceleration for vehicle in the env, using action_network. Overrides superclass method. + """ + # observation is a dictionary for multiagent envs, list for singleagent envs if self.multiagent: observation = env.get_state()[self.veh_id] else: observation = env.get_state() - action = self.action_network.get_accel_from_observation(observation) + # get action from neural net + action = self.action_network.get_accel_from_observation(observation)[0] + + # handles singleagent case in which there are multiple RL vehicles sharing common state + # if action space is multidimensional, obtain the corresponding action for the vehicle + if not self.multiagent and self.action_network.action_dim > 1: + + # get_sorted_rl_ids used for singleagent_straight_road; use get_rl_ids if method does not exist + try: + rl_ids = env.get_sorted_rl_ids() + except: + print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") + rl_ids = env.k.vehicle.get_rl_ids() - if not self.multiagent: - if self.action_network.action_dim > 1: - # TODO: fill in - try: - rl_ids = env.get_sorted_rl_ids() - except: - print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") - rl_ids = env.get_rl_ids() + assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" - assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" 
+ # return the action taken by the vehicle + ind = rl_ids.index(self.veh_id) + return action[ind] - ind = list.index(self.veh_id) - return action[ind] + # in other cases, acceleration is the output of the network + return action[0] diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 5098f0314..0ea5c32c8 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -8,10 +8,29 @@ class ImitatingNetwork(): """ - Neural network which learns to imitate another given expert controller. + Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, noise_variance=0.5, policy_scope='policy_vars', load_existing=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): + + """ + Initializes and constructs neural network + + Args: + sess: Tensorflow session variable + action_dim: dimension of action space (determines size of network output) + obs_dim: dimension of observation space (size of network input) + num_layers: number of hidden layers (for an MLP) + size: size of each layer in network + learning_rate: learning rate used in optimizer + replay_buffer_size: maximum size of replay buffer used to hold data for training + training: boolean, whether the network will be trained (as opposed to loaded) + stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy + policy_scope: variable scope used by Tensorflow for weights/biases + load_existing: boolean, whether to load an existing tensorflow model + load_path: path to directory containing an 
existing tensorflow model + + """ self.sess = sess self.action_dim = action_dim @@ -21,8 +40,8 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.learning_rate = learning_rate self.training = training self.stochastic=stochastic - self.noise_variance = noise_variance + # load network if specified, or construct network if load_existing: self.load_network(load_path) @@ -30,21 +49,25 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): self.build_network() + # init replay buffer if self.training: self.replay_buffer = ReplayBuffer(replay_buffer_size) else: self.replay_buffer = None + # set up policy variables, and saver to save model. Save only non-training variables (weights/biases) if not load_existing: self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) def build_network(self): """ - Defines neural network for choosing actions. + Defines neural network for choosing actions. Defines placeholders and forward pass """ + # setup placeholders for network input and labels for training, and hidden layers/output self.define_placeholders() self.define_forward_pass() + # set up training operation (e.g. 
Adam optimizer) if self.training: with tf.variable_scope('train', reuse=tf.AUTO_REUSE): self.define_train_op() @@ -54,31 +77,39 @@ def load_network(self, path): """ Load tensorflow model from the path specified, set action prediction to proper placeholder """ + # load and restore model loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') loader.restore(self.sess, path+'model.ckpt') - # print([n.name for n in tf.get_default_graph().as_graph_def().node]) + # get observation placeholder (for input into network) self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/observation:0') + # get output tensor (using name of appropriate tensor) network_output = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') + # for stochastic policies, the network output is twice the action dimension. First half specifies the mean of a multivariate gaussian distribution, second half specifies the diagonal entries for the diagonal covariance matrix. + # for deterministic policies, network output is the action. 
if self.stochastic: - # determine mean and (diagonal) covariance matrix for action distribution - mean = network_output[:self.action_dim] - cov_diag = network_output[self.action_dim:] + # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution + means = network_output[:, :self.action_dim] + cov_diags = network_output[:, self.action_dim:] + # set up action distribution (parameterized by network output) - dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) - # action is a sample from this distribution - self.action_predictions = dist.sample() + # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + # action is a sample from this distribution; one sample output per Gaussian contained in self.dist + self.action_predictions = self.dist.sample() else: + self.dist = None self.action_predictions = network_output def define_placeholders(self): """ Defines input, output, and training placeholders for neural net """ + # placeholder for observations (input into network) self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="observation", dtype=tf.float32) - self.action_placeholder = tf.placeholder(shape=[None, self.action_dim], name="action", dtype=tf.float32) + # if training, define placeholder for labels (supervised leearning) if self.training: self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) @@ -87,24 +118,30 @@ def define_forward_pass(self): """ Build network and initialize proper action prediction op """ - self.stochastic = False + # network output is twice action dim if stochastic (1st half mean, 2nd half diagonal elements of covariance) if self.stochastic: output_size = 2 * self.action_dim else: output_size = self.action_dim + # build forward pass and get the 
tensor for output of last layer network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) - self.network_output = network_output - # TODO: add this as a class variable + # unpack array of array into just array + # if self.stochastic: + # # network_output = network_output[0] + + # parse the mean and covariance from output if stochastic, and set up distribution if self.stochastic: - # determine mean and (diagonal) covariance matrix for action distribution - mean = network_output[:self.action_dim] - cov_diag = network_output[self.action_dim:] + # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution + means = network_output[:, :self.action_dim] + cov_diags = network_output[:, self.action_dim:] + # set up action distribution (parameterized by network output) - self.dist = tfp.distributions.MultivariateNormalDiag(loc=mean, scale_diag=cov_diag) - # action is a sample from this distribution - self.action_predictions = dist.sample() + # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + # action is a sample from this distribution; one sample output per Gaussian contained in self.dist + self.action_predictions = self.dist.sample() else: self.dist = None @@ -115,9 +152,9 @@ def define_train_op(self): """ Defines training operations for network (loss function and optimizer) """ + # labels true_actions = self.action_labels_placeholder - network_prediction = self.network_output - + predicted_actions = self.action_predictions if self.stochastic: # negative log likelihood loss for stochastic policy @@ -125,16 +162,18 @@ def define_train_op(self): self.loss = -tf.reduce_mean(log_likelihood) else: # MSE loss for deterministic policy - self.loss = 
tf.losses.mean_squared_error(true_actions, network_prediction) + self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) + # Adam optimizer self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): """ Executes one training step for the given batch of observation and action data """ + # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - ret = self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) def get_accel_from_observation(self, observation): """ @@ -144,14 +183,14 @@ def get_accel_from_observation(self, observation): # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays if len(observation.shape)<=1: observation = observation[None] + # "batch size" is 1, so just get single acceleration/acceleration vector ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] return ret_val def get_accel(self, env): """ - Get network's acceleration prediction based on given env + Get network's acceleration prediction(s) based on given env """ - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays observation = env.get_state() return self.get_accel_from_observation(observation) diff --git a/flow/controllers/imitation_learning/multiagent_ring_env.py b/flow/controllers/imitation_learning/multiagent_ring_env.py deleted file mode 100644 index 4fa72addc..000000000 --- a/flow/controllers/imitation_learning/multiagent_ring_env.py +++ /dev/null @@ -1,99 +0,0 @@ -"""Ring road example. 
-Trains a number of autonomous vehicles to stabilize the flow of 22 vehicles in -a variable length ring road. -""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams -from flow.core.params import VehicleParams, SumoCarFollowingParams -from flow.controllers import RLController, IDMController, ContinuousRouter -from flow.envs.multiagent import MultiAgentWaveAttenuationPOEnv -from flow.networks import RingNetwork -from flow.utils.registry import make_create_env - -# time horizon of a single rollout -HORIZON = 3000 -# number of rollouts per training iteration -N_ROLLOUTS = 20 -# number of parallel workers -N_CPUS = 2 -# number of automated vehicles. Must be less than or equal to 22. -NUM_AUTOMATED = 2 - - -# We evenly distribute the automated vehicles in the network. -num_human = 22 - NUM_AUTOMATED -humans_remaining = num_human - -vehicles = VehicleParams() -for i in range(NUM_AUTOMATED): - # Add one automated vehicle. - vehicles.add( - veh_id="rl_{}".format(i), - acceleration_controller=(RLController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=1) - - # Add a fraction of the remaining human vehicles. 
- vehicles_to_add = round(humans_remaining / (NUM_AUTOMATED - i)) - humans_remaining -= vehicles_to_add - vehicles.add( - veh_id="human_{}".format(i), - acceleration_controller=(IDMController, { - "noise": 0.2 - }), - car_following_params=SumoCarFollowingParams( - min_gap=0 - ), - routing_controller=(ContinuousRouter, {}), - num_vehicles=vehicles_to_add) - - -flow_params = dict( - # name of the experiment - exp_tag="multiagent_ring", - - # name of the flow environment the experiment is running on - env_name=MultiAgentWaveAttenuationPOEnv, - - # name of the network class the experiment is running on - network=RingNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.1, - render=False, - restart_instance=False - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=750, - clip_actions=False, - additional_params={ - "max_accel": 1, - "max_decel": 1, - "ring_length": [220, 270], - }, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - additional_params={ - "length": 260, - "lanes": 1, - "speed_limit": 30, - "resolution": 40, - }, ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 77902814c..58bdd2cd7 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -8,6 +8,7 @@ class ReplayBuffer(object): def __init__(self, max_size=100000): + # max size of buffer 
self.max_size = max_size # store each rollout @@ -34,6 +35,7 @@ def add_rollouts(self, rollouts_list): assert (not np.any(np.isnan(expert_actions))), "Invalid actions added to replay buffer" + # only keep max_size tuples in buffer if self.observations is None: self.observations = observations[-self.max_size:] self.actions = actions[-self.max_size:] diff --git a/flow/controllers/imitation_learning/ring_env.py b/flow/controllers/imitation_learning/ring_env.py deleted file mode 100644 index 20ced1ce9..000000000 --- a/flow/controllers/imitation_learning/ring_env.py +++ /dev/null @@ -1,85 +0,0 @@ -"""Ring road example. -Trains a single autonomous vehicle to stabilize the flow of 21 human-driven -vehicles in a variable length ring road. -""" -from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams -from flow.core.params import VehicleParams, SumoCarFollowingParams -from flow.controllers import RLController, IDMController, ContinuousRouter -from flow.envs import WaveAttenuationPOEnv -from flow.networks import RingNetwork - -# time horizon of a single rollout -HORIZON = 3000 -# number of rollouts per training iteration -N_ROLLOUTS = 20 -# number of parallel workers -N_CPUS = 2 - -# We place one autonomous vehicle and 22 human-driven vehicles in the network -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, { - "noise": 0.2 - }), - car_following_params=SumoCarFollowingParams( - min_gap=0 - ), - routing_controller=(ContinuousRouter, {}), - num_vehicles=21) -vehicles.add( - veh_id="rl", - acceleration_controller=(RLController, {}), - routing_controller=(ContinuousRouter, {}), - num_vehicles=1) - -flow_params = dict( - # name of the experiment - exp_tag="stabilizing_the_ring", - - # name of the flow environment the experiment is running on - env_name=WaveAttenuationPOEnv, - - # name of the network class the experiment is running on - network=RingNetwork, - - # simulator that is used by the experiment - 
simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.1, - render=False, - restart_instance=False - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=750, - clip_actions=False, - additional_params={ - "max_accel": 1, - "max_decel": 1, - "ring_length": [220, 270], - }, - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - additional_params={ - "length": 260, - "lanes": 1, - "speed_limit": 30, - "resolution": 40, - }, ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 5ab94b425..17434d63e 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,7 +1,6 @@ import os import time import numpy as np -#import tensorflow as tf from trainer import Trainer from flow.controllers.car_following_models import IDMController @@ -11,22 +10,33 @@ class Runner(object): def __init__(self, params): - # initialize trainer + # initialize trainer class instance and params self.params = params self.trainer = Trainer(params) def run_training_loop(self): - + """ + Runs training for imitation learning for specified number of iterations + """ self.trainer.run_training_loop(n_iter=self.params['n_iter']) def evaluate(self): + """ + Evaluates a trained controller over a specified number trajectories; compares average action per step and average reward per trajectory between imitator and expert + """ self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) 
def save_controller_network(self): + """ + Saves a tensorflow checkpoint to path specified in params (and writes to tensorboard) + """ self.trainer.save_controller_network() def main(): + """ + Parse args, run training, and evalutation + """ import argparse parser = argparse.ArgumentParser() parser.add_argument('--ep_len', type=int, default=5000) diff --git a/flow/controllers/imitation_learning/singleagent_straight_road.py b/flow/controllers/imitation_learning/singleagent_straight_road.py deleted file mode 100644 index bcebad140..000000000 --- a/flow/controllers/imitation_learning/singleagent_straight_road.py +++ /dev/null @@ -1,163 +0,0 @@ -"""Multi-agent highway with ramps example. -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. -""" -from flow.controllers import RLController, IDMController -from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ - VehicleParams, SumoParams, SumoLaneChangeParams -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS -from flow.networks import HighwayNetwork -from flow.envs.straightroad_env import SingleStraightRoad -from flow.networks.highway import ADDITIONAL_NET_PARAMS -from flow.utils.registry import make_create_env -from ray.tune.registry import register_env - - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of steps per rollout -HORIZON = 2000 - -# inflow rate on the highway in vehicles per hour -HIGHWAY_INFLOW_RATE = 10800 / 5 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - - -# SET UP PARAMETERS FOR THE NETWORK - -additional_net_params = ADDITIONAL_NET_PARAMS.copy() -additional_net_params.update({ - # length of the highway - "length": 2000, - # number of lanes - "lanes": 1, - # speed limit for all edges - "speed_limit": 30, - # number of edges to divide the highway into - "num_edges": 2 -}) - - -# SET UP PARAMETERS FOR THE ENVIRONMENT - -additional_env_params = 
ADDITIONAL_ENV_PARAMS.copy() -additional_env_params.update({ - 'max_accel': 2.6, - 'max_decel': 4.5, - 'target_velocity': 14.0, - 'local_reward': True, - 'lead_obs': True, - "terminate_on_wave": False, - # the environment is not allowed to terminate below this horizon length - 'wave_termination_horizon': 1000, - # the speed below which we consider a wave to have occured - 'wave_termination_speed': 10.0, - # whether the vehicle continues to acquire reward after it exits the system. This causes it to have incentive - # to leave the network in a good state after it leaves - 'reward_after_exit': True -}) - - -# CREATE VEHICLE TYPES AND INFLOWS - -vehicles = VehicleParams() -inflows = InFlows() - -# human vehicles -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), -) - -# autonomous vehicles -vehicles.add( - veh_id='rl', - acceleration_controller=(RLController, {})) - -# add human vehicles on the highway -inflows.add( - veh_type="human", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23.0", - name="idm_highway_inflow") - -# add autonomous vehicles on the highway -# they will stay on the highway, i.e. 
they won't exit through the off-ramps -inflows.add( - veh_type="rl", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23.0", - name="rl_highway_inflow") - -# SET UP FLOW PARAMETERS -done_at_exit = True -if additional_env_params['reward_after_exit']: - done_at_exit = False - -flow_params = dict( - # name of the experiment - exp_tag='singleagent_highway', - - # name of the flow environment the experiment is running on - env_name=SingleStraightRoad, - - # name of the network class the experiment is running on - network=HighwayNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=0, - sims_per_step=1, # do not put more than one - done_at_exit=done_at_exit, - additional_params=additional_env_params, - ), - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.5, - render=False, - use_ballistic=True, - restart_instance=True - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflows, - additional_params=additional_net_params - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) - - -# SET UP RLLIB MULTI-AGENT FEATURES - -create_env, env_name = make_create_env(params=flow_params, version=0) - -# register as rllib env -register_env(env_name, create_env) - -# multiagent configuration -test_env = create_env() -obs_space = test_env.observation_space -act_space = test_env.action_space diff --git a/flow/controllers/imitation_learning/trainer.py 
b/flow/controllers/imitation_learning/trainer.py index a390502d4..801c7517f 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,7 +5,7 @@ import gym import os from flow.utils.registry import make_create_env -from i210_multiagent import flow_params +from env_configs.singleagent_straight_road import flow_params from imitating_controller import ImitatingController from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController @@ -29,12 +29,19 @@ def __init__(self, params): # environment setup create_env, _ = make_create_env(flow_params) self.env = create_env() - init_state = self.env.reset() # vehicle setup - self.multiagent = params['multiagent'] - - self.vehicle_ids = self.env.k.vehicle.get_rl_ids() + self.multiagent = params['multiagent'] # multiagent or singleagent env + + if not self.multiagent and self.env.action_space.shape[0] > 1: + # use sorted rl ids if the method exists (e.g.. 
singlagent straightroad) + try: + self.vehicle_ids = self.env.get_sorted_rl_ids() + except: + self.vehicle_ids = self.k.vehicle.get_rl_ids() + else: + # use get_rl_ids if sorted_rl_ids doesn't exist + self.vehicle_ids = self.env.k.vehicle.get_rl_ids() # neural net setup obs_dim = self.env.observation_space.shape[0] @@ -43,14 +50,16 @@ def __init__(self, params): self.params['action_dim'] = action_dim self.params['obs_dim'] = obs_dim - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], noise_variance=self.params['noise_variance']) + # initialize neural network class and tf variables + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) tf.global_variables_initializer().run(session=self.sess) # controllers setup - v_des = self.params['v_des'] + v_des = self.params['v_des'] # for FollowerStopper car_following_params = SumoCarFollowingParams() self.controllers = dict() + # initialize controllers: save in a dictionary to avoid re-initializing a controller for a vehicle for vehicle_id in self.vehicle_ids: expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) imitator = ImitatingController(vehicle_id, self.action_network, self.multiagent, car_following_params=car_following_params) @@ -59,13 +68,14 @@ def __init__(self, params): def run_training_loop(self, n_iter): """ - Trains imitator for n_iter iterations (each iter runs optimizer once on given batch of dat) + Trains imitator for n_iter iterations (each iteration collects new trajectories to put in replay buffer) Args: param n_iter: number of iterations to execute training """ # init vars 
at beginning of training + # number of environment steps taken throughout training self.total_envsteps = 0 for itr in range(n_iter): @@ -76,23 +86,24 @@ def run_training_loop(self, n_iter): # first iteration is behavioral cloning training_returns = self.collect_training_trajectories(itr, self.params['init_batch_size']) else: + # other iterations use DAgger (trajectories collected by running imitator policy) training_returns = self.collect_training_trajectories(itr, self.params['batch_size']) paths, envsteps_this_batch = training_returns self.total_envsteps += envsteps_this_batch - # add collected data to replay buffer + # add collected data to replay buffer in neural network class self.action_network.add_to_replay_buffer(paths) - # train controller (using sampled data from replay buffer) - loss = self.train_controller() + # train controller + self.train_controller() def collect_training_trajectories(self, itr, batch_size): """ Collect (state, action, reward, next_state, terminal) tuples for training Args: - itr: iteration of training during which functino is called + itr: iteration of training during which function is called. Used to determine whether to run behavioral cloning or DAgger batch_size: number of tuples to collect Returns: paths: list of trajectories @@ -106,17 +117,19 @@ def collect_training_trajectories(self, itr, batch_size): def train_controller(self): """ - Trains controller using data sampled from replay buffer + Trains controller for specified number of steps, using data sampled from replay buffer; each step involves running optimizer (i.e. 
Adam) once """ - print('Training controller using sampled data from replay buffer') + print("Training controller using sampled data from replay buffer...") for train_step in range(self.params['num_agent_train_steps_per_iter']): + # sample data from replay buffer ob_batch, ac_batch, expert_ac_batch = self.action_network.sample_data(self.params['train_batch_size']) + # train network on sampled data self.action_network.train(ob_batch, expert_ac_batch) def evaluate_controller(self, num_trajs = 10): """ - Evaluates a trained controller on similarity with expert with respect to action taken and total reward per rollout + Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout Args: num_trajs: number of trajectories to evaluate performance on @@ -124,16 +137,19 @@ def evaluate_controller(self, num_trajs = 10): print("\n\n********** Evaluation ************ \n") + + # collect imitator driven trajectories (along with corresponding expert actions) trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False, v_des=self.params['v_des']) - total_imitator_steps = 0 - average_imitator_reward_per_rollout = 0 + # initialize metrics + total_imitator_steps = 0 # total number of environment steps taken across the n trajectories + average_imitator_reward_per_rollout = 0 # average reward per rollout achieved by imitator - action_errors = np.array([]) - average_action_expert = 0 - average_action_imitator = 0 + action_errors = np.array([]) # difference in action (acceleration) taken between expert and imitator + average_action_expert = 0 # average action taken, across all timesteps, by expert (used to compute % average) + average_action_imitator = 0 # average action taken, across all timesteps, by imitator (used to compute % average) - # compare actions taken in each step of trajectories + # compare actions taken in each step of 
trajectories (trajectories are controlled by imitator) for traj_tuple in trajectories: traj = traj_tuple[0] traj_len = traj_tuple[1] @@ -144,21 +160,22 @@ def evaluate_controller(self, num_trajs = 10): average_action_expert += np.sum(expert_actions) average_action_imitator += np.sum(imitator_actions) - action_error = np.linalg.norm(imitator_actions - expert_actions) / len(imitator_actions) + # use RMSE as action error metric + action_error = (np.linalg.norm(imitator_actions - expert_actions)) / len(imitator_actions) action_errors = np.append(action_errors, action_error) total_imitator_steps += traj_len average_imitator_reward_per_rollout += np.sum(traj['rewards']) + # compute averages for metrics average_imitator_reward_per_rollout = average_imitator_reward_per_rollout / len(trajectories) average_action_expert = average_action_expert / total_imitator_steps - average_action_imitator = average_action_imitator / total_imitator_steps - + # collect expert driven trajectories (these trajectories are only used to compare average reward per rollout) expert_trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, True, v_des=self.params['v_des']) - average_expert_reward = 0 + # initialize metrics total_expert_steps = 0 average_expert_reward_per_rollout = 0 @@ -166,21 +183,27 @@ def evaluate_controller(self, num_trajs = 10): for traj_tuple in expert_trajectories: traj = traj_tuple[0] traj_len = traj_tuple[1] - average_expert_reward += np.sum(traj['rewards']) total_expert_steps += traj_len average_expert_reward_per_rollout += np.sum(traj['rewards']) average_expert_reward_per_rollout = average_expert_reward_per_rollout / len(expert_trajectories) - average_expert_reward = average_expert_reward / total_expert_steps + # compute percent errors (using expert values as 'ground truth') + percent_error_average_reward = (np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout) / 
average_expert_reward_per_rollout) * 100 + + percent_error_average_action = (np.abs(np.mean(action_errors)) / np.abs(average_action_expert)) * 100 - print("AVERAGE REWARD PER ROLLOUT EXPERT: ", average_expert_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT IMITATOR: ", average_imitator_reward_per_rollout) - print("AVERAGE REWARD PER ROLLOUT DIFFERENCE: \n", np.abs(average_expert_reward_per_rollout - average_imitator_reward_per_rollout), "\n") + # Print results + print("\nAverage reward per rollout, expert: ", average_expert_reward_per_rollout) + print("Average reward per rollout, imitator: ", average_imitator_reward_per_rollout) + print("% Difference, average reward per rollout: ", percent_error_average_reward, "\n") - print("MEAN EXPERT ACTION: ", average_action_expert) - print("MEAN ACTION ERROR: ", np.mean(action_errors), "\n") + print(" Average RMSE action error per rollout: ", np.mean(action_errors)) + print("Average Action Taken by Expert: ", average_action_expert) + print("% Action Error: ", percent_error_average_action, "\n") + print("Total imitator steps: ", total_imitator_steps) + print("Total expert steps: ", total_expert_steps) def save_controller_network(self): diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index a1334066f..198a2a4ad 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -24,21 +24,24 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ - vehicle_ids = env.k.vehicle.get_rl_ids() + # reset and initialize arrays to store trajectory observation = env.reset() - if len(vehicle_ids) == 1: - vehicle_id = vehicle_ids[0] - else: - vehicle_id = None - observations, actions, expert_actions, rewards, next_observations, terminals = [], [], [], [], [], [] 
traj_length = 0 while True: - # update vehicle ids - vehicle_ids = env.k.vehicle.get_rl_ids() + # update vehicle ids: if multidimensional action space, check if env has a sorted_rl_ids method + if env.action_space.shape[0] > 1: + try: + vehicle_ids = env.get_sorted_rl_ids() + except: + vehicle_ids = env.k.vehicle.get_rl_ids() + else: + vehicle_ids = env.k.vehicle.get_rl_ids() + + # no RL actions if no RL vehicles if len(vehicle_ids) == 0: observation, reward, done, _ = env.step(None) if done: @@ -47,27 +50,26 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto # init controllers if any of vehicle ids are new # there could be multiple vehicle ids if they all share one state but have different actions + car_following_params = SumoCarFollowingParams() + for vehicle_id in vehicle_ids: - if vehicle_id not in set(controllers.get_keys()): + if vehicle_id not in set(controllers.keys()): expert = FollowerStopper(vehicle_id, car_following_params=car_following_params, v_des=v_des) - imitator = ImitatingController(vehicle_id, action_network, false, car_following_params=car_following_params) + imitator = ImitatingController(vehicle_id, action_network, False, car_following_params=car_following_params) controllers[vehicle_id] = (imitator, expert) - print("CONTROLLING CONTROLLER: ", controller) - print("EXPERT CONTROLLER: ", expert_controller) - - # get the actions + # get the actions given by controllers action_dim = env.action_space.shape[0] rl_actions = [] - expert_actions = [] + actions_expert = [] invalid_expert_action = False - for i in range(len(action_dim)): + for i in range(action_dim): # if max number of RL vehicles is not reached, insert dummy values if i >= len(vehicle_ids): rl_actions.append(0.0) - expert_actions.append(0.0) + actions_expert.append(0.0) else: imitator = controllers[vehicle_ids[i]][0] expert = controllers[vehicle_ids[i]][1] @@ -77,32 +79,39 @@ def sample_trajectory_singleagent(env, controllers, action_network, 
max_trajecto if (expert_action is None or math.isnan(expert_action)): invalid_expert_action = True - expert_actions.append(expert_action) + actions_expert.append(expert_action) if use_expert: + if traj_length == 0 and i == 0: + print("Controller collecing trajectory: ", type(expert)) rl_actions.append(expert_action) else: - rl_actions.append(imitator.get_action(env)) + if traj_length == 0 and i == 0: + print("Controller collecting trajectory: ", type(imitator)) + imitator_action = imitator.get_action(env) + rl_actions.append(imitator_action) - # don't add invalid expert actions to replay buffer if any are invalid - if invalid_expert_action: - if use_expert: - observation, reward, done, _ = env.step(None) - else: - observation, reward, done, _ = env.step(rl_actions) - + # invalid action in rl_actions; default to Sumo, ignore sample + if None in rl_actions or np.nan in rl_actions: + observation, reward, done, _ = env.step(None) + terminate_rollout = traj_length == max_trajectory_length or done + if terminate_rollout: + break + continue + # invalid expert action (if rl_actions is expert actions then this would have been caught above)) + if not use_expert and invalid_expert_action: + # throw away sample, but step according to rl_actions + observation, reward, done, _ = env.step(rl_actions) terminate_rollout = traj_length == max_trajectory_length or done - if terminate_rollout: break - # skip to next step continue # update collected data observations.append(observation) actions.append(rl_actions) - expert_actions.append(expert_actions) + expert_actions.append(actions_expert) observation, reward, done, _ = env.step(rl_actions) traj_length += 1 @@ -114,7 +123,7 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto if terminate_rollout: break - return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length) + return traj_dict(observations, actions, expert_actions, rewards, next_observations, 
terminals), traj_length def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): @@ -143,7 +152,6 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: observation_dict, reward, done, _ = env.step(None) - print(env.k.vehicle.get_rl_ids()) if done['__all__']: break continue @@ -171,10 +179,11 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector controller = controllers[vehicle_id][0] if traj_length == 0 and i == 0: - print("COLLECTOR: ", controller) + print("Controller collecting trajectory: ", controller) action = controller.get_action(env) + # action should be a scalar acceleration if type(action) == np.ndarray: action = action.flatten()[0] @@ -262,7 +271,7 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le Returns: - List of rollout dictionaries + List of rollouts (tuple of rollout dictionary, length of rollout) """ trajectories = [] From 0b08c331f7af272fe17ee4df7c8990311e73f56b Mon Sep 17 00:00:00 2001 From: Akash Velu <31679538+akashvelu@users.noreply.github.com> Date: Sun, 3 May 2020 23:38:12 -0700 Subject: [PATCH 038/335] Delete Untitled.ipynb --- .../imitation_learning/Untitled.ipynb | 856 ------------------ 1 file changed, 856 deletions(-) delete mode 100644 flow/controllers/imitation_learning/Untitled.ipynb diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb deleted file mode 100644 index d412275b8..000000000 --- a/flow/controllers/imitation_learning/Untitled.ipynb +++ /dev/null @@ -1,856 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing 
(type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, 
use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import gym\n", - "from i210_multiagent import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.sample()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.step({})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.velocity_controllers import FollowerStopper\n", - "from flow.core.params import SumoCarFollowingParams\n", - 
"car_following_params = SumoCarFollowingParams()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert = FollowerStopper('followerstopper_0', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.k.vehicle.get_ids())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2 = FollowerStopper('flow_10.1', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = tf.convert_to_tensor(np.array([1,2]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t.get_shape()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t[0:1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean = tf.convert_to_tensor(np.array([1.0,2.0]))\n", - "cov = tf.convert_to_tensor(np.array([1.0,1.0]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "tf.random_normal(tf.shape(tf.convert_to_tensor(np.array([1, 1]))), np.array([0,0]), np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": 
[ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.cast(tf.shape(mean), tf.int64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.diag(np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow_probability as tfp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd = tfp.distributions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess = tf.Session()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn = tfd.MultivariateNormalDiag(loc=mean, scale_diag=cov)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([-1, 0]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mean)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([1, 2.5]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mvn.sample(1))" - ] - }, - { - "cell_type": "code", - 
"execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "inp = tf.placeholder(shape=[None, 2], name=\"obs\", dtype=tf.float32)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "out = inp\n", - "for _ in range(2):\n", - " out = tf.layers.dense(out, 30, activation=tf.tanh)\n", - "out = tf.layers.dense(out, 2, activation=None, name=\"output\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pred = out" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(pred)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.global_variables_initializer().run(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([1,1])\n", - "obs = obs[None]\n", - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([[1,1], [1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(ret)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "batch = np.array([[3,3],[4,4],[1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "log_likelihood = sess.run(mvn.log_prob(batch))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "log_likelihood" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(tf.reduce_mean(log_likelihood))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.mean(log_likelihood)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "labels_batch = tf.placeholder(shape=[None, 2], dtype=tf.float64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ll = mvn.log_prob(labels_batch)\n", - "loss = tf.reduce_mean(ll, axis=-1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b = batch.reshape(batch.shape[0], 2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run([loss], feed_dict={labels_batch:b})" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from singleagent_straight_road import flow_params\n", - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0.])" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": 
{}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Box(8,)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.action_space" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(100):\n", - " env.step(None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "24" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.1', 'rl_highway_inflow_10.2']" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = {}\n", - "for vehicle_id in env.k.vehicle.get_rl_ids():\n", - " rl_actions[vehicle_id] = 1.0\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.2', 'rl_highway_inflow_10.1']" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.get_sorted_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - 
"metadata": {}, - "outputs": [], - "source": [ - "rl_actions = [1,1,1,0,0,0,0,0]\n", - "rl_actions = np.array(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "rl_highway_inflow_10.0\n", - "1\n", - "rl_highway_inflow_10.2\n", - "2\n", - "rl_highway_inflow_10.1\n" - ] - }, - { - "data": { - "text/plain": [ - "(array([0.54393322, 0.06077194, 0.56137638, 0.40959813, 0.0259221 ,\n", - " 0.4041333 , 0.42759098, 0.02818569, 0.42912874, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ]),\n", - " 0.1718155323023197,\n", - " False,\n", - " {})" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "error\n" - ] - } - ], - "source": [ - "try:\n", - " test(1)\n", - "except:\n", - " print(\"error\")" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "ename": "AssertionError", - "evalue": "blah", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"blah\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m: blah" - ] - } - ], - "source": [ - "assert False, \"blah\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - 
"kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} From 21ee5ce71739a5a6ad1a59009fd7fa5a4751e683 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 3 May 2020 23:43:24 -0700 Subject: [PATCH 039/335] Removed files --- .../imitation_learning/Untitled.ipynb | 856 ------------------ .../imitation_learning/Useless/Untitled.ipynb | 438 --------- .../Useless/Untitled1.ipynb | 96 -- ...ents.1587254017.Akashs-MacBook-Pro-2.local | Bin 265723 -> 0 bytes ...ents.1587339098.Akashs-MacBook-Pro-2.local | Bin 267581 -> 0 bytes ...ents.1587776769.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587779365.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587780241.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587781276.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587789385.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587841939.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587848505.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587855757.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587860905.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes ...ents.1587860969.Akashs-MacBook-Pro-2.local | Bin 267629 -> 0 bytes 15 files changed, 1390 deletions(-) delete mode 100644 flow/controllers/imitation_learning/Untitled.ipynb delete mode 100644 flow/controllers/imitation_learning/Useless/Untitled.ipynb delete mode 100644 flow/controllers/imitation_learning/Useless/Untitled1.ipynb delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local delete mode 100644 
flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587776769.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587779365.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587780241.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587781276.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587841939.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587855757.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860905.Akashs-MacBook-Pro-2.local delete mode 100644 flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local diff --git a/flow/controllers/imitation_learning/Untitled.ipynb b/flow/controllers/imitation_learning/Untitled.ipynb deleted file mode 100644 index d412275b8..000000000 --- a/flow/controllers/imitation_learning/Untitled.ipynb +++ /dev/null @@ -1,856 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - 
" _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "import numpy as np\n", - "import gym\n", - "from 
i210_multiagent import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.sample()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.step({})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.action_space.shape[0]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.controllers.velocity_controllers import FollowerStopper\n", - "from flow.core.params import SumoCarFollowingParams\n", - "car_following_params = SumoCarFollowingParams()\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - 
"source": [ - "expert = FollowerStopper('followerstopper_0', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "len(env.k.vehicle.get_ids())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2 = FollowerStopper('flow_10.1', car_following_params=car_following_params)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "expert2.get_action(env)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow as tf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = tf.convert_to_tensor(np.array([1,2]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t.get_shape()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t[0:1]" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean = tf.convert_to_tensor(np.array([1.0,2.0]))\n", - "cov = tf.convert_to_tensor(np.array([1.0,1.0]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true - }, - "outputs": [], - "source": [ - "tf.random_normal(tf.shape(tf.convert_to_tensor(np.array([1, 1]))), np.array([0,0]), np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.cast(tf.shape(mean), 
tf.int64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mean" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.diag(np.array([1,1]))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "import tensorflow_probability as tfp" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd = tfp.distributions" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tfd" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess = tf.Session()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn = tfd.MultivariateNormalDiag(loc=mean, scale_diag=cov)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "cov" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([-1, 0]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mean)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "mvn.prob([1, 2.5]).eval(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run(mvn.sample(1))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "inp = tf.placeholder(shape=[None, 2], name=\"obs\", dtype=tf.float32)" 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "out = inp\n", - "for _ in range(2):\n", - " out = tf.layers.dense(out, 30, activation=tf.tanh)\n", - "out = tf.layers.dense(out, 2, activation=None, name=\"output\")" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "pred = out" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(pred)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "tf.global_variables_initializer().run(session=sess)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([1,1])\n", - "obs = obs[None]\n", - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "obs = np.array([[1,1], [1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret = sess.run([pred], feed_dict={inp:obs})" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ret" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "type(ret)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "batch = np.array([[3,3],[4,4],[1,1]])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "log_likelihood = sess.run(mvn.log_prob(batch))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "log_likelihood" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - 
"outputs": [], - "source": [ - "sess.run(tf.reduce_mean(log_likelihood))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "np.mean(log_likelihood)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "labels_batch = tf.placeholder(shape=[None, 2], dtype=tf.float64)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "ll = mvn.log_prob(labels_batch)\n", - "loss = tf.reduce_mean(ll, axis=-1)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b = batch.reshape(batch.shape[0], 2)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "sess.run([loss], feed_dict={labels_batch:b})" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "from singleagent_straight_road import flow_params\n", - "from flow.utils.registry import make_create_env\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params)" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0., 0.])" - ] - }, - "execution_count": 6, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.reset()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[]" - ] - }, - "execution_count": 7, - "metadata": {}, - "output_type": "execute_result" - } 
- ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "Box(8,)" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.action_space" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [], - "source": [ - "for i in range(100):\n", - " env.step(None)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "24" - ] - }, - "execution_count": 10, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "len(env.get_state())" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.1', 'rl_highway_inflow_10.2']" - ] - }, - "execution_count": 11, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.k.vehicle.get_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 12, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = {}\n", - "for vehicle_id in env.k.vehicle.get_rl_ids():\n", - " rl_actions[vehicle_id] = 1.0\n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['rl_highway_inflow_10.0', 'rl_highway_inflow_10.2', 'rl_highway_inflow_10.1']" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.get_sorted_rl_ids()" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [], - "source": [ - "rl_actions = [1,1,1,0,0,0,0,0]\n", - "rl_actions = np.array(rl_actions)" - ] - }, - { - "cell_type": 
"code", - "execution_count": 17, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "rl_highway_inflow_10.0\n", - "1\n", - "rl_highway_inflow_10.2\n", - "2\n", - "rl_highway_inflow_10.1\n" - ] - }, - { - "data": { - "text/plain": [ - "(array([0.54393322, 0.06077194, 0.56137638, 0.40959813, 0.0259221 ,\n", - " 0.4041333 , 0.42759098, 0.02818569, 0.42912874, 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ]),\n", - " 0.1718155323023197,\n", - " False,\n", - " {})" - ] - }, - "execution_count": 17, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.step(rl_actions)" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "error\n" - ] - } - ], - "source": [ - "try:\n", - " test(1)\n", - "except:\n", - " print(\"error\")" - ] - }, - { - "cell_type": "code", - "execution_count": 21, - "metadata": {}, - "outputs": [ - { - "ename": "AssertionError", - "evalue": "blah", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32massert\u001b[0m \u001b[0;32mFalse\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"blah\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;31mAssertionError\u001b[0m: blah" - ] - } - ], - "source": [ - "assert False, \"blah\"" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - 
"version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/controllers/imitation_learning/Useless/Untitled.ipynb b/flow/controllers/imitation_learning/Useless/Untitled.ipynb deleted file mode 100644 index 982ef03a7..000000000 --- a/flow/controllers/imitation_learning/Useless/Untitled.ipynb +++ /dev/null @@ -1,438 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - 
"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n", - "/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/utils/flow_warnings.py:26: PendingDeprecationWarning: The attribute departSpeed in InFlows is deprecated, use depart_speed instead.\n", - " PendingDeprecationWarning\n" - ] - } - ], - "source": [ - "import time\n", - "import pickle\n", - "import numpy as np\n", - "import gym\n", - "import os\n", - "from flow.utils.registry import make_create_env\n", - "from i210_multiagent import flow_params as flow_params_multi\n", - "from flow.controllers.car_following_models import IDMController\n", - "from flow.core.params import SumoCarFollowingParams\n", - "from utils import *\n", - "from imitating_network import *\n", - "from utils_tensorflow import *" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "metadata": {}, - "outputs": [], - "source": [ - "create_env, _ = make_create_env(flow_params_multi)" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "env = create_env()" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [], - "source": [ - "obs_dim = env.observation_space.shape[0]\n", - "action_dim = (1,)[0]" - ] - }, - { - "cell_type": "code", - "execution_count": 5, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "3" - ] - }, - 
"execution_count": 5, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "obs_dim" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [], - "source": [ - "sess = create_tf_session()" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "INFO:tensorflow:Restoring parameters from /Users/akashvelu/Documents/models2/model.ckpt\n" - ] - } - ], - "source": [ - "action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n" - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/subprocess.py:786: ResourceWarning: subprocess 11185 is still running\n", - " ResourceWarning, source=self)\n" - ] - }, - { - "data": { - "text/plain": [ - "{}" - ] - }, - "execution_count": 8, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "init_state = env.reset()\n", - "init_state" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{}" - ] - }, - "execution_count": 9, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "env.get_state()" - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0\n", - "1\n", - "OBS: [[0.4 1. 0. 
]]\n", - "SHAPE: (1, 3)\n", - "TYPE: float64\n" - ] - }, - { - "ename": "InvalidArgumentError", - "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - 
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0;31m# print(len(obs.shape))\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;31m# print(obs[None].shape)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 15\u001b[0;31m \u001b[0maction\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 16\u001b[0m \u001b[0mrl_actions\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mvehicle_id\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maction\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0menv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrl_actions\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m 
\u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in 
\u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", 
line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File 
\"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = 
tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" - ] - } - ], - "source": [ - "for i in range(100):\n", - " print(i)\n", - " rl_vehicles = env.k.vehicle.get_rl_ids()\n", - " if len(rl_vehicles) == 0:\n", - " env.step(None)\n", - " continue\n", - " \n", - " rl_actions = {}\n", - " observations = env.get_state()\n", - "# print(observations)\n", - " for vehicle_id in rl_vehicles:\n", - " obs = observations[vehicle_id]\n", - "# print(len(obs.shape))\n", - "# print(obs[None].shape)\n", - " action = action_network.get_accel_from_observation(obs)\n", - " rl_actions[vehicle_id] = action\n", - " env.step(rl_actions)\n", - " \n", - " " - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "dtype('float32')" - ] - }, - "execution_count": 15, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "t=np.array([[1.0,1.0,1.0]], dtype='float32')\n", - "t.dtype" - ] - }, - { - "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "OBS: [[1. 1. 
1.]]\n", - "SHAPE: (1, 3)\n", - "TYPE: float32\n" - ] - }, - { - "ename": "InvalidArgumentError", - "evalue": "You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File 
\"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1333\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1334\u001b[0;31m \u001b[0;32mreturn\u001b[0m 
\u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1335\u001b[0m \u001b[0;32mexcept\u001b[0m \u001b[0merrors\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mOpError\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0me\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run_fn\u001b[0;34m(feed_dict, fetch_list, target_list, options, run_metadata)\u001b[0m\n\u001b[1;32m 1318\u001b[0m return self._call_tf_sessionrun(\n\u001b[0;32m-> 1319\u001b[0;31m options, feed_dict, fetch_list, target_list, run_metadata)\n\u001b[0m\u001b[1;32m 1320\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_call_tf_sessionrun\u001b[0;34m(self, options, feed_dict, fetch_list, target_list, run_metadata)\u001b[0m\n\u001b[1;32m 1406\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_session\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptions\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetch_list\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_list\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1407\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1408\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[{{node policy_vars/obs}} = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]", - "\nDuring handling of the above exception, another exception occurred:\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m Traceback (most recent call last)", - 
"\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0maction_network\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_accel_from_observation\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mt\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", - "\u001b[0;32m~/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\u001b[0m in \u001b[0;36mget_accel_from_observation\u001b[0;34m(self, observation)\u001b[0m\n\u001b[1;32m 88\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"SHAPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 89\u001b[0m \u001b[0mprint\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"TYPE: \"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdtype\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 90\u001b[0;31m \u001b[0mret_val\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msess\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0maction_predictions\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeed_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mobs_placeholder\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mobservation\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 91\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 92\u001b[0m \u001b[0;32mreturn\u001b[0m 
\u001b[0mret_val\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 927\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 928\u001b[0m result = self._run(None, fetches, feed_dict, options_ptr,\n\u001b[0;32m--> 929\u001b[0;31m run_metadata_ptr)\n\u001b[0m\u001b[1;32m 930\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mrun_metadata\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 931\u001b[0m \u001b[0mproto_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtf_session\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mTF_GetBuffer\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrun_metadata_ptr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_run\u001b[0;34m(self, handle, fetches, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1150\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mfinal_fetches\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mfinal_targets\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mhandle\u001b[0m \u001b[0;32mand\u001b[0m \u001b[0mfeed_dict_tensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1151\u001b[0m results = self._do_run(handle, final_targets, final_fetches,\n\u001b[0;32m-> 1152\u001b[0;31m feed_dict_tensor, options, run_metadata)\n\u001b[0m\u001b[1;32m 1153\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1154\u001b[0m \u001b[0mresults\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0;34m[\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_run\u001b[0;34m(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)\u001b[0m\n\u001b[1;32m 1326\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mhandle\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1327\u001b[0m return self._do_call(_run_fn, feeds, fetches, targets, options,\n\u001b[0;32m-> 1328\u001b[0;31m run_metadata)\n\u001b[0m\u001b[1;32m 1329\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1330\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_do_call\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m_prun_fn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhandle\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfeeds\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfetches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m~/.local/lib/python3.6/site-packages/tensorflow/python/client/session.py\u001b[0m in \u001b[0;36m_do_call\u001b[0;34m(self, fn, *args)\u001b[0m\n\u001b[1;32m 1346\u001b[0m \u001b[0;32mpass\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1347\u001b[0m \u001b[0mmessage\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0merror_interpolation\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minterpolate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmessage\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_graph\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m-> 1348\u001b[0;31m \u001b[0;32mraise\u001b[0m 
\u001b[0mtype\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0me\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnode_def\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mop\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmessage\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1349\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1350\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_extend_graph\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mInvalidArgumentError\u001b[0m: You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n\nCaused by op 'policy_vars/obs', defined at:\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 193, in _run_module_as_main\n \"__main__\", mod_spec)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/runpy.py\", line 85, in _run_code\n exec(code, run_globals)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel_launcher.py\", line 16, in \n app.launch_new_instance()\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/traitlets/config/application.py\", line 664, in launch_instance\n app.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelapp.py\", line 583, in start\n self.io_loop.start()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/platform/asyncio.py\", line 149, in start\n self.asyncio_loop.run_forever()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 
438, in run_forever\n self._run_once()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/base_events.py\", line 1451, in _run_once\n handle._run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/asyncio/events.py\", line 145, in _run\n self._callback(*self._args)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 690, in \n lambda f: self._run_callback(functools.partial(callback, future))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n ret = callback()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 787, in inner\n self.run()\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 748, in run\n yielded = self.gen.send(value)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 361, in process_one\n yield gen.maybe_future(dispatch(*args))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 268, in dispatch_shell\n yield gen.maybe_future(handler(stream, idents, msg))\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/kernelbase.py\", line 541, in execute_request\n user_expressions, allow_stdin,\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/tornado/gen.py\", line 209, in wrapper\n yielded = next(result)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/ipkernel.py\", line 300, in do_execute\n res = 
shell.run_cell(code, store_history=store_history, silent=silent)\n File \"/Users/akashvelu/opt/anaconda3/envs/flow/lib/python3.6/site-packages/ipykernel/zmqshell.py\", line 536, in run_cell\n return super(ZMQInteractiveShell, self).run_cell(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2662, in run_cell\n raw_cell, store_history, silent, shell_futures)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2785, in _run_cell\n interactivity=interactivity, compiler=compiler, result=result)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2903, in run_ast_nodes\n if self.run_code(code, result):\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/IPython/core/interactiveshell.py\", line 2963, in run_code\n exec(code_obj, self.user_global_ns, self.user_ns)\n File \"\", line 1, in \n action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/')\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 29, in __init__\n self.load_network(load_path)\n File \"/Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py\", line 111, in load_network\n loader = tf.train.import_meta_graph(path + 'model.ckpt.meta')\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1674, in import_meta_graph\n meta_graph_or_file, clear_devices, import_scope, **kwargs)[0]\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/training/saver.py\", line 1696, in _import_meta_graph_with_return_elements\n **kwargs))\n File 
\"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/meta_graph.py\", line 806, in import_scoped_meta_graph_with_return_elements\n return_elements=return_elements)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/util/deprecation.py\", line 488, in new_func\n return func(*args, **kwargs)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 442, in import_graph_def\n _ProcessNewOps(graph)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/importer.py\", line 234, in _ProcessNewOps\n for new_op in graph._add_new_tf_operations(compute_devices=False): # pylint: disable=protected-access\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in _add_new_tf_operations\n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3440, in \n for c_op in c_api_util.new_tf_operations(self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 3299, in _create_op_from_tf_operation\n ret = Operation(c_op, self)\n File \"/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/ops.py\", line 1770, in __init__\n self._traceback = tf_stack.extract_stack()\n\nInvalidArgumentError (see above for traceback): You must feed a value for placeholder tensor 'policy_vars/obs' with dtype float and shape [?,3]\n\t [[node policy_vars/obs (defined at /Users/akashvelu/Documents/Research/Research_MobileSensing/flow/flow/controllers/imitation_learning/imitating_network.py:111) = Placeholder[dtype=DT_FLOAT, shape=[?,3], _device=\"/job:localhost/replica:0/task:0/device:CPU:0\"]()]]\n" - ] - } - ], - "source": [ - "action_network.get_accel_from_observation(t)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - 
"metadata": {}, - "outputs": [], - "source": [ - "for i in range(40):\n", - " env.step(None)\n", - " env.render()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "env.get_state()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def test(d):\n", - " d['asdf'] = 2" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t = dict()" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "test(t)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "t" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "set(t.keys())" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "b = np.array([1,2,3])\n", - "print(b.dtype)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "metadata": {}, - "outputs": [], - "source": [ - "placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/obs:0')" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "placeholder" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": 
"python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/controllers/imitation_learning/Useless/Untitled1.ipynb b/flow/controllers/imitation_learning/Useless/Untitled1.ipynb deleted file mode 100644 index b93658a05..000000000 --- a/flow/controllers/imitation_learning/Useless/Untitled1.ipynb +++ /dev/null @@ -1,96 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:523: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:524: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:526: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:527: FutureWarning: 
Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n", - "/Users/akashvelu/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:532: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n", - " np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n" - ] - } - ], - "source": [ - "import tensorflow as tf" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from i210_multiagent import flow_params" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from imitating_network import ImitatingNetwork" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from flow.utils.registry import make_create_env" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from utils_tensorflow import *" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "flow", - "language": "python", - "name": "flow" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.6.8" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587254017.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 
0e64e0dc20ed4f255f8e9f110f2f9c0bf6a3ad75..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 265723 zcmeIb3y>Vgc^`hWdpI0EfFLm>C1~_8gOX+GC~9#BfR9tjJRB)f{Qu2|2#^#-QLDw> z0^D%1d)nOt38fM%iOW$OJ8`U(lh{txKXECx<0P^yl@u!~TIEEFttfuPaU{i-6gzsQ zWGQ+$~lxH{Cj(00@v=0 zhl8z)-3z_Rv@}~a*8V|Sola>9jXM1*WrysYDMk_x62fO3l+0O2)JAJCR z)1SsS!u9Ki;{D6tWB)~QYq`Dh64;Y{AXfbqb|L1+>}|~kS5i9m#!R3r~O?49Wk z18^G|xsBlWBe<7mchsLfGoC!%oo3}~tMh|V?9bDUjHd&eupGq; zD~tc)wtxH_&r7uOf(kE-&{|&z#YgYirug_lH$L9qf(QNe@o;8zk5HS7uMvJsLRoRj z(6ry*UW;3Y1&fPTURKf40f6=8G^UV$8!G^d>66)mdm%baqEg$#8{mVj5LP_hJrlQ2 z?Vd?Octr2nAY=xkm0UoPhk&$h^P52gcYb#mx9{H_CZSC?gb0aNt@ao|#PH!3ob>kG zxP5YaI|SE6B|*ppvOh~efIo=f6>h?d;nCh?&^t5iKe{G{RLdAEhhRU+zLWlTchoyi z3L%nUJQDD=uy1QTnvnvnKgnoa&uS$u(7}6KaElvXos-k);M^!)qF)n7aU}jWmkMTg z+V7I`y=E3U+-3x3O|tQAFMXH^;AXDk9UNS6;-*t?M=q zl5#7p2RBw0)BXZ}D}tB1OkZ$D^h7GIbS-lwTBUA?er^&*)yLsyT6TlVR3SN_lK>K= zmRHoyVPQ?{rC8C!uSM`G7e#j@Uvm;p8AY)3Gqf%n@NtKnaer?{>XLYgeof>aIjIwB zN_HIk=Z!S7&PW3|-5Hbk+9UCALHyO3^jGlP5&VEn37f=YNof%8&A+inyPRcr^{(6q z`$@3wb<;+*H;b3z#RuN-*j=t@!A2n;_*)L)<8*KIRJ@q}f8xq~F@W=w;8PLYXAy08 zwQk$~+9KkvLMOh(CE^x?Xd3slX;1(0klMrodCQm z!~fL#pL{M&H(_`w+xPI(bZp>%tp#rkVR1swF1PUEY?}F0+fT6=C}2Z|0^qq;7&|B$ z`A}A6#Y|jXjfp$*6Su}W;dcWGuxRMRn{pUYrcHg(w0rrwz8}f#6MmxwcLnfg+SrG& zN7CAdAw-&c3;SV&44;YMm99b*dI}*W+2&d;{{6hH(w9grJ~faJwV>n5o806(3nd9* zgPNwpL9E)w!^ic~v~L4P%D5^uAkLBtW5G5*fsaOTyWI-1K9gS^5?X!)8b#!j(lqq) zd_@@<`>FqA@2B$N^e$>?U<2T9jN^)P%Af)KGduQ{6O-N2hP~yFC1{P8M(LsR5D)_V zx*cv~LXtetQAj~pHJBBA-YK_8K!6p$$Qsjn(W&G#Fh1~K>>v%%Q3WAz2$4BPU_(K8 z3n;ntgbU#BIK>+dC$K>or4Y(Vx~jqnDOj5aQosh8D^kD)bwmmZdu3l2pIg$%f;KVXp`nMtlF4P254kJo$q=3!a+5)&B1&o-% zpB<3`2TGlE6ToF=kCwRMgcLB=g*HUho7W3jf#3r+yA`=Z0T-lzJuSB+)?$u5Xh#az zCk-}unIHvhrcOu!`>?!J=`6XjX!MG>HVbz_3f8i&UQm!XQb1|W4Jn|*2`(ytd4`rQ z&`zCh&av!=#4+<-&H45MC!`=(iL^<;+62?b>_|aQzJd*f-kigPQX4#QLJD#&>LLa7 zvC{uAph!-`U{>(Q4oJaTM?wm$8eZ+VVFf~ba$dm}_Y3H@5EP;V7p!0nffX>hSa(e1 z29`zPa61ZIu!6Nr5&511D^^el!Ce(>!O00LI5rQgfDJNNtbh$Fq$mBZ0!%VxL#X@f 
zfLH+=$jFNyR=|c(^AZp%U;~+W5gM%8y9!)JD6oRsuLe~11F)!u+nc~9>x31s51VA? zzN^56niE#wM93E_V1sC15^%u^7!i{I-FFqZP!mkq-sZZH^u`L!7eYiGOz#g%8axDpvqSV69mx>x~y z%#Ic0n1U}1)K(X8)u z?^NC1J8;1Yjv=rDCKo$az}Y~lwR!9-I93ZQD1=Z>GU&d7<8`qDE;<_D5GeFZMmoUj5XLcUl58^jAMU_?yD=Z+P)P!mkqjup6&^u`L< zAhu@N=?w%%%pg)ntiXYi8&=>%D40E3;)WAez-BBoB9)HdvcU@2be*sQ_Tk!C0ejGn z6|helY_P1Kyh66RA+eZ%VkOlHD=65KIu$Ecz#gEPFMk>WvCx~u>v-PqQ8FE z-~HKuSOFWz$crCVz=lxs5)dn31DSXc8m!t_fy)RKE677RZbtwcCfyc{+MN*b@PpI7 z0yd;ccJBKMT&Ou=1x|!~downO7goTCm;~sK6}V6nOxcbVxRCV53fLf)Rw8o33K%hi zNFA{P2TE>OffJ!%_GoStBPXnY%~)te0=+@NXs=W{372A(XoC zLF9xL{^9)_~6`Vj|1xzk> ztbntDQloyLK?L%duHmB$NoZEYRx?O0FD1od|w363?gBPQN#u_9wIMnaQ}fz15q}(zBL3&-4f9o|>so zr`5b;tY}OZ7T$R${FrpwA8I1vMGA-5kpjz`OX(5w9ro>;j~)LDz<4j zC8$*z-a%L`_%dc)qN zyY*Cmi(K)syS3FPw{$E%Fn(|c{$m8MaiV{Ee9v&)n|0Wo63L@)b$U)IAj7;gj>Zd1 ze{lvmQA1eC(yHx#_NKpZ=MYD(XjyTRaCZGl?y!w%MSVpJK|>c( zNR-!(aSvB|BD$M~PVJsg@2@zYxP6=`G^rsj9gLH9sZaO&JKgQU`Du2{1W5_L83PFC3SpHJs7z`R>l7KP9KScV162>9BLLX-!p6q)D~JYe?@upO8*^cjqhK zld)IVFT^_DeYnm=tp*DveL8~IIyn%pr0mV~?r`vQziB;jIbEv|WiTWw5PukfaJQ0^ z;Krl=G~=w3CDf+*!*)pY(c-aREnOa zoA)yvWv#{jn5{WozE1ytBG_(P1YAMhSfRv)^|n^=ina=Vwbh&e0`^ml36ACM6{#09 zHE{6V2)3HCnrau<81GekaO@6AlR)m{D6h(NDg+z@hoMQM&0|yIfS+!|!vVM|QYI^S zynuwJxJ_nmjWnOn{54oMq;=fwi><(lnMbX5i3H3gf_n8mK@~sRf*%U$0C~;WfKw%5 zY0|o~NClPp(ewP3HvL6NZ-T^DtVvKMmM-gzA(7*x-2g5=xXi;RHCyabq@o_4 z^vwv4)>Sjbl>%4?q7#t}T!Z==VJP?y5xm-o=WMX<>g8MX6HeSfI|2j}K5F2BDRNj# zNZMN64H0z4(eEW|xwHP{^2Px_>}p{-H}T&5o20fgIwFRJ*vcrV@tt&Ui<{=32Y~Dc zTpc9ig@Lg3l~7Ayb`|_i1jn5sRHa1ELRM3yHd)`#PiwEewEBDXIhsoz^}!*@Du&xK zR-vW5jzsA;sr!_aDyy}iE&zN%x&}ns6ic|GTPmuv&QS=ieq6LW*}rmKO4}Y>i1$9+ z?+x!BTyR_@%kr(bHN_%DZT@7%JbW?|@0QXMe15FKe9kpAXE;&;Ub{tFmQLLkKZ$}9 z;o2?-(c0~68~#!?hM$}Cwg+T2bILFk>ZSO=`j_GRO{1jabuUi4NAn_9xgzGe@baDr zZZZv)F0AAs&dz$`Ht$l-?g8v)BY4y_&+6Y48@DpE>!$MSR2)hvMb|~*_b5T2(lF}I zPKv_y9K@>6q#;R`dex<&UuePUsw{lvMA{&>4pvW64ap&~YQUZuezFB`2`Q=B?he`I zcWyG?B^x>VJNaIxtMJQh)oKuawp1ir4W{^7V9VA@%XXIE!H$4`m&?5WO!d2&W1POs>o9cRK}_a z>(|8lm%qpUi=-ccB)@=D%G 
ze{~QSoktX$z+XggDx7QN(SoFi0v`&f)0BDPHdZYxL94}rOPv_tueX}uey!xb3E2jg z!LI}jj>T~U^T`tp$al?dHYA^fEF;Rb$bim56p{48dA72cT3`onBhEQIDKMNJ*L#aOv|x z>d^XT5R`}$XQp1Zz=mJSdw&Za3TcF=2g5$O0g?QF;vX6~cA3%mX9kWabrG`0cbZgk zvjJIvAn%WoX449G{9*);1(8|`>vWWktA2Sm-Dp{Fv*mLk;Dl6W{8URP4sJY2Q-UqE zDmtwGz^W*2El2R75SVizz*FhD2=Simi~DQR<=pG+{3oh^Ae@zQo*l*sd4mdoBZ(q* z-gKV6@2GKq_*e+llegT3eJsPM1rJl%(D<~h2lp;AWp@U$T#$@ioo&2aU*eYj)}WhvlI^hKByo_kfG}m{wNYJ z+yHA51v#Z3Fk^~zClMWvj4%^?Bt#ie%rUQc$}%FLIEjh^WRqg5nR}Pw6Ke|J0rD1fb>Lx8khd6(NhSbrSkROeS~%NEBOq@vZqAEysS{?~VKKZ;aU>X& z+t~b}onpKq)`UQP3#t|6X@Wqh)JD+YVm<|z@bL(q^l!@L13ncUru+Gxr4Z_%3-5~p z>@TYnCLANg;&PnCc>ZXeIseTzoNr3_%G9pwKbtjy0PTcXG0b9cz3IDJ;J+BBnIJtj zlf6B?|3WCY)6&>9O+sh$i3zZ7Bvye9JH^<3E>H)hKzoYk0K!+=|V)?m2caGl1aK&^Y+R326qh5<02 zKW%~k=0mf6^9WF5;-`3!|E_!uD-0c~9kWFk3I054u2d`boj0%0A|*K|G5D1hJk^wQ z$c=gqo4uv6nl;_v{34%_*?{84}Q%y>dB z@Yovf^gHCO?P;G}Ew(x6O;2ubQ>-ih;a;*6EZ)!kkjy=RlpdtHsmOG%)O%|*I1W;g z!$6)F9&?U64rB1be!DYa{R>fQP*#TqO{jb5b>S!5`3twIw(H4o`r1CP=K13J_kie@4}Pu6SYFRgCTK_Z#bzu0`d-; z^Zr)#u=n?71nHuIo_-%M(XWZ(4D{eO(e#CU8{+MIz3lcKa#Mnyv)iC1t-jGY z``L%c&D;Ip9#@iNkHQCP>`}0jb;y{(-M_{vw<`l&LUhD9HcBJiDkAOU@L2pvfJj^Q z)byww=U0_oxvuuVLIP|c+%&5?TeK)vaeR(VvI{rY-YWczXYQjRmu9}xz0p(mrv1$_ z`z~<}+0JW=cGh(6UOI!T`@pwr9QLQ0*m1`e&@23?G(Z-s_iy)Rz3$oZrX$-E?^X$n~()8}hq}l2$G*m)T z@!;8$DLLRWQW4fd_hc$w=GT)c*|wx4ubxaAkhg2Uo=hSvJ7pO#Lic3Sz#K=7_GHHz z>&YaBbk1z=$&LvCC$)Qwg$8Pqr4ICvy~eDX{ttm?*k1>tv$n zWS0xllZivn64tXPQ;`hNlZj+?UK1__R{Imr!d}I|d@^8_755D2yH2L!+L8dh*2%>2 z0`+7fQP~m8uo1c^6USxDG~ScZkc;oh@{sM-lZmYB+}+uZ3mtj)WNY*2$y6FBSU@b8 zdG}-z*Rl%)*$OF*!Bu*mJ()_H-aVN#Tb+f5N=PamJbN-F2lL2CDqfsD*?XN#$+jgW zdG%z{fV^G%^<)xZ*(u9_5xOUn2Ie?wv?p6@tS6Hg(mAueCtDMGGLOEi+D}Ap=VW>sfHa0>dB-5dAs)O$t1$EQb7p%_wkq^wO|6rC(DQvVl_0sE?B$mC$pUm_jsh=D*j8f+qHe4s69p$bT#$}T z93n_ZrXm@jBNNH$d?s81thO-{?1-gCLU&x^xQrPaUIaf~<0=}(-s3FbYWp#_aP8HNiEQg^#<_+o^aKAZg42Aq zFrtP}y*Ghv6?yWe%Ubt9i?72SbnR%*3S8?ow^I?5x*A=fJ zSCK)yDb}v8-k9GM`zL;{f-9O_>mU8iRYqj4(ClwlhFJ!4?h6?-VabzY83uu8{QUH49(>-g+w0r%4Ix3S%-zC 
z1W#rBsPuc+RMv)H@;ggKumPQ?37O32)pcRCoa@zO9x4mm(kdCT1tj>T2yU*r)|vjS zw@Qxse5OC?3`T?5pf~JZ=nZ$t{i1h`N7LELZAAjU1#XUCRY}0A2zrK`O{loi;OyD6 zce-6^@ZktvYMg5fkM<^m-kD+l(X}+LPM_*c`rF-6?|eUQ#S8dZUkm%vh|wF2`jd>( ze%M2ZllxX%>kDp^|84|AH%UV|FX)rg>EPTbUZP(UGpl0$t`ZZpwsHd;B5^Tzo zetNXgedKi4*v(=>MCE^D;ZSfiMegz7 zvyy$TXW0kVOQr?qwz2!w7TnD#bMyc8@-?XFhUdYJ`{uSakv&rxl?k}ifXS-t&Q zRrGr3SPne!%@*9_$*>$ad&>v>M0slRM*YZ5Lg^YqLAzUb` z3^Z@DtCKt1^A2}uoiZxa7bX@z-@;_Of<-w%_=qbBYeAa8U=W5eC}5QZUyfkI!-(W? z;3Hwv3P~9z(477Os_`gn$y7sN=M*Fi{0XYv;P?^(0oqqp^#WQhatMJhqM8hjNjVfC z%3h8drpe$QuU4atICuRR17Ah87d&fHec&f&f44UUm2_W=VtJGghl-Mq1Og)((aBKJ zZg1M1k(+eJ6V_;ONv~~~Rcqz71&px@o%j|P7^9ez2xPRg!lHF#d4qr$=|A{P1ds9j zIn@jWbxk$Y$$)&ur`PmvWOdPjO(1pA!iu6M(qPT-6KzDBXyYwjyk{^Rs@8jXF$OS` zq$$0WKIEmD>i`C$?f$df9QAD*)`QZkBR1e0n3h)5hme{$2a0TitFn<7`*KEFks55u z^0zTft;n*RsOmRZB0WS?D-uv7t72iqOQdxHuuQ~?LiA*MEPM?E*a*x? zLBdbcL8M^fmjD>`*MfO|$@i6d_c93%UqUv_Mc9+pfMvJjYwXAXL1lZ(8;lq5HDsI2 zF)9ZLRKv#6!?npAfvQyn@rBwEK@pnLHWAez^Xy4s!dKowgkuU6IBd5#txdh|8?rHG zLs=kO-f7K^F%H{h({RFe8*7W%EEJo%32eQ9#C91~4R3gC*Fsn}6AZR%8-i}0_kxkw z5G(V-cG-ZLK7>lPQc|kmRo)fmh5)lU0}k6|k5dzrb~+-0s6Y>sv;+c*q-EOeh{6BZ zc$&pXTPujecG;j*>i(%qpjW0YFL~HF$6>p|OjWh69Gh6}%Y}Ahnn9tSY*vk)wvecDctD;lfk_W3b&^ zvg-I@&@2+$<(ZR$grB61h+z?9@e)cmry;Oij$c*n$Ld@Byn+M=9JVX)_WTGBIBZv7 zR1OfRHkrdNacwe3;BpQUIBZvxw-hFP<=uFoz+t<^?TF1|yKE>+zst9c#-NhSq8nkL_AX zR}vXw->!X-0N5@Yq?)R%eul$#*?_rLo^QzvhwVyC%jpb(FG>_K*serEk1dk04JJwB_U-b_NkPI-(q(|~Fhmo-IBb{W zH#D}JYc9k=EI4de;7te*9x4bI3l!^0$bGv4qjG>iV2uJjT$?;!Y&SPGb&+9m8XUGO z$VYw$$XDKt2MQdvd;BsylY!020s5uhPvLm(Pr+?fn)z)T-PsuId_3*M*5D5&)WHXFySPB?6r4MI)Of8VY^ z50kV60#$!6<|V7n5D%Ri;z_U+1SN=eOM_O?ZkFli5P*zWO5j_uMJ zgvWL{&V(Q$RK$knUg6y+>^kSK5`JV57>Vt2Y|3E+fjDy1FxW2lxFTGH#&&bbs!L*n zW|8}LdFG@b;V0?lw&T$$Blqoc{3@%X{CX%17@BMD(qX#-Z_khLfZMk#Fe(QKRKvzW zv$!^yBcKQpwY@ow(^7L=W>ma!BF|(HE?UwMb%%M@(jf1oz_pD<&)K8-LVEkd;P z{-5+Hsz`PIthsEDkozauRJ<^@NL5u}+8Q<+CyXsZz8lX535Br<^f19gAfN~yrcmG| zWpz}t85-{0c#%o}5F*ekLx`6=Y@B~F!km&R%+yqKc+FkF)I3884&#vc6zUIR!t&dK 
zUffpt4h8HH)qy=CB*q~zE!QH5K3k%Q!8jxmmw!mXVH`4>Qc&@iy#?cl!h{_p#&N<0 zZg_4BV43uevBxX^=_RL^J6hRW`O*AH>_>{-W4!OcUgu_1&2rkSo{ z2|m4?9Jb(P8ZP=f)4^~&VuMsg^~@)4UUL)GNA|!2no)Llvb*g0zbdZtDKMy=o#Dk) z#__(B?AzoOZVxB*b6hbj88N9JYxu{ztH}|5p7dsk%u>2L<7dca+G$4CeTKhAJb8JQ zPSwH_qj4Jlx|Uc>A~{o?!MRCqdq8eCpLY1asZ@D?Z+8Fgu$w;6neFZj`<<=vXf_!S zyW9PpKDp#*v~{t&S_OU=yn;uriYKa~V!&lB>@@vsK)`;%>H^L=Pvox2<-oGBAB< zn;rHoE^u&mj=jMVesd2zUS`+qsjT*uJaL897LOyMX7OwWOeYBVqMSR4+gDWJ z-R9*^!;rHD6S)Qmy%@NX>7!=wrMHp%Ti@D&@U0KOw%Q?o7!gB@VR5ws|M(SU!hJsz zz7$~?)3s5f3lNCQlP6Jum_3dO#OzsgAQlK*QC92~h}SMvAZGfwM1hzcd!{mEYapf% zV*)W778!`S$FC?8_6o#n9oIlyfy21^786*hI+PY{D6<0u;_{dXmHk>PV)i&D5VL2| zfmk4LMOm>|AU<}f0x{Ev(u~d9FE+rxB4+O>s|?v1i0Q+aK+J|k24e2BqzdMZEWVPIB5!SrofB#!GW1yC>lLtM?$*YUL z1dH_&=E2Du{booA2A?B?5`)c=0lJ-XH;kuZx~$6%Hb;ha1e_x)K!eSZwE+R=$YOJ_ zIkKc#&avDOgWV{wqS-A5EjlS7_QExfUHTwR~2)@r4eMw+i zqH#FU#NRwNX*YUi(M@x5vL;wI60hL{%kTm4(3?y2}#og!;E?G&jq*(tID#ZHlS zeRhhhnX^-*hH9tClD}w3dYI<`{1tc!Z%g^jP^0bcl!Tzto^EF5bY%0U!k88|mH%@e z{IiSXn1R$es3pfs^lQR-OrkaQ#5i&By=91sFF2!ykOF*!Mu2qSv+oJ5wp5V2MVCU69sct4Hgxj%-#tjdl(I}p)?HE)!?lK zlU5Dp#j4EWtwt6x8l(eh7_8%iw-wA;HCR;GGJCg`@dTqTMpwg)MO_NfR-)+b1(Q|{ z=FNf3;_aHr8APp^SScrr&gs-DQS^?2IjaVXjz(th4h?(tWnZ$0F$Fe3_#;-9u|$8g z;L@tXyc;_tXZoYTH9|+y$WCt>*^y^H zZPL%TOF!QM--=;zFdEEqc3v$kljx9}{D7t8HP%nF%`wyI;M~am`Mf4$^pD!;lh4;3 zfz}M}Bd{dQp0M&;w zCifTJ$Jm~U{9|`1sJUS2bT2_pdiO`a@XtSW^8fkO-&%$rxiS9S4X_Sva+XxQ z1ktlcqSNWui{xuUzP^KeJ@R58zg$e0U)HnLmxI?wt;MB1?S0GfUif+h<>7bnerS{Z z4CJAAMp3*EqVwgq7xHhfgZ`ru%E&5t?YM@zX+PZXluFVTU?4-VYl0{h7%dfl_*$@$(a zUYgD(gMOLXhSk!yhdg}MT4!k*W^eYZEPxXm8oHizNJ z_Z0)K-vII2b*0xF*niDlh<}o3^sTUX&w#Xeq&M0goSz37;yo!lcv&j)o zI7@mN@K0neDscBY*n2NI$dQ~asGFti+tfw&9Xv$516~8g0%|&)!H6ueq(aq92XLCIIrexO@Y29Xronu5{Q$t)ayA-?c0jBq-2@1C z!TyJay{-OJIjvOla$?g7Kg!vzYy$|<$ z!@CC;m;lnR4w0YA51vS$r+g8E$en};jvd5)Wa)JFSDm?#IGBHedX!+fV<=Y@iC~s? 
zNF*IRdw4HN76BPka;>_xy~$fzk~atrxOZ}b%7r3esZGFb;$URz3eAmL#hmeY@C*)eyeG$m~d zKBakfE1kqp&}aNY*he--&j#dFT9O-17vGC~i}|4Zl!Birz@xp%pm%0SwyBU9lhrRY ztEKhQK`~BA8IOd;)Kq7^!6?lqd9l!n7mmD0yhn!n6Y;E-rPbH0uD#~y+KVpGZ*G}v zt=)3$mUF#Zw{Jgtw%0#;NBrgA+Y1x8mWNu#XQnY9WcmGO(aH~0mi$S9w|*$zzx+M+ zUqrS-wpU)lSW^8Jb|L1+>}59QKWtN4vqOM8MZ9geN3I1Qi{SPC zT%8{nju!tHxBcVi0`U^*_M3&`qxUS+P4$R$!p9Fb&d2*(@Swjw#L7r4l#)&TH7Y;EDU=j~Yod}MWCGcrO?ZD0fo-8ZUwja* z$5C9thuG@Cw$)WG#gZ=Iqk)V|t&#nL{BFy-$IOv+jmA zdOyRzi{NHQiAmR9IU3Mhg26{3xYfo*3OiQsC1dr6;*zP2{0d#{epWGI>o<&Sz;8w3 z)jKM?N~bS4qu>|@>Z!dE$bodF+51>A>48?zELD7#;V1FZdipxo2K-FRZcv#jBnNcc zxs24ZW`~PKYT{8z+-4iB_~6$fc$Jf#O1|bKoYK><^E0$A8}M<5oWTH9`ZbYz3$T~s1b&L(2HUCwHCZFgvA+K_;5Cz8eGvw z^VVmoZL?PoR+JsRkFmgWtuQuSH1c6QgtYQu2=UF7kD#Q^eg zg@gytH?Tg`f{wF+NIizLP?8WfsA)PJ#HwvPeB_&UnKp2wEMcSu#94B43KJiV;C8ze zpj&;VpnPP2#r4n&3dm*57f8Hylo2gEC4X z6oa^97A{CZu_xbef!_-$U~}N(x<69Ds0w(rv~{5wq=1dg^W%pUFp5TAN~EB=(2oWI z*vM|YxF7|LqKOwjq`+l_Y=fZ4l&xqC1`e?KrNb9Mg%eW1o;1nM9Vu|3cHRXka3VxH zGaYE<)z-4X!38N`A2tcl9Vu|3CYZ9l$#o$qEdOKL6*h>ik#Ir^7%_uL9gzYDN}Y5Q zfE6ilB1G)b=J8?d(gi7CGZxwq#iF8@wk!RGs`;-K6Tt;3V7$pKiM5zx589Ce_DO>c zYWfqK2@oA9XdiSO zrMn;nITw-#Itt0u<(IyR=NbOk0V!bDdB^#?Sk`rR(sa6E1wwo>_50%A>fs1ZSV3{$ zbGe(qAxOGn1)L4c7b{rH6cN)s1y-z}5P~yAP}rc;u7cv-bl#<HZ9QPqON$0;@ykAJGt! 
zvO#o+gA-Q3h?oTEjup626HM9O=DLvd#tPUV)+WGZR{+4T&!>SC174s4o2*lR%|2|XA9$=FAPm4p7W+mG z{t%9r5-U*hf`b9-@#3dB;Di;hCrz?*#|m7iIbj7( zgnWB5Hi#Eiz=)Ux=#CY*P!mkqjup6&^u`Lab*n@VgfPKP zsaUZB_Mpwd!wAJ`UqOC6F<)k07FYqDmlIY%KPtFrAP9g`uKKJD0f-f)sfl!X10h#Q zU95mUX2%M0@)eC=23P@|niE!#b0K-46)T{RmHvgn3UV4IHM{I9I9?kope?E$D_~)U zVg&+3AihV?_`ZVU2&{m~#f}wlHc)DG9$3NgT3A6LgmRLuFM&AiD=5w?=Uv)_73d*= zXn`Cuh)Kk34tyju;t)>#HKS^%A9$>Q!$7p}3nu!rPFR5x zA>ZDN4dR6rFd`-ax?=?{)C5zuV+AfGy|Dr|h^3W?oZdlT#0(;J#0nfJxnTuPgo4?l z&9Pmu0yblz5fM_7Wt31m2G~TZeTxEXh@7wjMzS_mz#g?k!oK-l%HgXX~^ijoUj7=QN4@mo4QEl zp7_~Tol^o8B5=V9*v(1Vm4)u770}1*SV2y{qH&`qdI&}BhX$oMVFfuCk_TF`0{WQY z9R%*UMLf?=`wC9f#tLYQYR3v#m{DK_wcazx+@suzAmR*8`wC7VumUC*J66EiKp9c} zu!0EWGhGS)gHh*vzc=bm>D3?I{$w(qbgCDDRPP3{p3B%RB$NoZEYRx?TsI<=Mhib0 ziRV*mr{9}S`xA0A6}v1dz3-y)On-3hsaf#?T6J-IVd0&3iv5#rt^LQOefg+~h!-gw zV%r>+HRZ_9oq}r}|q@cc;5sAYjcG*580HrT_T0l-jchgXM^;3HPQ{wh-vRQ$GZ>LxmYMer%L#A1g~{+AYMsXSJ)j6p6*}Hdiui% zHfpmr9`&agXPqpeHq9TtjQOk|cXNq{Dlc=b8NWj=#=~nb&h80Ky!G!zA(L_Gg4YR` zkEXFxSI)e<1-I4aJ-;6@zbDbL66C!)3%!X?G)O!&rV~aXKX_*g`n9D}^fcYPpXn%T zE%wK3&FS)W`u`KbcGDu@3i8GZC3MI1tF7h)5U`(WOmM8YTB*~geK&%wrmUvg#Wlt| zSDa}>=StPGT4|F&u1+2fwk~$bW$V+9Qw8N1I1Ei1Z62Ev2mEv!9uB}&rQq=b5}M*R znYlI6d_MEnpb7mb@{;U|@WobO#mu8tyF>!^sSrQff*%U$0C~;WfKw%5XlM0U-MUR|m;> zVIXXMCDam_T?M}r!EvVuRVmT4kkwSFP4<4|Z;;nsTK&EH9L*(<`c#=@6~k>AtI$$j zN1}9_)O|`ymF^hh7o=-Iv`w*uE4rm3iY3+*T>ZGH%|{F;fq&(?l(s#%5bu4s-y7aN zxZt=*mgQS1CcW(eSt398kLE?JvSM*vczI6*H<<>zXD}S* zAr@;{DMi;s;`b;)pwckv&Q6NL^&G^i z&!izqmU`9Ip})|A(^Xmc%89f=xKPrN91^Pr?2`J)7Q7{-q-MK2WS8H$$#{3CyWQW( z_c|q~6hN!5I+8~kAlhApUv8^bgYdJZBH0>`W|Q$S6-s}!-5+gTjF;~tUuT2aMHP~V zACEe9@?=oy$a*a;s}D=vtsz26hGdA`@@+V>18Q$9A3;x(_Ud* z2w--ng?HhX1KSqeLqxEI?3*O4l7hRsF7%gjI{-_kDNkh28vN5Jq}e^tKNq*j|J6ZU z%9-h!iob~9R5;hjqXkJ31wIr|rz!KoZLC^Yf>w)m?*DqL3GUZQ?wgQpU>W>M(BN1c zH!z<((SUr{>}EsqNysvyT#F3oEJP7WFPvv9i>dWhtE`L(2i_Oh|FYOiqYup}q1Du2 zwHK`j*+Z9X;Ky6==5TtITi|>r-oyTVcF{Gk?0z~tO#*DZrwwO9i=)~ob8T|as*hUf 
z69FY7E41{`?V>||GrTE&G)#vqYgucyK>(;?G+pB$02)D)Lmpa^`RM9wp{uvRo8vb% zqbmcL@pBP8?(J3Cq927b@)*G0z@7{iW^~gDdl%zQUmp<>7Qi7mF zoH#S}vIRE0mfzokhe8_R>A|p1Za^gepZJFcj$LLn{+WRzN?n94&g;VJ3Rwg^O-jAl zfUHnV)3;3F7bAEqh}2S8r=xUSZYvmOvt{~*Z~8AiBrc51_^Fmo9Nc)4rUa{VSp88& zQQTT42Udl^oKvIA1b8Yv7a`u0eQ|$Hx}1BR7j1~@9|&iqoM(qOr~o*UDB@CLuDj0_ zZvL?lswZ!`D^%~c#XHc+7(sH6$Va(CfzLfbRw#;TDZA(9Rw#;bsVn=tgh-IojfjN_ zX7z3O(GZAJF(vX1eXAmHJaODeT%2RV_=}w1tAKwWR1hS{&~*ua6p0sZfHjGNoYD_U z!D-eI4@YJc=_4V^h+>X;#Z#6M0mVsF6d;=vQ_bAF6rWh^yXKa3!O$NjE9R7bL*A?) zUmV|NspOF)up^fOau&%af{G-~{`t!$p#>cP;w?0%bWQ^CI=!%>J4Nhp6b9riW|_Mu z5opn@2NY_issJEwG2P5XNy01|InhaV%W^?L-eM9mCKwLnErwt{Z~%~3rb$5F4~77F z3%WY+Feu1djK(Ar05~jY$_g#bs}Yd57&qs|y}}8C`WD0M6i0$VxsA;q+9}2>VoeCt zx1g$5UL&Y)F>b+)4orfNNARS7Q%;ZjynAr=Y=1(wg!UC3=BxNoM*Z0{;|V!PaBIBN z?~vW2(|&orXKC4W(1rI!0rr|TDVDd&(I^&B>POJg-_y21TzjZx3@0(`v< zJI#vBxPP)SXUh3Y28~HjHi*IuS0=@Ny{F)8yd7r0o+fDId^$S|e5DPOc`ICiK+y9-i9;r+Zt6j;n7{y2AJfSQ*+2Sx1f!jth6wPL@6 zA#sjxI8!+S@(!Bw{#Ny{_xEN5>7s$2ejhK#$oDB(ZlUO6%Q<`SpbJ5V}MY$H*fcYdt6D9JqjPFu}8s9)*)jCcmEoz+^!6$65>`7Kp%(4;zt4m(5eHv zR?HjeQ9I7BD!X!B?SF*?*g&|Mlj?kqMY0PoSL3QJCc)2m<~|y7Y34iK8$ESz+TSd* z?-JLL?Yy>VXIPIX<&{cLr=Eh-IE>9?=}zLlSvHeoY~%!9q&BdpN#rL#U<%t zUnMpUwJuQK0NN*L8h8ZHG^neZ&^RSe8016Ema&w#!?nTqSG+QRi@;&?gdt8-Fa z=$=f?jHM^r@b1ZI$i??$dC2za$wbz5NwSRCbL+{D&7&t%X`tlw67*ye*P2}9*ON(O zaFw2CPo|QlcTXnGR%fB15|WAs&z?-l!8|gOiWm2c&YaBbk1z=$&Lv&S5GFg zu5NLJ@H;Zk6= zk@4)wR1C}~16Ema&w#$`WGb#L3DB!26USq1OtY%fAECTKj!@nx^IzzmP0g(LIvEYQ z_?|2e*c$xF+FNrC-Iqy|6QE;-u1?kAd zA%b*dDv|*@GLfv#XTl}GY8&HO)~gtpPyVa2;-3F}J2Dm5mh|Vc~W*vLlvZ zBXq|lj?0*_u9E$9jjL!Bdylh#tL?|!!nId7CbF$_b!Qtc^aKAZg42AqF`Y=O5=>rR zP}y*`hIo{S^J)L8A<}L7qU_0Oru(;hvtIY?cyhisi{89urS6%B&f7V+iM}0oipL7PJ!EDeQb}#gXyX1b+yT+sGOmXSJ*|TTwT)C~t zy|=*4@vADiR~1;#kW&hsZaRPuNAObPTw{2&HyQNK4Ev9+Igw0(tm#v|Nq@UL>YeY$ zt#|>?!?mz44J*CDs6WXlk&AW-adIhZYkdK}8-dVG(ooI|`s8#vI5&!y=-0%|s+hm4 z#00IC-j7z$4RDYcz<~5-{cg?_IetoW0ahy3U2;|z(f`wf(Yf@bD)@y6ju@4fy_3#$ 
z6lVSdu%wy1!6nw}xx_Qi{qUu8iF~flPnTeVc5{jR(>j-6#6*{7vOF7bKgb;Z0pt0H^eL7d#~o$uU7v&i5bB%_dj<0;D| zFNHlcv)r&5FGuk*DTQesTqJjlFRa{AWRwov65s5UQRuj00Kyh<5F9nNIyQ@{TUS>h#>k-_^b5vWe zq>JH8R+X#P3S$k&68L5d?(t+;4jjG$R9*MVPZ42dit;sx%b;Qs{O`y!R1-&%!iTTS zgGj>?i{bYpP~6>ZQ*GK!cFYyWi&({R-R413aV%!w8?83b?Gb79wr3a9R_|zJTC9Tq z9>HsQ9^67k!Icm$TnAM}1vICX`r=wvUo5R>qJ+=4FxjqPQ4SD3;!46=kTEczC8`b! zhP4j(as(S5MkI#=9|@PC0L{ON(?38p9)&HLY6xfVX&_#}pP3GHFL>6Z`oK@l4Tl7>uSKyu z%7;Tm$wvZ#5sm0%m}a*(?as&z*5e6lG`JwqHstlziU4D@v!bGPWO;)?80o*2*A_6w zDsQUVrS>beG6G-8-utJL_FW!Eljc5~X zyv2+642DD1dM_`=0A`XjrI*r&t^*j1w)@X^^YXIoWlM&(J$s=FzJY0JMSTdV$;UWW zDz&E6Ak%liI3Kkh=A%e>@NG;}E3zyns`?Fmy>vP+5m|(hqJfH|-D^cAB8Li~u1jGJW+uMQG^R)R!$`F?)1PM{ zi8YX)cn-CR5#HT~Q#>z-1g+9&p2p^3EXm^y%%7GRrB}^4m3=FMlipkj$xN|Zz$;RO z&u{RZ7QDfmX*r$wi!o`!Ka6mUmts&^4*nuSpyOgnBR@$=&0qGmny++FADO|YFi;KQ zx9CHwDUEd}1_TDNDwmHbhIT{K4qw24H9TiR5b>CCC0>LCe&EX(xQ1s_4jc7h1fCiK zuHhfAT9YwccxXi(EKCOVH4I=QFee2GKS>*a4Q^jOUP38=QGYF%=a+n6sdq1v;P546 z!(4>%xkTwz3AoJ`=VZvA5 zWu@R@lBU3j!*+|)+SKd5b8L*+O!(Tu+!*7qT{eyrwi~c5X5;ew>Dyu)w#&xQ@P@~B zErew=!CdoTpPC{e^sRuE3}qAW&^Ghh5^@WR8F$NYq}bQ8;)Ahf3Q-1hy;6 zTM84t@@{SsD0e0twp-kec*(I{HWR+KFgD%DeYGomKwkL?Qd zFiA@wph#L?*e)ZksckKYgChMQv0XL~Ds}(VCD1EVvX_Zkgn`6%g_){qT{$+f+P8sy zyKF9eh&QJ&+`e5lPNr=o&jPk9l*O7ONE9bdk@sqFFV?W>A@}XF$F@hOg1gRVY}?{Tr1DFWQN0bC8p&b41q676fxMYMB?&KX*g_GW>ZRP z{<1F}tkJZF!*-9kV7o$KH-Jc?;Tt+R@=TN@1m9Jb3dCj|*VNtXe_Lkb4BL}I%fzpC1g)wh8m zTRJLV*%~2MB(^K?_WTGBxP7|iV2uJjT$^mhc8fOI!xw5t1XY8K!*&Jv$nOC8 z%DeGEfx~u>XS|t)yjR!C<@gK`w17!(qD;S3)wYY`MGavK#VD28n682SebC z5=9KQE0Iv7w4x95ir1+8rRv6T*sjc`l+^rXKc}>Y!*-8f&e$%U91`2*I5P(k8Dtf6 zjsY`MmBtuEB4-na?Q(3&VFQ6Ua?~)`F88=1T&Nx0BH*BCa4u;aw#zdo1qnY%H@6)x z=NES-1IMqjI?Aty3XJX2QJsAdiR}u!JwL(&4%-zNl>-E-VdJ1#T${`hPy~t43qZL& z)s8*LeY>K(r7+rx{bQf&YOzyyb)$+hvWgu&8Qo(j)ODR(e%Xdc0)haSkn; zCK6*~k5@6aT8ssP)7We(aN3HZCyJ058yh4P#wO6i1P_6LB6xT;B#by}|74_UNd6qP z^`FgyO5Hz%2=vMjg6Y{9Da#h+2)-C$PRV5C^@vPfa|oDP*ytcu9+ohL+`lIADbyd& zgJ6Q&za}v)*CL2MTcU`;I3yB^kYcJwyj)0Kk=P6lhjGYkN?j3&x>n 
zV_30)DYodCVldHnx)BoN;5gF<5yP;d5R9DL3O*}AD-rVDWR6WaY#=&sjv5Bz;2y6T zE!3tm6O*|TTw3^Bx- zeu(VCav`qixY&Z1X}IX`Ob5g9$S}w!Z(eib<0E_E0nI48JK0V2{9hH<`4kw`&d%^+ zD&u(HN%n2>3b%)o`Z=x`mW-Ixk2U<`-PPm>KTmqIL}n@7o$)i|GVL@Y>psI@Bc8mx zN~db!iP1QXe_cx~CXt+}&fwgnw>==Yn@>CZ-&Cr+zc;&oci2rI=*)I^hW*agcr=@g zhu!V|PM_S6G}^k@UG<6-x!_k~IFPO|jr!9WxvBg@+}Sba_%HxE6t#b67i*<+A2rbGC*zO@74TOWRHwL|_eB8C>j;%W!}@hi%N z`+g?u8iu{PFlsS%qi(eaTL=({%abQjftWpx3B>GKbRZT8Tv1l+6^Ped194%*mGxSJN8Uv$ksqiAI1b?HY_p_bB|w9ChQf6*E+6&*i;W=5HhL`rBOFq z5tqkAsO;BT5wpiJftWpu4#Wb1E6R$!0`W1|Kx{H%Ol)TQP@1tNHV3GP**nTAL$(HD z`Y~TyWX3wGnu|VL8vSP15 zeB3ntr5Vimk*h?ok? zE1@+^1x?i#0GcQ_o~i8D8i?8Bm_W>)MF(Pmz!hc1UV-?;Tmmr@J|Yk+wqh>?sEFAv z1(hLN12KIV6NuTc$Uw|JenpwES0Fy&9EeR_9VW0+btsKmAKxSR`}-iNg5ufo^7-9i z+$O8#PK&VS761F+su=^dgq=L-Nlso}^d(rVmoN`b-sm?&LNNFo8I%}ojtt}Nl)GU( z71L#1cCa}ztRvtYSpgbsj;u2XI7b$ngUyj8&2o-q8HnCkpxYKkUh#RUCSFLVF)2RA z?P^L4kf)#y7IWwm<8E!|v=zI30X zcNV)EeY5?d-g2C4^!=X^e4jD;lEAb?<8Y#hzj`pbllkwYF}fOjuLfVT>7Q&^9S+Fe;tRdWw4>Uzc(wGUvC_ANm_z9c4*^Vj z7y9BQYA=927kb0petga5{^h;#!sedyMM9$dV>34gh z?e3I>pwgbWS9JCo&Tcudax=5LBbzrBR=2RJ{2zRFk(}zAItR7nc!_>ZIFCuRrk)rl zF1~!C2%?##JHL`+^N8Hi6K6}ej@+e|Zl}mB-A<8Ox}74kbUQ_A>2`|D((M$frQ0bM zmY%@U_Uz&gG1cqf;M#$R`w#vqyf8Db>U%r!@_pp}o!MY^Q82axZ{^I9p%#0|F+fY* ztzmCEjjxA8@wLP8*7`>FL3*^QE>Qo`BD^^J*u&(#p7BHztB+jM+3sH;&bEHnLyxQ< zUAY>%ozu(z-J$y@Nb@C`mnMA6Ce}&HlAc=?uLWi@$PJA zch=3=pT=u&biH*rs`HQJG<_KP&rzPHBt+4vm`Ws>XwG%g=V-+q=ZJ--Xax&D!sjBm zCes#Wifhzuxh{9l#Nl+otr3PohlzlBQgCxnC$&X_-_(}YXuTvV~{0tnZdPE z>Kg{k?R@Q5H)@v|(RjZ=G;{}se4d%s@%;IrB z4N|HaZag)3qF~Od!J^`m**j6Phfx=!t6|WhE`{|}G2r%-0?Arj_!a9HcEQ zlSXo_`14Wu7pu|}Ytqlhq@RyVKcA3(zE%49HtFZvrJwJBZ^f`U7!77QJFga&Np#3f ze!x=l8tbRo=9uYpaBgJ(d|sw8`bTZ_$>-~iK+6dC5m;VkPgr>__Qp`2?L4r&;XVS5 zx$N`y5m-24PuO{&{>?sbAAva-d*b^A%~g-F$^Aw5F}7zS|JYp$YA#qh-Aj-YdA$!2 zVrU*@n2nMRoQ%x9h}m#tyyjDQncsYPGUa>_jFE>q)|iCut1lsuP&p@r>T?8GupkU; zEZh)Lm-E393M8X4pb2xr$cR?&-ZJ9I_{pe@q?~1A>_{L~&IuzU7OS}#Vbsr&Dd&SD z6v&Rsq~dKyEZca>h$G`CqcWRuGs4)BK&Y70k=F8yk9_uexIW8^y)(mpy4zroelv|f 
zc76OC*Te5bU@M^Wn7Mv5zQvXcnZ{{F;`;IQsJZo{Yj-(P=dZV&kVVY#`R?v$aCSU7 zzkc-g2%d|;*70Pkf`p%&Z6nf?dMGz;AI^EX-H9^K$veoAaMO$da`5zu<#D26Dr&-n z?S#AHvn=1aKaex?de_+qXA~<`^KrYek=dp~cQCg6lZ-2yv9XWmJgwV#%9M{+5leqA zDq^2y%ve=aJm9LL;sKr^q>5;%;i4i|l(@wMRtmYQD5uVIRL_f8_H|Ja`>eByiU(X( zR6M{lgj5l&bX-)#Y9dD!k9E33b+-XDME{;SoWX#;Lnh%iIFTK#GFDYIJi zYW24@-szKVUL$hIGpn~;f+G7Yv$d<$AL$0I1^U(MPwSCat3R>^Wc8w=m~$;HiU;^a zj4Um#R(~!kDvCK*6%`KzP|@?D`h%ZSzMIFr@=Owtu!B*%L}vL^t^omNsnQyd72ZK_ zNMJ_t-pl$?!}Ri%H~sXar8&*?*r&|XW1o_z_y4uvP96xN4Dr8FtxPkNlVG-F`gijm zuT6mEd`~Jl`)~A{3-2^}g8{$Tg8FCP^UG}ARU)K&ye3AlyL?g9n@%pMQ@-<2iRvG< z;C1!sQjD3}(x%7bOhjHZa`8OqGY^$$aE?}pCbJ=gi*N*(cs!}#9gUxhN<=6}Bc35f zqxudal3-%;rh|4gX+A1Z{Yndj#UiJuE7Hv@>J7EcTqeTA6@XGMut)soq!Jm*0SUK- z)uJ*TPk@Q4z5@zW&<-e2QHcoafQ5*VnU0?r`5v9=BsxA0$gw1#I$lLVfr(`MI8a4_ zNWw!@CFGJWyyHbB(otCzE>;P2v9U@b!o-D$RWc=ftdc3IM23r1Y^@Pnl@r@4%$|t~ z5vzDA_*lhLQHcl_t2nCISS65PVnP%PA{`$GER;s#UpP?}i?Qj!hqzcJknj+dNCz(} zk&a4KxL767#l|X$2oo0~R>_p`u}Y?-5*aR5u|;=utl|hTQ6XX#PX!;Vcq%Fp;bIj> z6&tGr5==~pSS8Z&almF`Re?+6W0gR{LsTLiyr@JvDyzc9DuFIGR!KydxDc^Qri71G zG9{JBaIuQ*K0(JSjsO!CB3AKK@Ue=gq7o4ZB_j{x6^zFO7 WNq@WBpG?M+PQHZs+kf_@$NztIWgI^M diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587339098.Akashs-MacBook-Pro-2.local deleted file mode 100644 index d1f73ec681fceb20a96903b3512fd110cce38d52..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267581 zcmeIb3zTF>eIGoxdvN?CE1dG-BQ{onupuj*D^HHZKE`5*d$rG0aAZ~ff$zwpe> z$t9=8-Tv^}+s^fd)7AEmzUa1Z-}cQY``~Z?;Pu^ozjep|oqcZU*FL`&#&GHOXwcs{ z-#OPEPglX(BVgYHJ}>}asr8!zvNJ)6_>+r7APBwGGHIM^BOOt*KYoypnmc5f12 z374-PiuW)6HTf@!8;i~57r~zF1F`I{umdqWW_M%S9}WG7jkLo?;;@76soo#}x29o# zKY}}0c89&`lcVtyoyo>%yVu(64JW`(jMLH{kiackTiZjQD(JH!6z z(Rizc#kuE}(QqaH&e|_AS8QuNZ=Hg4ZACpj4%q!+4 zCB+;7SY1rR2>Q3S3@|_G^)}^Z3rG zB!owFp9(^zKUA4Kkz(W_MfhY8!ENmf;^tjDgCw-+h7cjrY6wv+8w?zNy#Y6Sdv4sk 
zd2=%a*GMHn$RPX6z17FgjKceD1TVJ}UJNI@<9_$lpm(ypdg?$^N;S|+ey>UkCa#H@Vfs9KGCZl+70}k8q)w+2y>7NqFxn@1S%&%SL0YBX^8&swW!2zuVkT_~t z>vOKGF3UYmr|6GF@CqA6w*+6a5>DYs&K&9H{t1hmaaVVW>XLW?e@(<5IjItAY}@9w zRJl{s_f57(D6Do-*qh^jbt?N6$*b`1Bltd(64sGxqtYPWn}1`Db~(%T>MhiDCG1C0 zyW2@S*6uW3i0AKq-2=B+^AB&g2p=c9!?W>x`u~Y7^Th-$0gN?xCW1Q+qV23yZQEZ> zL-Pu>;=?wfnGT|8+|#B#{l|t#edCTow5gG!gK}9$03?{!ovUnAEw2~z<)-(U4?OmC znr_1IQnc^kr+E{r8#NH|kp{dWgvD_^yV$~qvuWZ}&1El%vE4W?Cw0BICgK9{bR&!% z&_+I#RT(i8TURqT^FC^T&jb=6ZRo?Bau`u^O}&rEG~GaEpYWRvxFvu;y-Z3Jr+0x{1Ti&Nbs=;ZZKOx)@QP-LtM+x8M~(QalyEdAk)D$ z=m7jXb3c_+QM;&a))M!00P6@NCFc~;2KYHM_LdV9-O_};<&P!K8rQb}9zv-O0Q_q+ z+(v{Xc%Y?_3jg!qOJi2>C9B*b00Bn)B5O>mMJHX4Z{0idL^A9E{=f{U1fevCVcPpfzJqx6O3^G%sfDCGh6u6{)G6XW9 zeCq;T=1)NYzH3yV4^~J48A!{EA5uVukn<7{DIf#sc=1CDY(~g7t?&vm)I;*+elN3O zAqFd?z=|`nArTLhwLKAr=1TK17At6HipcjA7_kB>gzBoW!U~Sg0xKYc%oHmigL3I<7j_kpdB~e^ zyIlo@hL#sUtbhz5pJqU;fDEMLg=?@X?<%kv0b>PphrdfHMTSWW61vBquTBg_77IUE zVFhGJo$Ty)71&U-!V0Vi`CPHVDc5>{9N`LMbXG0j!djCcjLQad8U8mzDa!dqpmfIMi% z3dkolHmL14yd+g0RzTSjIu#>UKprfLMlTKxwr91g;Ajn40j4=KtN;(kxv0eJT)M)X zk&@4e5CC6YXu4~wT?M&Hs$vEBF*8<>lfMwL0z5S0p|A(Z1xo#b-)UUT+CPjV*`aoJvSIw?JGE12`ivN zuvZ1CaICO`W3#{t$RIPt3do?E`k}?Vf#8x%$q@4XI^eznGLV)RKdgWZA?GC^RzL>Q z@xnD&m9YYw5q5K50hu2MGFV{+sA_felu0tTI-BTU0YvK*9{h3P|5+->D)) zFdtT6wXfiq16DxfV#W#>8z?k73#{N+C9Hr7!CnJIXXyr&yVWm|-GfhTe0}#kI##ga zNrP+j$b#$OSll)oSs)ongBqkqQnCe*{@em|jNU2#2_2qy$Kd)6EyLxUBun*~T@X)gOqp546w|Dh4k>?yhs(38-s z3jwLIID@|=Rc`}*HKb4};!u;x2NW-dJ@|`P-&sWPxdyy0R2aZ0L_kYJuQ&vx7S?V} zg8hp21$apo=GqPYpNzqWAY@$tG|rkcOrO=C2K(rQizssqGjb8J0lo~zPWr=>CpWJm|sng!9GL| zSy)_ONKfF8=inU?+|Jss+hoSw;S=PHVs0)987#+SCi~X+qM6LCMsw-$$R1P4F20x$ zu^dltFyYVPRdQA`(89P5!r}FP^`>KNuw5`uC!MZ#N^ZwtN{tf4JFu zswl1$wIcjP1Fo;k`|;kOw}DQn$6qZgLDs8rZ1RZ)iHF9D5>zFXw>F?xSt_)r>E!)P zOInPQKW1BTFI=bpUlDB9Edr(>udPs$G(&vcQ`x}3Xw)Zwfc;c$f}_EtN}u*u5p2|D zHRUd@Hr~rd)37rd)FhxgNQzrQtqK9lz+r42>kqffX~0T5XpE>{B6rOxX6|=+@K$@|v**r%J-oG}e{2h(G+T2CVzX z5Pa#SYsJd%Tkut4;j-33KqqHc@8@UXlZwrT)$ycn3oC9mLdLjK0PAq*M6iJ?YCuc$ 
z>EA@~N-NXO2J5U`xJ5tW#16DWKp^6y0v?zmhn0k6Cwmp@WRIiYfmSf5XnSw%I3Tlh zqXhhPy z5-JJItb*T-_zhu7L!cE3Dbcf#RadFa_f(aZR)4QLM>ENzK2;`K#o$)&&05OqNN%&A z`6;a1r0P>RRZ>{M9%KBfurW(HLjhN`OGQ=IIjDJ8kBep}`{%YxX`B6X@!tD--NCK> zbC!!_S-$1p*fB_v#$VOM;nNX+jh2$&?ABm>a^x3nLvw~BQSjO>(voy)xA=(^oNzak zS%}uGuTA(HcxVza$tC8-afi!9CtOLzNd!vFw%_%hE?uG{HZN z_`AiO?314q#s4JZSzl@d-z`_YU_r{T?G9&zxD5Tll-8&OE(f?IJ zTvSt|oWOS@I3CV5@@Rol1jUB}>NI7Zd!-FWTS>2q%P2GVKisH;`*y*79kLB9gZBju zj`U1~+2n~DT*-a@eKzeHNOnIFo+bh|-qVCrp~X>dl$ka;Xw`?U z^pSv)5fxf^=qBxuABG3xlVLh!QOnAh83cf$qp2DP4$ugi9P-eT%tlwIsjeP{hvNt9 z(Uk_w_}K^^^7g81(GSBJc?=+L;NqoN_cwYz9U^a(rf2mX6#f8Ck}2rNnL`a+7plk z^=VI0+*pj@!yzzdD!}9Exd`!|?2G+t(#71X?EFWn=T2~R0#5178P5*mgsedYz>(-g zGudjq*ZRGF_;?7_qqp3-nEg1K7lb7X8DUl^=(MEW zbA2lmbX=@Yf0y71qPo#xVS<=P6W$gAaiUW~-{7|@0>=}}jl@1ja{P8sL7*Um*CqT> zBv!Zq)+8u7sUHx-h*XCYT@~q3q>qLuBj_CSil-e$cY=x}&HnkzCZPo#0OF;ZlR760@hUdB zo7^)9$V+FLyC)JTZPo({HB(gpke5z3b5S+WQrIT%lR-dUItdXI3E zB9Qk(AwXVAR|Osh1$pUcL^1(@LrPOrXkkXRfV_0voEQ5pB?#(EhgT^M1%qN6n?JNe z$D^?(1nNtvs+Cs@>PyF^+$b@6ba>xm{?L(l%BQ5mjyh6R z7Ba!#?Q`S5s!_D`zL$!TR(70nG7}@MF2B#9!)$9b&9r4hYESAk|N4k?7rt2cy?I0Vwj5t2(DAM54ZoV#&E$P zJhe#yWy{V=GO8=$Nvp#E=+8GB;J*z~Z{I8yRGauQGUUHMU%?8&iEKkP4IuoAG5n9H zzIu)BK(Ak+1$(vvf%i4wY+cSFw(L28_GoQ|Yr56_jkQtF^8);76SnIWnRX{-ZO)YQ zm-K6splA@*X{a#lG2P;K7EZ^TVYc`wq6<;@e`&&a)(Ypqx!MYl)L~|*Zd!~CsTMck zRQ$$TwTO7Mwlnz23Z=Xmfy1a*4#(YrHipIfnIDpwN0Gvf6xSGOF4}^RG~kW4USKO4 zF(6dV5VixcVt;eZ@vR)a;m?}(S=N>%6>!CX`ZG7Mq-!nPYHSv#NE9}_3#q_9kbJ5M zZ>q;z%I4*kD{K|Oy3Q=s>yslKc+=hF2cK=o?teQui7v6j9FK|{#$o2a_q2#?2))~9Z?E9oCsct433DI2bF zk~`5Ip3VBIMe)PiMciYbH*D4%OE1zXq zkX4$xqpj30lUaz|(9cOErtjz8S>dP>v*;9g&^KL`wNv3>SM#iM@+DX+G$~rS(ndkH*!fIXQ^0|l?un=|j8 zOyF9Pi~M>rVGO3y^X$oF()8}hgxRVrG*nL}So=hMtI%N?sLic3Czzj#V_GCv}<=h^oCleS_IkUMZJKB0S^kgzYR_V#w0eUh^ zkrzg+$G|O{Gw|)nc+rV2mv4LV>&f^bu*rM&WHOQn!T0USc(N+536}!P?(bRH%NSrX zAT8`egN^*Sm@R7z)Vn9+M^~vWTu;W2mn+e?C*z5Vj#z|^&^;MHE@7tjo(zXv_ns^d z*Rs!^7mLlib=QlB$3L*a4Y)_M729oor*ADerG3bA@1jhQOi%VQ!w$^=9hs%TO{I>c>*9aKH*ww_880}|;evEz{18DpG8xGL 
z9T`tnHTh zCwn@5hu6vA?>6kO;FC=!FSf8Xh=!kMp!5E4X!2R)Y-xrT0vBcVM;k|w@{PCux3L_jCSW9%6jpEKw#XSa( z#1DBZj7}eVt@0COdOnvsQgP)tu)bF_m(>j3;Fy=10H0#sG7rB6H_EKzs$o9fWP+x z6r~!4Uun7?KZZ*gc9tsDh00~@Sh++V^2%i_K~aa)QkFxfGImt_y=^LM!awtSZ;!J9 zmAbf0=JVPvH(Jj1N-_(T1#W4jjFApV!wGZ>7NTOS z+Uw-(q9d#nmomezMzEw+Woz6+CubH{73syG8m9=8p6qGdr9E4BdGhHW@^+Ve%AC7s z&33n(N#ia=>~`*we_G`(gxIssUC7Ms+$H~XFte4@bRTHblwyxHAq z-HEeL|1BsJp?{+Z$v`*K4Ag?d@im4F#1dZSE@O}4MO2yN>U=F28 z1g~NF!LgV9je$eG_gzks2CNp-WbSB=ObK3=+T2A4-)O+CEOV9FZ3&L*Cp_8Ju9^#ThT ze;UDSS&k~}6?H$#8rLA;umK^xW|}W^Wf2R$-GJLY85RSFuK;CN*0NJ{u(As)c>MRs zGgKo-62gbC%#F#4wju5T27W&R$^GLd)uvr(OJ8wtX;*iw;q|mMISkjtudpgjvaa|A zum^Y9m#rUMjN_=qbBYeCw;;4FH1O`$PN_<96u9!4aF10Mf zwRiI^hfafk?9^^Bd_5Zi!Jjxa84QzRC~zoy8EP&~2J?8i8of}$!8e@R3zjvZKJb&X zzuW7AO1d9LvaHI6LrKYp0)ZBdXk{2@r#tCP(R~7=F=;ejO-q0kXvK$ZN&q?(k54qS zGN7@vxQ4Ka^dB26ni`H9@P$Zr!-6lz*#bZiKCdL@&VhA6<<8u)1b$3Nb5XcMW3 zRh8cS?ft<(wodFTG{H;~$-(lzL*RuOT0whR;AH^);b!lt&h&h`U~#0Wsoc}!L>y}{z?IW)CA0ZFowmzipm zfyzs{#^j@0K-YCvM<4|zg|y`>^&)ZkaAtHdga%5EG_DjjkQ_>Y`URcB{xFhl>GbFM z!boC2(S+kHFIG)GZ_6@`#yE}5!dQ}rYM6g(cT?Gajo@Z)uDE1YNBUXziZx<}guiIO z>%5s3)0w{*lREsP@@h=YDeb$F13F#`142^sm;H>=+A}Ut4dR~mq2-iDx)TQ!IVT(h ztof=7Si^G01rb{uHP8YN5rh~i(27sKk0}Ru;p;AN4a=q&Hmbu2EHwwXhJCzjO@$wL z*ps}9)b7(m@2H5kU%IR9_2b`9fH+@IDE~qVWweESOYft zDc|pE#2z+)z_%RRWQI{OKsYsQ3_Z6tnITZNDn(6U5VNvHc8@fG+C--YnPpE16Tb4U zJy5t|yY!Sd`MPh&#+VGX%h)cNh64Y@jTZ@!E)515K(fIbLDw91t=<*vhV7D}6};t5 zvBMI8fv{*MF4(SVhyV>S8De=>*e)3`(}%i}wEz+kc+PFj_cF{!905tv(!B}bC1rWJYtXeLw#zf=pSn1DMe6dBhlz7HY?qsyxJBSXyv+hrb?gbTV~#zWrnVz(Of_bD%3n{nUMulnPQ9HM+^}7aH!eI_voqs3;)v~XjEVum0c+&wxwXkG_*q&p zxB#$S%%UT<%gIOF#P2Kb+5?3fwo7kStRLGYL+vuQOQxZ~KmD;?GJscG?1t@zwh01Yu2VrJMb8L*7)%FtB4+%!O8veicy=#5OV^oFV7*GO7478h*SJjm{*GB<2j z;7Z+hSwzkd3zcL(rM$H)ORbLkcF7>b6oqu=fG-LZU9eq&#D$;I+^}7dO(CiI%iffb z!lXTL!*-9>h3#U(yNK;FoT-C|1`(4LzAo@oE(jwC?1=3$Y>Lp01LDX~bHR3*$0gxH zx4XeZvhu=Oqgh96mt{@}5`K~{0)U4Ubo{#Q+hzEb)qbSD^_a67vgKP;9dB;fF2@@e z9s)tOcz|-z;J)K? 
zH4PDoXTr%?t3ng&HJ9A0UA4hCgU|Q_K z9PmYfq6@Ywkht(unj5w&vMD4rf7#C{t+`>l$Lhj%G2varb{WpNAQA|?$@wcq6?gE) z?Mw!SO)+dZAdU<*7i^b#ykfX;K?rjd$m?Gn9_V%^1IwHcB>W_;IS_KK2p;Yda@@De z@GGs3X0dNKSFsudcHFni@%HQp4{q2l$EX+}oZ4gtnssZF83K|ZQQ4a%xNH!oal>|b zc?)5}SKhS;3KSSmRyk3OyChC~=q~Ao*XSX;*TJ#4Z8&6CLZ1g~@l8PKSznRtBw9Tb z5W!zoJaAM=)a;A3O{Kew)MS&|?D2w(MDJ{b$qC2-g;*3w zAG3@`=vuQAZ0s=|^~+fit#ZpXE~$mMLP6cHM&9?@O4_HqTmp2hS*7dh;?rMIa(}02fglih;EMW zjL|LcqyQ;j<^#|C{Hu9KS6$}Qg%_0rc{bFk-n6@-9^&-r)0VfO>xXEohp@kBeFI*i z;G(xZ=?_Lj%^;t?rfnyxkM4oH6{GBIZz6qk*uU}}E_(@f>*nq4!TD6i@jm)(@^ZU} zlj>^}?3T@>I(ErE-dRbO*m>eoICX0`*V-5jr{mF}v)SA3p$nXb8|OPK z%P|~CaTbvo zFQ-hIxY9=xpGzxU?4HSbDwAA!_L;5ZZW(*CT}OJjd2`cfw>leizJ*if4C*wv2A|vm z_j*|@DgwxAhRkZOW{FFzmOkzfHR-e2!b0M({iK*X$`sG3z`M!H9fu)n3r2Fa488R2 z5|`~}MEDg0xZ<{&Irx{pwFBZ?9ez!>!~)-ez>k3XkO*eejXTeg9i785Q}7t6kC)Rp)^fQq;{0z+oM#)_Cc z?h=T}v(AB-Bk-KEVy}w$sBIwDHFYjQmFPoi*@g%oAP|%HuVsd848-_hmq1L0bqvJJ zp-k4CR~bS ztPZJBYvMb6Z4caE#$xPNHMmVsI-vM4I|2_BOK2pwvLCOs+l)*iHR;E8xY}WE{6wCy2ljoFy zzll|V{lW-m;I@c{&cc0npULc79mFH}xqXn7S|A#|X;t5m6+GgJy9|voo5TW7lO&k|XM07H=bMHvo+f64cJ#B_AV>J9sI#OT$PbLj_96j zx2C<}WHjy!yIZ{pdG+G$^JIj-YOeof6OA#C&igp9UIlepuZF~~zl@EOJfc-}$?K*y zioCHjelG86*NuyJq*XOj#2u)aBJmb8Mcgl%DUyQ2Oc8f?W{SiTd6QD(LI>an;YF-f zp*w!Id!6oZvojg=Hww$Th8Wh;`b()<&8C6W-b@kOyqO}k`2-d= zr{}kk(Ow1z+Xo`%Klq#Q{LIkGZxF?cccM5o?N85hW_RFm#xO}Oy%!y+w9wfYbSIPe zayS%UnjOlNeGnfYs|wyAG;5|r8F8V;`h=eXy4~KByo|Si)fGjjZ&;tzipP7!7?7!Q?6xVWVO8QLBA4w#ch0Z#RaKf~SAPUw6c{^7 z88cNLc~xFw?Wk524qH?5fhyZY_BJp4M}fg()(n!;O4Qvk#Z1)hB9nY!01hI_rv`s4 zF!w6ToP9kgviGWDW-i;~NRCwqN$lN*P%U^wT|=A$&dpY-wZMX?D7ULB3{@x3pNj$wA!U5@jcs@;@ID zf3YGw(H4F_D*Sv*`1w`B&({e*UoZT8gYff>@UF{YzCY|wb9Rn$ODsGJBiT(fne@*L zlULb3M3#N=W3SXaR-l~ukyOdG#!vIqji1kJ0Yd+Xi9Y&#)e*=(9L%qI1QPhkoXkAn zfJ{Db9)W~2@`QN=92>~z%_9(}Ay0fSCs({xfZ0`I`WV@fkvrJ;OQ@-_a*4&Ft(RoT z*9V}5j3S?z2Q^|=%-rsFpBqE1nB6Uv=&|0%OzJitBE?)>$S_P1f#2aU8-|S6e9*&q z*^rTr!+cmW#eCcdF{}{@-C12iXci2iVonY^%WHeQ8k;+b3u`2ZB2gFfaU;aA<|Csb 
zOmlN`kr9dCo_55L@sm-JAD9`r*bzsln3Ic)NCskN_S=dS;SXKw!fpZ~o@_>n8)FJ1ww&_pL<#S0KU zwG^F5zn({56ZG{h=w347P^;brV-Dfejx!Q3z4fzb~Z+#iPhXcFDd>Ct$rJB=46 z(=j?&Gg&|t{OcaLW%VE&z~7<&zvXogpz-(T2H>mtsWOHXDeLgjERJ{);+aq)^WS3h19RG(?%paat2y;K|Ywt~k*<6g_ zHITj;Q({gZxejHaE0zDX(7d~MhCH4IyfpBkX%JssJpj;N%tk}e4v^KP+Y8|q*njV! zyU{y48f^B)W!vFRW$6lX8Z$nWIhjwFxsW?uav$2sG1|I+*iSt$6>%00Qh`a#aemV4 zZN|<1aGEr(np?UIeH#6uUHsZujp%%-;li@9T93xAxBw z0i<6oB0rWNJd!?-;}}t&!Tb~0qd3bQL9q^r1hcS>B5D7r ztM;N~ae&=gg?@7dLR5rpF<0$P4#8ghxk)=;B}6futQW$dKbc;gy4fqPj*djBGzO6G zZgCk6#lmW%y%vqu8}v4&@g=YiZF<=3Py3_cRgh{Rb&>RIFSP@etKAEGj;Bm>eu)e; z5C4m4WKQ{Gqs5DW{#qjOJK)f96wk)J&0G7YPxr>?_|~4%FjO9rIu)uWj_;gOJ}O2W z92aw&`mrc|ICBLOZQ>WeAv7rzI*^1qC_twBo|fiM^f#a4rE?|hd!pCd?xc~F2_5Dl z;3~+dNn~;u{iQoROL(CHqDUsI0E_K~lm`?ZR$(#yc$OV=S4vaTCg)R%XE)MG3^;wp z&xd_z=k>IY&elb_;ROAj=bO(5#iu0vOaM-H$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl z7E@E5cKgFLpX9|tBc5A&0a|N6n_5n`-I4th@~q{Bm1|ep*B)uV;2i$u@OYzr_~_v? z-Rm}QIDNX?J91W4M%sUq+`UF&kvr-8Rwk_mwsfpaO68P`rQfugQNA+FIFM zei30w_E*?}m>sj5*_i*Zk#^Wf9Cpw>)f)uh)->$z3s+X~m@GzW@_vLpAs=OGl%zU4 zI7{LCzR8YO?oOy?ww<-HwAe(w#v1y!?687BmYSI>Wty6sC>rqb2ySDQ=*HDnUvohE zFJ1prPY2>9Qf?p)#7FljqMP!W_=u02YUkqv4Yi0)HC$n=Lq%#n|nRa-Yd8ANa> zwc@uqLz`|05hAS`Pt2$1DY=JS`1J;Ghjcr6R-8gfA-F~=2|^~2{TZAYJ{y5)89q;K zrptibHCVbMY+1NH5yu@Swd$^Q#hbU@+W^gWLau1Q$h`Hggj^i_!SGHtr`T>_P+91` z4>>>ZTM=k>nQ{^nZvV10pdj(oARPE;1lO6;c(!oGmWppdH+;~yoj5boZ2ef4+|;#} z-+}D9GZX%8#9tsJv#WIaCTDb=K-m=uED|rQrf+?%@wI|RKJe2Gvq5F55FF5MU#z2+ z3nO$EQIOrUABo@O zxW9F?zgc$KYw+(Q_&$>o@}+~AtYH}6Fv^dTOzhQLsOw5$WdpBc*un;D@OF#vks(7F zK9txpUrf+iw9iERjoOkxhSng1wC!ojp0({RyBo3 z{)`*@FlK~W`!Ix%J*pd-kV5=I1jcRXBn6s!3LkBj`wF4O$4qm*mq9}C;Rdv<4MgZM ztc8+OiBoWMTpE{5fk9bT0p_2FG}#ER`G_x2~1E%A%tR*L6HKw-<(Bl zt1s|-Aq8X(Y+Uz83JBGfhO>H*0x~koj~`M%C~A4B1t}mS+wo$96cCC!Ui^>(n-TDG zKdOpptIl(1DD;+t!3JcqR!9N)uugXNNP!KtEgPi3iV*6|yxLbXhzf78LJ9~GodE5T z0vl?aDVv*I8+E4M-lEM{zp6j)GdrJDeZNP!ihVD@Mdq770&X3VuAReJWc z$YQr#rhSG>|4Sxf2P;@11%#y7k{F9Q@}LUC{Np?_{cY1k2WF&lr5o8 
zv6kG7!h{V{kiTll7GvDzfWSxr9*)oHnqCxakOEATb5WNDFrQjDSU}?YV1pFoDiHuY zV-t)YGb05#`I5#9qx6uT9G=r?SmMc6NI}kp;DMGx3c{*}DflA`q<~yX9_R038Q0lS z(`knlaPcV>aIu%5+I|84S$-da6;?oRtuA&GSOiI1tbnnBLZh?53fh?>V!Wroh!s#F z6q5{!70~ytS(lzMRzT*!Ms|O!fKb)c53PEz0x~koj~`Y*C~A2rumY~gR4wI}-yCq> zdO)TJHdq1qu#Ok5!HR_!1B|;0Y(}71LG~VS+&D7BT?J%*seMqW!U`)OPwHf6j}_QZ zv%(6j2>JGAWDqZ`fDq9M&>kzWp~jiAxy`j9>5UbTL5$0VHoFQ4F^xzqu>uQ9c36QG zA!LuHM$rq9A(H?btblx&Yecv(ZklT#tbj};AXY#~R>lg*gJ!INd{SeBl3t-Gsa(2# zGNu~{VTBctAzDJGVl0EngMm0S*q+s{g8axKw#?+P0?ybq?UC+90+-$s&&vucz#rvY zwAO~-x6(OrcKKh&6#6;_aQA$Xt>E5MHx z{^!A$jsqL4;Apx9H9Huro3U$1q*79xu`p2D~;PkjbWr*0{JtKCG!9 zYQhT0$luQ9c36QG zpZEM20y1N+5n)oT8OaQU6_AMp#0m(>%2)w;(2Nz3PikyX+o8B%1!PP`k7BY4 zn|%djh{{+2d9WxNy*M=SVzaLxKYEufGdZjv@V){(9ACBQnl77t1(+u1qRy`fq~<8@ z19Isnb3R4P!U`+MRZ?|d0e;Mk73Abg8ZV5}gW;L&E6BO1iWT6;g0QMHTN|w4SY@mL zx2R^UfP|Ud#|n-)U#wi$4Bn*GxJJO19!Jbl4&r2`Srgm3bnFrSkZ z;sSI!+{s|N;(9O)-K8ST-)QX zc(Gv-4?IjyLx`%y(sD-ypK8FdYUU#}Lz=Ds5*a1IY*mX^_)A~jJ(+9Fu2mS5cLsG+7y&wY zO!HbbXSXRcoM&^jtY#1ye&NetDJBT$N|YY9Y-@p;ngnmIB9+9?9<&bZD=4}9YP6M} z^LM0zt%-nR6~Ps5>&vE&p>NpdFX8+J<2Dc6bSJik#va4K?ULYMk4O!|@wJNxP`19- zQhlYf7;J-X{*pyyGb!}#hloV*8^X!8NL>A4Ypd5Cb|(0OlTL3u9*tY&>rToypd=Lq zmT(tJ;F}!^)+}+92;LU)M{z58KzX1yMwf7rd$H21Jz7up`e)8gO@zNQg2@fd<*P~;d7vALR@2v650Q-@i|Y$$;|KZ`yd#3!S^IUH3__Wn zAa^9hWRQL9d(j|ss}XmGe^a?ijE+b4m`ZlB*o27XczTgf!kxpbfBZuJ=J# zoN11nX{6_ya#sW>$CKJuK7G9)8*HVa7UW}azq?8&)9$pt(V1*?2ie=ZUD+|>iU!r@|2987~MV?b*C+IeMWLpa^_~ipr-#4W zX(+wrD!u9|vHLhuXi`C3RLrUXmO_T<6(yE4O?;w3;-RrZPAaLqwE?}#QlULfC+}xk(proBG24oJ;X3{QieR&D5ikXLZH1C^ z$Mi26^$8$gKUJIH=o_O|`n11_V52UpDR*(T@m@9>%kB_03FwySL4V_X2i+?_X;~El zmVralq~YeVE^)xSn{ZzMu4u|61&Mr`rD-pbU zm1`J)II#om5DHI9A<+N?1}+k0!r0cH8e7GL^xTMNs% ziTCE;B;2~DjWb#zhE!}+mV5~1ca@wgh~Q4tKfGdenXhj z5NL%$O7tvb)m3U+h9%k3>hD$OXeN17W?Z&7BwEGbR`1PP%IipOv!D4XtlOmOQ$nh= z#~8mVY|K*5P{0-KQW3@yD+;c9Tr@k`Ket^<+w7l<_ukj*4sPwAvs@(0@-6?yjzNm% 
z`F=X$uhCKxe0HqCe9<;EXE;(!z-zlmOVX*`;wMsY!rf42AzD@XqPAV&>zto-j^stGbmh`E;Jqh;m+A()y+0V_A?14Spu#-^#)2EJR=CL9+28GrLauP^;unQYqRNhwqVsK<={>qHr|_G3qlpS(cXj zTnkAW^$NexfD<9d#&SY!kXtCJNe+oo12m$jL8|UN2JJHu^e|lbq zDqSXPg2`hNr-iXSL#`uB~Xm>s17zrP8m!y9l}Tgdqfebo|NlA1N9 z0=($z-x;dO$Ka9pp_(*_JnSDtaF4gnQ00YFEW4$~qZ;~$5r4P1lil)@qWGU>MJAa& ze4zoIaI#t_o3Zc6i^-eEXD0EL(qAW`b(C-yUI&pc2}8~0c4@cvOW`G}m&E%Q|C;<4 z2|uREFW{8&Hm~USB%3PSqq-`KRO=K?F#tZ%fV;w(8|q~-avWaFN!?zd$~cy&^e+dt zExd;aVF}qcNmwNXcXi^I(eeRUIDzM`+ne@JqL5~HckfKxME_R>aZ#$9q(}H}1joa< zMjkCtilF#VK%J({bFZ}FXe;Se^YV&tsS*SHaH9_H+XeS^$TqMH-WN1D(lZfelP7AB zZ(C(8)cwa?Vrq4T%0wwN?RyXJ5A1(gOvcd%XOz%tszqVG8j$Q!tLDb{_T&!!;x($2e3_?m$pMVRWw`){_phS!~Gxd@M zHuv|oKG1-BLmJ_U{-B30dqn@A_=g6DU1Bu;nSmjST?DP=oj{e`w2u}bCfU2A#-8RE zBX}T))KXZdC3Rf+9TDmKxY?19oW?cp_NMpjg^?LM)xwE`8&6V~U{wyQKL036588qc zhrpauqss(%JUtg7-jjW?e@(iWdzGF4NcDGzvr@*h!y8lp9ElgP{mc7m_lJ*%P(6Cf zU7~usE#BpmF*L5OG!h7sdw4#I6$?$5 zgtvu2oamI$H~6iJ!12U#BeBns9KRh@5GcstbqRkIi4|^uH3>>i>IcLyBGutUS7it8 zs!z}OXoxa`&M~ieiZUXgIFX71WD`2o%)RZi2`wGDt{`17^oPmloYHU5n-%Db<=ZT! 
zJdy-<nc&X;3&WRvi#p^Z(ucT6kARsTDW$vCxptM;J zDAY_<0YF|l-ONQ%!j$`-5`PH-^3q9&m|!@NmkvRC-~b@6NE3m)9|{5TQo1VeFeu1N zMZjrK(n5EvPRY zmvW64=|I!JJ1wB2iQ=N#v!q88>| z1!8!A6kwBCsXAe}L1veOCE7zstIYXtH({$T4M_95stK*v4g&NPdc_cnaoa-ojt2Oz z$SEd>&(0*TP;dB;L;YJ79px`)tg+iS)hEM!z5$QaWsqB>bN3g5R@jOEYK~dRw7|1& zGX7PKqNVq}RE)H;N>%e;cCSzF91&Ht}O*$bWynf)xW!WF5i(M^s ztT{*fn9tVb9Ae9!189%dR=B2H-QQRn^*k@YpEhB;UXf{cQr6~7Ie$sNHVKLbQP}I4 z?%X>Ir{m2qJNFbpBj?i|j{7f77|&Ya95`270g^h*Zm4@m)x8O);y2c+Zp5R#s(-RV zDQ`yLFzS`Vad)7NVex+Ehh*ka1YsKcH@nkr=k#d2)t$x*C?db^fm<|purN1ae53(y zwDkg8(TD+|a)z)Sh!y*rYmRT_=na3?w9m4(EUADi2GpOqfhApQ*;Zq-I7Q-Fk~2Ql zgg4dWEoJj^%N4c?U|naH>h;MH4!r4tAbhqVyZp)b%;Lh7bMT2^mo&h8C{pQ?a`Sqf2+mt~iY1#p7o!4O$PW(4rbCY)iNQ+LutCE1jt zFq3yBcg2MelYMJji4|FHJxdGp$KlcV;YtO%<=|Lfwx#(Re7p($8L5&>sMc>URN}j{ zQUR&3q%3-E5mnwq)%iwvJbtu3b)r36rf~I-E4-h?GmehPn)EvO##)@@PIQN7v%YGP zd}kf*XB8F-C0Jr|Yw(>42M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Ehf8V+FCU(Jb zEjmNqT0Y>IYNTT@l1<*=Nw_S{bpM{@S{<8ABAu01h_0*;`&9MertBiI;ELnYl=Z=q7IJ3ox?TmUUw&FEl-sAu?Z9L4BLS4l zSrx-=dPa$-50uML+KJ4)66) zz%ncL84w-CYJHTIE!X9in@fO_E3m3$+%mVQMVpQ4e2&dE3y-*xp)p&7pZ3gsIOOsj zL0q9R%R{#Hwo^0fDt9j;_RM;+qb*g<5wItdX`tYB7kV;*Yeg>d>&b*Mm}<_mCzDCj zyC)N7tFq8gJ(-LL&z?-m0h5tzU8*q5(5XwtH2C&pQnn2#$*U(524wBpuO|}-i%wbS zC3N2<49sw(S$tdb?#Yg}%DFvEPbM&=a%OW+cC__u=*eV)tkRRU1N3B;A}@^i!sy9( z(TOgXW0QV889xLzdC#6qMiRYO?Aw#^WK~`hE(Mm|-?OlnF~DR%im8N8Y-Ltv*OSS( zHY7l=o{S$a=iIj^0YrmdMAS^m%5immcWWvA=WfgzPMn|rc0*OPH= zM7DXc&*M)syk#kv3*Vstetd z$(gCWC&MAvy(h~#x~sFqXZT_00ozvM=~_=*0eR!~P0B*>v(^3tNL|_<07p-{(~?rA(81 zyvUZ{SAt({+8-ikDT{I>V-iqcJ4hb(5{Oolg^hLT%S8{s{qb?%my0B^#2!3(uip!Q zylJVz2)Ajzmgq1W#hsywdkh|lAM#clXD_xr*T+0FPq_f0I}@#s3v_l}{z`NinZUae zn|#B8yRCoZ_u7G^X=Qq?OWEu;U+CKR{8cQTUan>+xf_lwoJ7QeCKA#*URs z4}0T`AiCohje)L+yU>z~x%rdEC6&-D=&5vrqpmC=;Q7qY24C+>Po{yogF@ zTt?5MOV#I=Z={*11&8Bn44H@>m(LjL6RmLm{AL8NVfn$am;H@_L(cn3N#k;oG+?#h zkhv}1s;dxFo4ah_8x6RXWv(*2Ex}R!geSWimYtqK_GRl@mgg^n6-a!w@GTE%e>;M8 
zmYwR-7UF@Qv_tS@EJ*(-0?F;$tP~7N?W~m5dVvLvKaJqEEJu~~inr01!T}BsI~2bwD-I+%m3*Lh+|Yj(@!A&?ZtDsw%zt+xvroY@OIwXo8s} zl7r=ahrkOn6s)`qpg-K~J=K|>PuCodH2sWDU3I4yQLMqAyR@{tKDg9miE(9v%j#4w z3oNBK0fxgYf5)Y%}4RD-yueP}tQk?uqT)i|umEW(bt6>cR;Us7-WgkXiPGFySlj5WzqfY#gy&ddi!8 z-S5TjBD(32Sr1cFBye2?{$M9fBxF&n0Pb1SCnz ztAiuN6}uY@B9s)eDqv*e)4E$(=j4Ya}fiiwm}E9t5W{gFh5r7iJlgObgtw zUGjLYm1kQrbHjE8ro|r20bdj-x?sBk2}w%Rz1-#%!sT#{!H~fX+ZEXqlA6ElO*LQP zfIc#F#CF@efbHTj`OvO|;@q%ZhBGdRaJ8vsSP8!3@}^TK=hg`PfE%{UuqlQO2gH$~ z=7Q}qk5>#AE|6L-S$VL~XcpYCU6wf^Ncc&*2ml^Z(DCbr?K1qzYClrn28L{NrP_L_ zBeu)&_Us4`ZrCozs2Cue+GGa1bZe6t0+Jv>+hh-4s2mYw4YC`y%gb8`6Tb4+;j$ru zh$FU3Z&iHuV7p|>j@T}F9K#@`Wv|+>T{13TJB7t|$ruXW+_7B);ZjTNuy5Bi#I7{N zWKxdUE_poD2bZc!C8ccZEN?1?7D`t0x-=zZ&ftdalE<+L`eVBsJ(r}#5zr+qA7Sk_ zw#zf=pSn1DMSAiQk->f)v0ZMavRYS+O{Dg%0ox^WVg0raY?q9a`IwaFy0Bd`GRt9T zY?q9oBEkaJrq#KjH1 zj@T~4rWiIH5J!fZ3%1KVE(sU9r63Q^_9OLe zU~CtUY9Acj_U&@KJv+jK8@9_aDh3D#tdXPV)+Xz*-OAo9!DWk#>mF9pHql|>y`f z3_gz7F2kl6HXIN~hMEhu%RDX#7jm!GXg)Y>gXian?Xt`XLBdbcMS$=yCyf{#v0a8= zS?wpOZ)bWYL#|6VY?tGW3lAPD5Iq|8zxnl1`El7kp~mq{298lNKsYsQ3^eQ3CNl&i zK|)tkc<82b>~X|)d3g(A!dKq42MY8;!^tWqigCBWX%F2c(SqCQgs%WfU-gdiLul9N zA-mVXvAAtGWLGjg?_&CDJ%Ycih~G;F^n%VqLoYRO#9dHTz<1Q|ay^HQA&#dz1u%kgnzwoZy{} zFgXD^pb(1!>BF#q76lX`pli)eu(8K<)Gud6w8|~lxTF@6?1~|Ekndfn%%X#?DT@GI zYgXyHIysMA?D3s@De?$yRK!Oeb>l0muHd^qxwcTMQ1b+kVN#2&DP9IK2u)ZlES$TNq;aJ`VR8xYufP9J#e>Tl%4HOq>m2!SH8n#Z@F&WyuCd* zpUODiN54&8Zuf9feT{8WLJQ{R1 zd)qy9`_pjad}pN`TIY%g;sAr^x>%0kKw8@kdy^@;nEhPbxOz_0QQ_;--@6B1&vJ!z zm&x4Hd~p_$884?ynYhwN6Q4^fUF@F8dMcA#dG?vD6R5H;zu*}_8Nu>GW%JIWN#sldC*${mLx zYYRqlwG6#p8+D2BD@Iy;+;M5`a}NHcZ|#8iR)=3x?T|n05JTy($OhfK;JF!udwwQt z8-}S3JJbrf>ZBH2EtCfc#KozaPJx&_?qY)ES?55^5x5wLd#$XQ>9lPFv8IgIWcF*Uh{@wFftWn&9Edpr&nYYRs)&!;24bBNy98CD52+a&B7A^AOy0ki8L}}D zL~vgRkv@`^#92-Kqw+2}%bPA7)43fno`bHHGabtv)1Rmkr%$O2P7A=I~Y0!WM#4X8dkm-r5<&O|;T(wScSNEpXy-#Tdrr zZ%)9x|0E~FP$iL{_+@gJGhMvbM40A1p5#jN^CU>`(ZjM*ZUXhNN~ zO7>+;z7x~SYvE=J`(+9`IfJD#+v@5l-s@x)yQ$;GJb 
zqL|LbsGMzZRRz;v(K;@)^}eb3ftvo~O0@z|>h=kN#fa8*X~IHG&9-J14> zlhL>{>~8fYl2AIUGL^q-uK#5dO=cb)EplMJ3Ou!54YFN-85>Y}=&NX_ z*G-EVc?W9zT;BYy8&~;APiv-#TU9ef;#OvgxP&oNB>ke9B5niC6p6{g55kLBtD2nc zZudIf;bvzt=x;b!)-}YimeyZN&1!C4`ak&kJnUyij~B3QCyehTN~2G-6Hls`a#GEd z`4?+crfmJXk;&w9Q%iSnZ%dFq4?75 z)TQi$_yAc&tqNb8hZklayAQoiG#V@7_tGV3-{(2xcB{ACd;jW@i&1*CO?JZm?;WN2 z3PHUDd;oqN^=g8lDMGqvY*hlc{d_(b!9jjL@jmJUoWtq-BQfzLGIJ{&?2LA%+dI=v z#{ML3!;#g-RZ*3H+JN%qC{JAiFj*<>2%;&sw9Fwb^+1zqTZ&RN_L59@m_??`ub3_` z<1Juyg~Zcn*?ZAe0UA_ZNR9xRD#vb{f*MwpYa(*lR!mJTa><^2#sHk(@%y5{*ip)u zsq)B+?GkH8wU+9z)g&KCAu+Ww@oTsLQDE?xHG`zI5_NYhG=up-3I;VM0e>to_bSSq zeLX0$_bM%WF8h>8epWE3)?oHJABY;fjxuR7QBul^EMBK&(ZNLXffNi@(ctxzIim(? z{VlS0eGx)k)J5njxG|_pBHEG$Z=g&XHJG;!B8xW^EV`%*Q&ljiQJ0K4(Fbp&%o#OE z`yG+J8zlo39J?r$45i@Npj7xzq*Np6{s`sLsKUHgJ0vFhBLzn;I>clZ{8ZE-y7m;8 zTZFx0(ZZ-i(qbkh2ANXEMP-Drf<=SMBo+jpj{sZR7!D(=07=z8(bod zujr4l{X`atf=e*ot;w8%$ zEGKV4C*Vyo2GG@M%ZA61#bQw-CQN7i(a(|?Xa7LX%x$)_;SLv=_k``=KL_SNHF{+3@V5=hf0BduOD#9t)Mnxo9+rs)#-iKt&g;#NK#_l1yGZU91w(cHMHt7pzKzpOwCd$gI$zM8)mZQ!e2dK9x_I ztGCOv`e-e>-yI;>61~&2dPFn5d}UBQJz?okF+K7r{q)GE#OeLN4d5zu%#gZA;Bd|K zpb#|uTiK6SCO{m;lSUjw+sCo4ze z>1G!7y2@rQ5+ULWKq(iP%5#bSwcPs_G8NQE_%aj*3h~ zt`0~=h0bF5+r5K%eADwc|StYWFiMC2B$7^<#y6-R=I$;koP7~|uBES+$oqWHCYtl~&` zh>EAdEVUa+1c5DSBv%vq2=;0nS(L3SoKctqQ`KMMkAqzjUH%MEA(u7XL?tS zdb)@1o|Tq7j1vb4u@g@0 zZI4e+`;&9M*d$SY z9@D~qiMhWM1L~s58HQ+2D3qLICx8c(w_82TjTBS z?r3mkJlSqzaqhWgJetm0H^5SNbJ&|s<2D?LuO5y!*VgTZXuSyboa+sD`|)!(;^k(% zh}6|sS#BaLSx5g^$u?U_N!`n^w3a;rpNQbKc0=m8+8&Hzf1a)+JRR78;*w$x z0IV&iVFdkKZviY$`~9uBad@G%7ow9W5H)=q00p#^UMD38A8v%O;_2?`xOrmtbP~cN zdQSx*GZ?8%o=7qBkRp6Ch~Tz&hjH_+-C+{ibVG;`X*GnXS_T7$UunS2-kuvbZ{FGp z!8KAz5HiUAa&PsqGo$c68^O!$gcrl9-ek}_J?x+AREt1|U_Z*flm1qB)Z0dd5K1r} z3HUj%Z*x4Fp+c=c$!J|cY9%J>!TTC;#E!4_&C}`N>?mHsUlU7lB>pzndM+G9wZven zH|yhiFmAj&GO4VE#JM}|cTv&u#9(wbdnACRz`u{+YDDZ1&K&9H{+}#z#$CM`s!QS}{54Va%1M=wW7|Hjr%Ilo z{%^W7MuD=2LgWJftJB%9NYn(x&<~iDv4LD1l?d_P{2O!B%UQQqbL&djk8(k;n>MVy 
zS-cc4-hIadx7f2y8G5@#7&+M+JsvNn|DV`0U)Yr;kg*QWL~y4;wB6OJ?fa`~h+c(u ze8eV1(?JvseA>LH|JYEiZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-C58qn1s4FkXr_K0Gg$ zjr>O&@cIxI$Mx=FD<96LS%cHne(UOH-b>V6JO6Yej7`&qK8%N;mOcz2wyE-xnhFj3 zOdtW$#y-3$hY@Ak+^ZXNY^)?5wR}Vkzutgb0{Am-@WYr9YVpGm3TBT6Rlw&Xc)6_* zx!yw3GWck-RcafA?j2hWw`gMG?IR6n+wvwh`_@89LfByCh965q*YNu&BkeX#AW2ce z2#tuf|Ap{m7GRFu^C3H!&3T#HmHm~q`-B1SSk+ z6xdMPwm}N42>EtqWDwOR0Jx(3=k^x7kR%{A=)4Xgms|}(XY!_@9fj+OkA(@;T>#{0>Ycvk{F9Q@}LJSYZWZP%b@%-Bmz_koVUCcNLI< zw7mFX1!M?0F9ERvGLViJuEDCjtH5Rij1@?C{d-wB+y1lmY}~W}HduibXXG^jV@qX2 z%?c~9BIJt|kU_k#0zyP*eD+v@4K>b`&26p?NpGxx3}RYzw89DqF^xzqu>uQ9c36QG zp`|9RzN*&P646I*mjm{tq@-LC%HXfyQ<_pJoVFHB5mGR&cB` zR)Aa7#JI13gqaM>v~N(6(^Pryz-C{;F$b)G$i<8mFg8#`%z0o1$0}h3R0zc+ZDAX$ zHxQ0j#R?cCRol-y?JFR&F@OqISOKA>sUO;&Zy=B%g(E3lwshZR^63TBU{uFwW6 zAT#C~5iX1yMhS%#kckAu3JA%{SOIy^j1`biYHU!_D-k@+f}DIA zD+tl&7nbcVMk}l!=b|cBfFBDnOWk%AE3DvnWvl?VsAjBygc*z#P+Y{eK~0^)3>sLy zgK*pdDw@vfKnMODmfPuf{DYsf#1F!9Re%UJab>yd z0G$$dDwwXg9!!K20-HT+j;0L)5eonir){?UCC?%f@Sht2&7Qus5YUs*s|x|Cp}tOW zMgfs<N|@F{-6POgbD)~g$QVA=oN>6)L1;Ce!+f4 z`vSZq3#;26gAQe_k3WB<4oGsELIj^`!0~G4BQ!&r|WpqxN>cH|kE`yqg`P+@DOwllBSx)2)LuXZn-=XtN(5 zD*Y;{D6oXPSOVYhV6RVteZkuz{zz^m54|b6He=R3JsJ15Hha@qdc1V|$^PK%<1>07 zZy6R?Sa|DAe21y%b>N-C(F;x!;qQ((L{3jw-dIYH*hi;XGfPy>*5T55{7bO7hCT-G zh~PHXe%&TB>5ZNsXBBe;D`e1$$%OZ9>_rn^Sc?__9h2-amFxnHDG|$w^d=MjJYFSd zB?B$|<{&JO(Y+}7T`77D3!QJuT@|1lPikNJ^aBX80guA{?kYw0J{xSJi%fdM-lV(v zcz+Y!YO=ez*+&ev7hb3a4x>Zh?KE^^ zcRRhZV>_|?I8ta*L0m;tPz5zqB|p*c?{v2Y+tchYXY|6;$-&lB>Fp+fE;Zmz22&cq zXm`82GwDy!LH6y}qI5cJpRZd}vTVk~_sY#`X@6$t5GB6iJsEpd{eoZKtRhAu`k407 zueNd^mYM7&{O)k@M89r5F{xFl5NR-6Sb#r_fV)o0N^s**f0}XD&JyYz`P0-a2mr~? 
zilWsH{5L=D<`NHOUS?Y}{&@szl?{_>FJ8QDFdQb{`uC!MZ#g67cHl8@TXl#~}17grna zWus}>84YR@&>bYjEumJ0fMwt?Hjj0Q1AeLr_XXgJrc6@scmWA@ahuHS8fiA4`D;+K z-K|kHM}jNDA2$LkW*)WL1ro4Nh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiJV8cI# z;L9&vD^`Brg0B(_x3m@lIyt*~KR*wjRBSe^jwgLbSaGuvGRBnxScgL=f(=|z1Ez0- zC_yKxe)z2jUTJ08*IDnLi5+N%fI!4Y1w1fC4l46BxS zqu-8JFlT6cZ|yjs55Dv(wicFi6YtHxNw{@Q8)vjc45`?Z>LL9@`pS=;=D!C3*$>$| zNX82RVXG^llEBO=_??K~5T-N)TA`2&Shq>lr*NtyZe0Om{Hm}qOF2UUSF}q-Rn|GEc~_5%#(V_-%62JjYj7^! zdtbjdymfHSa*-^{xBMGB1}U27`{{_kMoUR>erqr?@3CDg`+{v~&Tu3OUfV@ll1}Xw zKaqkH?uIf8(VBz!7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+B38O0X1nn6 zo(Nu|8!TN|$wQo(^~7%8Mmd)Q8~j|vzm$RiJ z^YQYX=<94SJ1;}>+{eq8Y99?M9Z|1^W%cR%01v~1@dKsz0et-)v!jaT@PQ_r32(q< zZ6W6`^i@l6N#>=ovs7b`!o%@{HEFE7s?^y(ir^k^ouSGLr&xANjYl=~lM#QnxRc%T zlcM;a$Fx4*fNnThZII2_x97#=&6Bg!_)6)olh8U!xZ|dR$hXxg?Z|!!ym;-hc>nTW zk^ds$M-sZ4fRbOpDdlZm(eFt%Rk%lWRTin%DXP$K((1!+HsG#s=7xG%j2wp-b5gfg z*sTaCW85zVwk^De2w@4?H%V9}1$TAgV^Qk>ESQHS|Eq$y zsHR3af$v6eBAjdF(E_CiiVp?UY05nJN*j*0l3rC`UKQN`NTUw!I|cW3$hMjcNDx$+ z0=z$HaHMA<%qLIOAm5HkQia)|d=j#ZDApnaItx-n(hKL=(qd|DmC8gZH0>(q2Lk(F z7L#%G!5JmAn(C#CkMkmy6h4Cf8rk+7lc2^rqBFRO-zJXoUj3SBl;xeQyL01d&<_>$IhgD}Q-6eQ~$i%e&~Fy)ZIkr&>61 zaN|kp60FK$)n{}?abr1xkA%RSQ=`iScp^O)A>NaHv42h4ZRbByJ$Hhm6L3mr&Ukis zg9?Bn(TQfVO;_l7r!Dkvy%qZtAykjva+j#yZi{#631h0@C~Upl-> zaVQuR+t~b}9XcM3H6c)6N>#1AT2Nm)F6Bmv(WArr9`%Qg#8W;c9VYjh(5OFqay&r? 
z32u&e`fcu<{(2$*feJFx()s%fS-aA*8e{t1{=m(}eB1G$76I zswT8vI|$HI=oRDAml7*$!aEw^zapoYAU->jyh6R{KMwV8RdkfUoUzWnsi-_K4nEg_ zhwC!PZPK~>3qdRF#D6u%EM)5L)=kF0tx>e}zL$!TR(70nG8ZGY6&vx=2tI=jv#rrI z)0PdXJ*m_D>m#0C_)=Y@e_EqSQh2Ifr6u#tpWfZ^*FtQCSA@U3;ww7k+H5%mwrbOB z7?|sS)fg@qgr_zsplsP)O~!RaJZW{vZtznAJX#lkZ#KYx8=~I6SuChF@ndAje}BG$ z6$4IW8_MDTMD^8cbO(C<3Z+@zI*FzGm>;jpImDJd2hbj=t#D1Zy1%hD>Umy(KWV~F zy&}`@q^!-Ea{iJ*Z4wj>qOjKq-MRNToQb!>?A%iXjhs(=ANlW1n9N(@95`270g^h* z9Mx?bj|;mPQM=fJ)A1W?wF@K+_~{CzycvPRs8zA#4X?#s21+<6Aj;!?&CES=N>%6>!CX`g1q1q-QPLYHSv# zNKk`pUDP?_Q%!hNJ>F6_FSlG_s{qz@=BZwv(C@&TW>F-9&o*TDKc(A0@hMEKrZr>Z zTjy|A5H9MoF2XhBb4_9Ts_;+*;@QEf2=A=q4p<2JaP>F8@O3qQ8Z=&jaBRm#AQlC1}o-IRAgI}od zeiAQIHeBN*cd|EnJnO5L$#;ouxSv;8ur_d-7U0h+96TtO5LXE>1svahTEFQdA0!ny zuLjCO^+t0Y{-Ja0P3(f>T9LvYa7;DQF&N1vulFQemS(ztPjan}O(v1fR#g!)i7U88 zB=%sh+{m;#E$nTR2Dk^|f%txp07pbu)`ztx!$+F3i^PH}j!RS42TNMWnYHVB6{tp@ z)(hnhsB*jTX#8*h<#JZVaGRca<>>?EGL&{A^KjG z@>!MzS*58v)=nKbnTN;=L)Di)646Qfxp!7Ls>CchMIQ7`S7q%~IM~%Z>zw>5e5k@6 zb~D-fQ=mjH_FLF(8DKg#%Ow>inubr6WuDWcSgnt;vgNwma&rk#as^hEj9X>`wP>}f zj)q>JV{^^IBd%m<%+}#&J#!xpxqL?uS7^-gkZrx~)Xcid-HV8A>ptE+*|D~&<_Or6 z$u!{5ic#@+_hbUsid^K^lL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$ zc$r^MCS}`@lDv8{VL;Zd{dzKiu;`SUg)pXy2;GwjlVZ5_?#Yg|%eg&FPbN%S<;>=u z>{$DIp(m3GvPw_Z3DA>Sikzk}Q`KX+#^&3T@uCx5F5mXz*OT!>V3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?AbKjnfCn`E(5jH~iWc;{< z8B=uf?#Xb-b??dYknPoz@vN)dy%7D}da}+udNP>?E@)3Ca4ot(o{Ri?GGPp+n)B?* zWYYBR$%NUeEHqR?lJVf#lSw(4M@EwIVxN(G*U6-88&Z;2PbLh=+O=O#CJ+{#vIvf$ z`z~Q%h9g}c=-rcbYU{}ahE&dM?#Vh_Po~+$Xl?K1USq*~olGXkDm~e1fS$}!Jd?nT6&TTiw+kDg4XfeYG` z30y03kzY?HjKNfTo;{gNn%+H`Fk6*{hDt~>9z1(8DF^e&NHSjRGm>vlCS}`@lDv8{ zVL;Zd{dzKiu;`RUa17m(2?H}6)!LJ-*4C2=45^&i+>@w%JM2fbqY|UQdG$ zd+uG736kl_UTWB(8K5Jx6nJ66rW%WDf7AE+YT3ES3r=*nARQS$M39b5MlwK0#*5?DL=RDw&LHL;CaT$oTOBb!0qI(GiQV5xV2z$0f`dSIK^= z!nHV(U4vP`RrX_c;o7Sk|cM)UW2jh#jI~0u#~qii{9gFuO-mIqeTbk3%OuTFTaZnB}fBIX&o2(dx_|Jm`T9SdQX_ z*5a8nXKsRDi{M4tDaUZCHyQL!5BsM&>5PI@D`Mb(7*ANWS|>p 
za%sbn_-aFPVF|amOWC7%8CBo70$)V;vI{xEP_)B0BX|wV501U;ZwwrA-ggDbO|VwX zigQWBd}OZaAt^C=!Z#XlE6ZGEcH4rZ`Uy{VMJzi#gY3)JrOk~Vf|RF4gEHYe5p1yR zRF}385B#KE#0w8en0Wr92qd>~vr;f9wYyqY>qQnc{v?8%SdJ>|6?H#sje8MAh?Xpb z!FL*PnuAMs1g?%>VO4QteeCl|AA9-6LDa`CW@w+J*3cY`249Nc)hrKoA%k+I4i}n1WyOTE z5X4dfRs4A$zOWOCh%p$*C&6#oGqMp=_PlS7?HnB$5N>eTTpcGwSv_yeV0oz)RYfk`24`AXerd34FwM_wr!C^_1=QrJLp zC;{phbqf2#NVc`ppXZAsiTy+qPO!XKHTAqL%QPC}G&T=oNgk|W{%xGff_P1lG#O-?I4o1;goA)JUv&X%SkAa0QZHEJE-LuC3tYppDTa;e zFak@>0j^;mFI$tuWeeAEmzyGfXh1gjmJ49RF((8GKS^tV4W`j_mrx2|R9_2b`9fMVZIDE~qVWweESOaz!bEz08EzS7A!0;`{Hkn~m3=mEY8$-{nO=bv`t?J?l5~xjd zYLHp>gfQVN?;zn)7Z}~JU3%J^eBD>o7`qgWWGDk<%Q~7CH+IB!$ut!BCvLn*faJh6 z1rF2Up2@ zIZ&w#uA{kj#CAEB10*euo=ei=2vkg3URtWU-!Vhz9kE@WN&nQv(JNAympn{Oup73^ z%~V$Fim^!&+O=uMFT8X6tjo)jZrHAwXAaxFU`fmk+ck3Mj_n#ri^k%D?V1O{sm$OH zfxNDS{W@a10#{rz;|$EiR-h$FGM{qiCQF8n*e)3)%ik!t2pjhu@I`^53$`nekfb!P zRRcm^fz4Q`SJ<@Yi0zVj5ZMb!&0qGWMUXHK2yWPJr!H(46TX1hF2k8Rh-eyJ*^XQn z;3pSEsg^G7$#uha88*eR;ea?Y)LgJ#=5a~5(Cu#UkgUA^)o9ic+hv&(f`p%>ivZvu z1s%U`*e=7btllN{t;d{0?9z3^b~)a@H^PG(w#zXp1_%eNk)!9jM zC^B?TDgsOr!cWl={ zxYQCm?AtXB5dhmIL(C1^B?D&q;8Inf4wnpI*Wt3uCdPfhj@T}l5jH`g8FmPw96gt$ z#Sy4j*}7nvWIRRMGW5od*e)3qOI@gNeWhNcE-!hQICsN#InHIZt{9t0?d!#h9Xr?lpV?H;qicDXebO+Van!FliuG$1gz?b~HI z^-X-;|$DA)5w#)JMy%8SVuw9N(F+eyqYz#E()+RFqBte3%7kk)Mh(MFR z-s!M!mzTE?CVb^xd!TT`c8|NA$>7p1lA$gzwo9g=z(09x*Nab|w%Bdou7NjqY}Y`z z)Dk;jyQU!mG{j_xxna9xz%sVWRk8-uOSV(N%P>pztaPiB+nEd;%K?%WN6#f`aRhWp z%S)v4v}K`Kag&qVnG8IW{;7+jSEMd45gCNxcqRikQ(3Jm#wJqxdhv2$v0XFI-qIaOY}ZIyG!_?Z*F4CDO=WJ_uD})7cUk2V-ee5HAau%e z+_x(*Ev7REd{Ln2g6#?d|5zi~^Ib}J=eY+x?LQ?aW{hZR88@7A=f@8aQ2H~+? 
zhBG0E@MqZa@j?TU$zp{I0Fg5+1~bPq85lOju;G9>GSpnKUFPwM;le|Z@?fD+q$9S= zGA9HHKS>t>!b1u=e%(V%pmgX5VD9HU}@ zaKIWldTwp99^2K`6dtBlJxI7=yPSLi?Azt&)gCC&VKt|!oG8ZK|E4{3mvqDH^z7Vg z;dtCJoSiG7&!f8dCZP09UmxmdX8NRIncLvdfP?iMIgyFTrD4_V#^S&i_ z=dz;V&-+$ckFH+f61+h(Eq{yP5WG^jg8y6b96>e5TovO#Z>t!>_f95ZmN(0!$Hr8; zwnNRnSld*(yGTtosm&fQ%1HFiMwpy{98icwf%FkD+L!_xpli)eva!c>)Gud6w8||v z!lV}Ryy4I}Hb#-zcx!6;7bMG5!OTkEZ={e#9k&fZ-`sCU|sY1;Y zK!!;zwx*3Tz6L0-c68E5>F)LbE+xd40Makwimy;~?0?0lzoN#ct8RQ}A0nopv&Y;k zPln6yp{VjBKD`vfvRK6gk)ua1#ZIl-JJ!C2SUtA9HArMHS1v(xpgDJ!>7YD?1_Rj* z_1W9h!+vivLN~{EC+L=UQh<~eWRS9`p8WLH9XnBdY!BS67-e^73+bcF z{*~`=*-Nn7H}C8W&!;ku_t9^Ym)Sj>R9~ZDw`?ZWv10b|?rO5a&J&l?nWZ4+pF|gh zrzu_a8TO|1T+w9+Ds=TW=yK8sYO&@5_ zc6WyT_U3prn~aCut^Q6QUC=bzJl|a{mu(A0h2j8%=elUca3Dp$M*ZmwUCe$iZoGU! z(*|WXti${E!0TA9uFX{C$ZGg(h%lB?f)W-GZ{#@=i< zkREQ{+A`WL-J0)ulcdx{)iw3H#8cptd*EI#i$z5MS604<;J48+TY__nF zIBY*D=8iJOvnudzv2w>@$l8LDTmyt&mt9RnA5zQKW2WoytG=}Z;#(blO|?V*utN-` z!y+4W^MYq*5bpb#ux%KY<@sts)T`XcwI3i57pH1E1!D5JiwTlvodYpP;8Gy&x3gxZ z+qqDInCRmI1!8ifo6L}nffzsR5{SvLj)9nY{8?qfUV*sNwhhFk?FN2*9CDsCFG?*~ z6W;>_;^GJlnf)3oV)D34ASTZ`2V#!Ev&xFSD&k`oDi9NWNX@wZikQ5AEi+_eAjS{7 z1Y$C*V<2W8e^!~WS0Fxi!2&T>htz`W4aCL!Dl+>u24eEKOCToCItOBoz_ZGVy#n!Z z+dxdwH5VL$2w!TmhNvq;+5DjivbkAi$i_g7A9e}EWLU>Q%sl?AGGVVkeEfn1Vyq6S zQR@Qn*Y?2uWh};SRfF3Er2~qOup{t5v4lo)EBo^{vB8Zw z6Wl=laDFJ zFfM;{0_OcEITeN~iTuPble?Vh;=LxqH1F{wSDK$ELAtO+8GgOr3=zTLb0nZ?usIUw zy4V7g_{x#s3Bl$_P@RBtB&Bw+Ig%czm}5(|V~qA%o3xC|?T7qilJgZETLnZV;j2@J$?-O64qq zdMDs=sSNi5Onc}0{L#hFgFWYZ!`*&-*~b3mz45}vp6&5gf5^TXu~$`O!|yM`;f&-7 zME6vuJ?oFA<4Jea+wM=vs~2xyBqRJ~bK@_XXpBX4-p7HBDyY*&H6(W9rEHw!5v`(2 z-Y~6EyHKk+gwkinzNoQzVwio0J;dKLCFn zUdUS2^zq(KzuOyab*IC@W?@-Z6T@2BcnLMDg$?Qd><9n)BJ5{IkC(7*CyehTN~2G- z6Hltxd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_ zv^6`wgN*iaIM_K5G5^8egsU<`E5AV$FW-sc)NC+2&zaqU#~8yTmFZq|q|#D%bJ&|s z<164${G9AirtE|G09i$e313@;7i1s154}z_o+#q?%4KNZ=Q-qdYq#8c|Ju>krEtDO zcEVl?=b358jiY-t@5dg1A4k2KU}%bvE*e`s5&jP%ILOZ@-ba0ab2y!UBqp9jW^RRp 
z-SO^hXLr`k*q_E7IJ(w2998*;1g#DzUyAb7B>U z8hcr$JIo@}YthL@qmpI-TF}tExxI zJAM&-L163{Wz1B0-XI~GB?7d1cHN6AMrdPLm?cOPMs8C@Ez{7OyR4=Ayy; z<$)mnCu;CI%A8SywEh;^yRKl*Meu}fFgqw|@OsLmQG->F;xYFo<4X3 zWzMKU+V6<$-Jp$zE=nasDR?$GH~d#ps*!YmlyYfQVcx495)=JV#ayb%-_H>6+|p}L zak)jt}%X^r*8awUQiJFM@{t6=c|rD_TgZD%_EQuLgr-V z0S9FAdGiP)oRKHYBVbP_pEr*{oQ6E{{hVC!Rsm*LiRoixM@H^oKPaK5#>yoYi?&{p zAzv4O7BY%_W**dtSuu0F+kI{fwPJR+RHDavA2X@f`VcAR;zEXDiU|C6huJV>yyk-* z#><9`bR6cxk}2lnMu=gJNa)V$5<;_J2o-a3&{^I@;MLgNNnBVXK@^F)n2#GFhBY4< z6=9m2lZ%W<{PwgXhK!$#iu}OL$i^V_fd3{G28=pV0? zPpw2J)34{z*93ii3;Me90zki9OqXufvXz^IS454)r9I7k%kf@7$1WE~_{IC7iT3fK zhu#`R@ji&Qi*L{6-(C*;nWT+=iI#)1-{ko@{mouz3{xsGz@F^G%=ex2`vb5yKh`k) zX&)@6Z^58%6Epz?ETiwx1Xf;BO#Di?qH`!}G#B?QE$`cZ;Ie~<;wzqa)yh@;9SV!5 z$Kzpwj*?qBEF3RK{{l}zzL_dzcG5qC$4Y0KZ5~J`fqux*ZTj~WB#c~$E2z6dA01@p zC1&t^9_+3b&kHO+?1JnS-6H+3~u#mK73>~RP9zxy$*8p97O{X&$p#_^%sEX;p4lLo9%hJ($ z=#%N!)9Dk)Uy+S$?QW*er$&yd_tA7`j81UuQR`pe|8Sc5!}AJZ?uUJyy{Ryp%Q3tL z(l=vD%;_W7p)7Qz^1lX}clXbd$J2n920kzA>0D{?;Z9w z`;U)@Tm4Ddc6d`+c`oD^_2DncV!F(Q-08CW&`yr=_Wh$l>Vc_o38MjRX$ zbDR3HD1A6{1rlxI=ffd1DHJ-8gt{m|ru&{&7Ecbgp5mo*CG2~m-{0w`k(3D?<|5!Q zWYi=wIfDMu8$C{Vp#h>uCTjr8ou!lq6du-KIsJH+9dlPoQ_>daQ;KId(@6|Dea2V8 zKD6_CHb7_VqTFzje$VsG=Y!%?5`Lxtr+Sk?@AMFDaX~RAt6y+dOY5bBe4G+89tw-8 zsm^+XQJPQkVxbW)tUMpBwLhL(PPW~V{S)%6*3#-VtDS3(cAkF@e{*EA**S9T$l2bt zTi2gC)9W9-A^yzg_QG4>Di(ejpPu$7=X$BqV>Z~byVs)D50o|%ph9o$P`rQnugHH9 z+G5#ky^ydb`z!21%#Pj5Y>gjQM?ko=!*WuQIP9=@x<8!OKkV-bS6A@ZEJkedzJxs~ zA7yNmth&26Tj6`Z$<9{hQyf}3;0+B*P7CJwt&T!if=(za?rP)I5X4j zERo#UwVvOF?7Bk}{zJrHBqXz|bo!h#mfVLx8u8Lv`o`8eUnA&vl3XF^%*)R<%m$UI zLU2I4{jrW()_RXCs|rWf2pT>b!7HrnRPZ$`;S`?a%#m*H|H&d}5MUL5O;o+I?!Br~ z$SK%DX38S6>18^WU2L{js(AKvdJzvGhJL`LjC?5}CTkkTH;(e7B@=r!7yMl*taRXY z4Qn3Z?G|AqLy9uID6wVUdIFRacqZa+*p>t|v?f`xedn%rrd@g3k!M}G%Wg+92R2k{ zp@B41n;JSR&a^H3Gar2P=~gIyB4sxcj2B%Sk9fIp4!nG{0k1bWvz1=7tvq*Yyq8V0 z2B#})*Rw<}?caUvgJKErbR&#S(}q5bhoF`|3?bB1DR3+rvAir4J`+fQw6PCw%3(yw zHTPZysBU*MlZRh#z%2p%88`T0%m}skVFO>)9lZmvf<%`N$0vAQU-ci&eN|4AbJ3vPz6YP8 
zx5K;57+lUzz9-chgUcUFoHbq;r3b@7Ug!IFX4s7gN$@}u?3OH*0x~koj~`M%C~A4B1t}mS+wo$9 z6cCC!Ui^>(n-TD`KV7hQDf-ATsc2}!&&$bFhce)U6;eQk)XB~sDX^ioZG#k85vmI* zAcI>W1?0mz0oo%4Hq5@}0gOn26(L`w zfDB?%1=t`3gc#R`aOhabpcyG3pVYXu zcA3)0N!%a^E2Mz3CG;uQk}E1(FAfd12R2AS{^})LjB%R;0wV?a`7oRd=~^xuqyW=Y zyQqe~1Z8vC1DC%CTXs$nv9Ll4$lI{lkplKe0e;Mk6y)Sf8rOQF3wVGBQuv`pDPV&X z0k8ski*>P^z#>T6 zVg-y16kN^&E9hj3i1D5RBUV6#P)yQx4~NyR0{R{{>(X<^3dkJTKFc2~AXGK=L%SZV zfQ-!Yjy9#VZz*qs(s}uifI+fUD$W%;j zxPcW`K!(xD&K@hUp=N~@SP}B=&B!3Cy$M!W0U@FjpgmS#Lya?KbDL{J(tB3{8N}G; z+UzPI#55wc#0o4Z*ep;IxI!Q{a}OdHs}(s*#JAeVvM zHFZk2!3uIN1P`6*u`WGl ztbojc4YdAP0imj?A8Nu1$jB@|epmsasO6;=tbmMc$BPYCKq%^X;To*USb@z50rnM; zVN%h!;R7uOtFXcf$dEeO*<%GZ)U2=qD?+}#85zV2D`|9RzNE5MHhVO7Hv*kA?6D`N$?MKxmuB+N*#0>0n(!4a&mg5wTY0g;Ot zD`0G((5NS905({`@k&?$6@oQHK)Fc`>2CE)WcT1JAYa!z-=C28PFTNo0}P_{AVKf@ z2H6@$j~bxk6kivNN9{I)cl?8&dpay@adqRoxI(AIoeHKat_Kt0gurI+ zj0Qw38bBPg=`U~i&y9d)Ptk>do`haq2uO|X>->X@;?M;6YDl3{#GxjW4=7#^d+-;p zzHy4+4;pYss4#$0h=7)cUU3LWEv(&|1p5{33-FSB&$T_0eFMVL%7*bJSb~3~E44-N zsRkUcW}jAgTjFg0#rJ$r|qQY{2i@eYgt1d zFW-rd8kh}c=XD)&$(#ze^_;ZU#=l{ozl8G_ETEh0tqNO13w#Vi=19e!3w%r?%GTFd zs;_hwgKf~wU$UrdCWXHJ5RnLeO*p|8NntQ*Z})qn?i62m((O+s<4L=G{Ym*ol%%4- z67FJ&VRplVEsj#0N(66<_#?TMJfJ+#pP);*$URx&eYozC!a#sZ?$CKJuK7G9)8@yIkf_xP2cUS3jhAxcR>`pg(!``I3`FMZxiSBfF z6I~dDZUoA1?)sDv@|2989N#t^_hxN!jYe`xas{d+h)4ybnU})RSYfF_I&|I`w`iy2 z<}PxtfumLNVNW>>68)4k)mMq#$B{yl3gSvZ!1TtfI1L1-{w*%bO7~vU*Hl$bi7qwZP6ksNz-V{7 zi>_*$q5`j--HzBkU$>?hRVW=r>SPNGX@6$t5GB51eEDNlzu=cQtB6sxKZz(;umA5vaUs)=&r|IVXOj}xOkw0czaqD(^#w|3B3WBBcyM$?r z1OG3At-3|P6y&uPO3oeAzi!kgfPno}ZGxjkSCu~PFC*Bj%WBG9Ty4B_Zleeit+Hh` z(k21j8a*6rp6{Z2=BI6|LclU`Xqq(KJk})+_^Bq`7l138GD*SX1tiqPZ8EcKq}hDt zuR$I9QRFMZ72%H?ffX~4TI~V}nDOaqeHf|`KQ3&0aCB?x0C~+=gHt78X&UQFYxv}E zH(##1+Wf>P6Qjc zD#cr&Qt(?5ywb|Fv%$Knbr=2RYMw!aY%MJ3Cf=KWlW^;rHqK~?7*eqr^^_hDVyF2z0kS?o--m1+B;$pE zu+^1NNnmCb{7%Gg2vZsYtx!mbo`tNsN^J@5U>m7^w>n32$)hskdh1OrSIXiO&?*ME zdT-WJUPp49{mf5c-6mC^!l{zN0`?f=SA~sP${7l{qFpM&SYk!NRga5iC;L~nOKDq! 
zbMfB$`n}<;gL9UPWLduD_ahmks6u=xAMojjzeY<*aDHnr&OI7hEBk_NXwGn?n1I)I zk(Q)WyTwnW;Do!O%tEwOFDk{s7W}nxm3em3+Zv$N%qbVJJp${S;QMu>q~mqZPrFC+ zB37}cVi)k<6TwS#gWWb54)YLaW<9Z+cOhr{0QPec|5grGXCeAB50Z@+nb~#IhuS5F zl1kBb@$o%U5XgOYLKLp%AVz&Ar^?cDpKBq>La&N`0sdJ7PKF#C%L%nXZlRt2%2tEiXNyI$ zIUda><6$b4{s`?l-aOCjK9(SP?&Iasc{HeWM7>ovNkGIZH<%Lr$yQRjX z8v4nIzgyhNZuv=3{Ew}XG@zVh_VD=zbi>JNgKWmWJufD2o}8V=S4w}Kgw|2QU3eWt zzQlN&txjoc_DkT!YnR3Qm;Z|V7YRQCN`3*Sl(%_BzbDyL;U3jhStM|$_?r#5E1bEZ zUKS(A;l-TP?G?ndlpv0FBP;w;VB5lbh!B>LeUpS$QgBxsF&Rf6oKZrnsg7z-5u6I?aoGlbtN{;&)2rA5=R5K4^Y621*FduS$?!B$ z+x4C%oDMCHa-+<&$w8|=Y^4tel#HlQmG2eiwM9GRhu{tIsW2U~sQoYi6s4+a95_HD zXmZFyOH!+@Xu)!Z>f{l4D1JjdI_a*ej01i?f(O05DqHl!a7G>j$Q!tLDb{_T&!!;x($2e970M`pMVSB=2C}J+d)twMx2>?$pRa*CR!0C`d|a@4QYfY2g5$P>=FHc z;vX6qc8SsWX9k8Sb`iA3cM?@{vjJLwm}W1R(h#928{oYWJP<@`DXi0$I|c{E<}U2+ z*S!*uUYl@tI4fm5JB$;u1{DBD;zgXk-l+0w>#f+I2%&oPmOHoHfK_j|#XHc+7(sFm z&quLB;a*_`S)riQl6KGatx(W$u|EA>f+vXT#yktTtx&uz1mZ-egucOVRRoSFmK%wE zj^y}_pn^a_2Cqx_qe!f91FT61PBnAy>Ze_`xUL{wF!YDX=$z7T(3=(Li{;xar96@ZcI3jotdr|+2Ng-0{qvVi zLJK+o#7i|Nbxs8FD$WNpcqNs37?79FGIviTP}-~q6l$ib03a`&Zswvz!i;#|Q+DJ* zKwdft5fcmt^3owl4;%pG6=@=n_roDTUP@O59tH(@>1aeU0f0kFQ&cE*wpJ}*E*&@L z#X9ALL4E1)D#f8-P;6uKhj!?AG}eSbeJNG7@@heS>9~{|Wz<109X{&cl;bl$ZylUD z)1RO%p?yh*$-V6|>d&4WPtZYvo8z5+n>&~`KNVGnBowAEzz^_&D8MGOQgy;`gUl`m zOJY`P`5~kfs1xR;V+Q_C6Si4ePW#CebC%|JRTEmT9R%np6if>F)~!K(M+5v<*C@U;+I;T7R8ulR~ixi(u)fvwu~ieWAqAUO2Oz7_djHHHfY;i*ju*t*Ac zC1YuI7y$kGW&`}UA?od$#e!-RKSqZ9_vb5E5$aYEC$f&<|0k-iUZXqE>sM$IR;@Wl z`81T~- zN_jH^hf%K_j=LRg42$hYGcdAa2ZTLrMLGf(yUgnkF!bU_e4+mPM=lx}y#r!cXa){KpBox@o{xTw#% z2-lF$HHGP`!b8%roNVB${?^fn2K&*%%xJnZp7y)xReurudc%I#wTAJsT#-vG;ol~< zS0K(!14RVmB+^SIV!FGvEW`!)nF?5#r4SW)S$5f204GS^5F%^Hi~v5_gtM%@>sAIU z$)+5InY=5xD=vhX>|5JPtjKcf#rlPbA{YW?;? 
zCBD0>6_5%`%A(g6f#ywAoo|H4;z#OJC)%^6e*0oQd;8)SD!iY>GmehRLYd=uJH4?M zC%N>ytgl)o-&u$Id4&aQ1E<=8KdW%?pj<*+CBPJLeE(_vrjLA(vKlB0)f>%q_=nD| zH?a$jYtb3<*75Pt%33*`=|a=Y+o z{BQu}a#qD~o1RhP=>z35ly)NXaOUI)TO-MJJ&(@r-&8oBRzAzJAgeTW$FeFRnTNRGls@&9e&m` z_u-K1-k9Yf+j`rnnRS)B7oxXZkzDug$&R&EHAldnOs0W?*UF23{ymw%wdevtdNN@Q zrkeBY$z;;>?#YDNsw^~APbTBRvnP{sFprERt zkX3rJPJo`wQsjjZmxO6rW0NdYE_nB3yy!%i%k!XLPsR^{P2RI7laWO475nyNJXw|3 zgiC>C_xCL9Wem(G1D08_4h`VjlgYR?BtWm8j2|!Oe05H$3*D2+nW?=e!y(tbC(A>& zS5L;Xu5$MxV$ZE7>&&AklWD+Vd5nt3d!0<+T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g z2}#C-XHO>OU>+Gs#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+osxQgD|AmL49sv;Yfsjx zttS&0QaQ7^C+l!M8GVnk)K7RdXW-kD$pl%YCtD5BlUa(q+?y^Gc3l#yu}$;t$#~I; zE*GRHC;T=RLZlF3L0=*W1oDxV3L0LyKRXGbPufXROpQ3;>c%B;+< zBa?A$NPk`(89yFrV&fka8DmHGQx&eok?b1G0*k8dXn@(PAVQWxip!1-66aAuoYwHCfnLkG@>=0Nfv+@O`;R5=-oJ&-?sd_~T7W z6-KyC^R+~W`6%uzRotWSaQvXR;y8P;?YTbgnR&{U3f-A#eO#ck>+)Bk%gBV;mDnuz z8O*N4_xQbbAZc2eUh670`;AXe`;&9M*uepY7Mb-AmF-kV>CA8o)J zSZ=E3a@N!oiOnwqu>;`m{eVTOX5n8pU5_EdB@H`EmFhy}GT-lR-JkVV z(V?SH_9yMZXfPY}hTU_$;V!!K^p^2xIztO5d+^T(Hefl57g~#F&YZcabtBEs$DtD+ zE#+rEuyPleoF4S1b{8%DS_CiBPC15Cy~&_=de}eJX*~z_r3GwnFzQd@2AV0lWqF!i z-`ZHqe%P9wM|Zh4jw)|QTIm>g6n>wHDFk>Ir$S)g5*h9qr!!6G~ia2xytOe1xNK0p6rTPc6tWcm#wRC z2@g@q^Q(nAg**%2iC}|er@FL-c;F{(Q!yDrc<@INNN(X~rC?BMceSk6i!5mTNdz~s z997mU>VDW7S5lyQn6FhNs1_x^(}3GN85RSFuK;CN*RoS|urft9ukQFk9O-{Yo}n5! 
zk`O+8WgdbrQxJmRi$HP@xkJ`R7!AG@!K+yw>_P_R$`me~87Kvd=a8~tX$?z=>nLmEiVh65i7>vpvjVZxcy&pI_8)RuHL#03olUv_FY7{0hbVC$N) zR>M^_iHzTwnfu&fF7fuEdf4hd*q zjAVJ04TqAF4+R1(8qv=1({69touPxO#}m?MFrL-a@YQH>Fh(;gDjF-x>j=U~|FvFC z3yW1~$46`m3p!=^d?dSR!I$G~0f6vXUI#$4_M0}}Stw9x`0=Jgn@B~Ys`M6b8w`iC zbz)zk31*T=4w&~H0x!%^X#R45!Dy@hR5xGVXnNU_rs_^lcwdJF%R96ac zR5mJaD(XH9e^}Bbd%4%2xiqyr%VMIc-eB?c9GY65fFxN}MYE`|1vIOu>b#_muI;Rj zKnhF>Y0FpYCg%vmIYI*^M;liP8%Pc%KwXu>YRpWm-)hq%{9z>9+Ud{p#gW8*q6sHh zUaXpW-j-z=jd2>Ahp{9N)-eAzPGzQeUQ?=r{~5u}-du6XjHw2VM;yp{EuVl{?zd!_ z6#l#ccX%@`rZayrCUy8nn0TycxJ*`CUQLGYMh@tBDGUfn&0qF`#AFZ%JmUh@Ans`& zT25)CJJCQj4(lWNm~8TjG`-zAeANZ4VL4L=5d(B6?#}Swwp56h;F_U(XP(j1DTmT!6IUz{+NjivNG$Kd=jOuH_EWhacQoVbT z1c$FVHq12a32VSyjiC{$tiJHDYjb$`mSdaDFe(NJr-qH8=hh}O1j<&W#dJO7TRCo| zn?{@(WR^W4O!&&X_CVo=?b6fUdfF*xEwh7`tJ+WE=(li5o8xAi0R5z+ojD z!JPqMGC4~u3Xw- zG8@Zw*bUnygJ2UBb~-u)QI4KV(&7kIOj=%2mZvR)uE7o4<(c$PT^zk4b$Q9d#JL-` z%gt0)>x!|7g!US+T{0K@6y}EQl5sL0lk!{_wo684IaIzQ0dCkX8AHjPJGN^iEgFjp zwrd{b!lp7eY**llOJ;#q70D2G`H43{Q*d#^cFBy0DGKS#0bdj-x?sBk2}w$$s{&r} z+9`iYdO$~PS7cL2YW}h})qG*no+Gx~xd7NM9+USg4T|E3?J}HkK}5t+;tk6qA) zoP80^0XJ-yVN--|91ur_nhUneJgy5D9#WGB3yt;20)-p4%Q7bf2|q~}0l-5r8WA{R zy9~dw+K<$?9CV?XMJwhJYkUgmCZ>4&{jL^70nKgs;4HE1*GmcEfh*?TQy1+a)t$b9x=vE*S@d zPo-tA3ybZN$vI-XHbXpa|kVx^cvIc_#f+7e~+J9Dz7@ z!*;ou%4%IPHc4t$+ph&CSws_#m#wMVbu-W6_^%#Fb8~5py-0_3M4N6l;*Z? 
zS7cL2YW}hhleSFB(GlA{W`pfo;~jzF8$3BjY?tAT3nGCq8BD%h;)<9IAC5HGmm{{z zuqlQO2gH$~=7Q}qk4wS@T?%r+z;bzqfPNhJ?Xt`XLBdbc^+7gx%8uAB!>_FNBlT@9 zK5y?eZrCoz8y6l*HHNc8%i3vo%Xh?fIYz|*;nXHG(5zdV%n-PkgM{0@U0&WonDCW% zQAxPlkuKPzca`+2zjb!p?M#LmHa(G4QsETvd*aYt;I z41!J2AKT^Vxg;%)fFx;^Um`7x%mq#$WJ-gFh5r z7dF*7p2;9^#U-;+-zCc@yvdlDmmobp!PxOk27zg@2Xnv|1&S`%u0TSP(&Rvkg~)?0 zSCV-;?%O4glQ1A8HGkO`4(4N7#>&y{OornZGq#H-=ZNhxoN+-!;_)t>GA3Uxah;(c zM{Jj2Qw$pph$BPI1>0pFmxK#1Y&Vy;JmhE~^KkoiS>}Ww;U{Sw+)@M&cO!Mgb{T%9 z)lq(}RA6j3*E83*YSinvZ5CrgVP?mEA58y6+r3XlTm&s|2jQ8_gXj} zcMNCeN`|je{YUT@70=y}449RTTm+F682)UP;)A61iK;RG_9pyL5M>MCTcwjsR1!^U zvO_1dERqxvg)e$57v9!@JA-IKC`*E9A|RuN9wtiHmLzxQvZCS7`&Lz-X0TYj%>2J*K06IV+-7Zn+UAwGbDH)IClld*hK5OrDpf0=m|$ z(sgxm9=X_K&6cE4@?^&d$3*a5pIlogRj7Fa$S|qJ*0fQ^SK3v3wJ1$@w+C=3A+`jN zei1i(g`#m}_=-<|MUBt8y3tIf&X|JsFt}Hq43}I~DOEHAKq53hI@ZI-9rzKSUW#E^ ztYU)5(W94Qr&ij+bdklDw+4yq<;o?94(1bI2C^IKv$v^-{oZ7R?t<@5&<*mW04eV} zgJ*v6)jW!zI7AY9@WOH+&xShPpY>MB5L_{!pd~qw^30htHyMWLkRi%kWEZvDj0~Ib zVg(odo#|jW9%%;o^wk|ZQGIL=+^ra8cV`Rfqs#u4?{L{$uG=^7>Q%sl?A zGGVVk+-cheVuUsFo1OjoIOIHOUX)sJy%lkB1cuChjTJF@+$9i`XPpBvN8njy#abfgcPU)uxsm$4YTRSj+vlny99 z!j8ZL#S$9Ht?b7u?KUHmNKN{-5LBL3mOUs-#0EF!R1^R__TDZlwR?TN9Kq-JfaF4U z2SdkztSnYv&B|9}<|&0Fz-BEEN5M=WH+fbW_?uV-*e{H52EGwsh0k~IT(u%Sf_LwO zq|AC|+uOUtxQSNUtrl?gy9G`@rWnJx{LKlN_n+ic7^)=l6TeLEa;A&-nh4Xp$CF%X zex3yB!V+cp^?ox%1cT3!fTqFbNTBOt3sB-KM}j8=nhO#$dduZt0W4!!9U%n+!=^sa(7i5mARc> zx~SYqE=A=5+9`IfH<`Ap@5l-s@x)yQ$)%|5qL}WbsGMzZtv@QGmlVEF80iLe{F#?L9mFZf~^Joel?^g=Jk$3~OcMCDg1IHl+WvAN=c! 
zu%8({Uc$DWFus#0jXu#%JgH*yNj012U#wBty!GowCf~?yKC!lWSOWHlM)K*6jQaGTO`GVCO)@{0Dy%uF4Fp{033Hd?$)iv%%~?Yiuk>98QS-G z4!PahE%)BPcC>XVobQmGu$RJlX4-M%=w8jSWC!5KQLiQ#nj)l&#>Tt^svV+LB^AIQ zL~xLwPrQ%%0OxQz|42+ciOk#z2fO3l+0O2)o3THQJ8*QZaX6~-4+&ZwP`(u9sY?JR zE2SMlG$hPKt$J`sbsZo(wkt(rFUxd?S!BBWis|xl-U8NE6`j6eeYV{3M}@?^SBz6r z<=E{|P{XRkO++p`3NF=JSl4ps`c5ExL163{Wz1B0{pb8k2x83(UQWGG|{8itN2g%bv@QIg+0h45~GV-`x{6 zcr9hpWTK>$6$leXb%v{t(=qiL{P?yAd;J=bm zjimddluM%u^Iq+cnCOodOuC34lU1--%?{F|@o~9D*eez-j7lUeW>R9%t(4IxdKYsb zGlb7YfGur;JI!uyILy~9@|G6!pB%&uE|Es^9RBB{;xAT(CpyB<$Aq7c3qQX~`1xAl z=j()@uNQv40p4{5EDlD4SsDd zZufhm?)1&Oy-9zo+n-FvlXkWYzIJrw?>-l<$Uf9NJ?y7D^aklS)A(Cg#Q)<8_?-w$ zrFk9#){e#+OcLh^A_-jK*nT`cY;Wyo=N6VNzSiWZvnPK}&@$?HySqCYoEcBH*N$Ev z!P60#dZ{d0BYreMGMn=-H+CP+dAZ)iOO`KKPTqn}z?)_apsUkbhR2b`Vo@U|OlSPj z&ypBt|3J>nYi(!49WE|W&2O5G4b3(cjfJu0pCnwFjE#Ie=V{H%lc$PETChI?GWtFCtmVMn&Ya)+(Y8*s6#=z%t~h zB3uF3sEE`GmMY3WZmXjF<19zjR78@Zjf%);tyM%HuvHO#fMv*0MYsU5Q4uLsELD_$ z+*U>T$61aF6@4QDO)TOXYMPLat3#{g0ivvmPvzYGV70MWCM9Y^OC*{!!c9I;QXR|j zvs{vQJ(Zrx`Q*+!=4+$Uw74uxRk`vqSkB78?sz9o;IL}*^~!hdhJ8hq_?5fkFWr5q zN~HU!dD&m868ppKtZY=iU#b#^1LSRO>TzIG$dkIvCclx-l8~v1%$Z@LdMa5)%~)BDE_;3{>@kh(|UaLx3f5H$T;*^gHyKpe%BO3wc4{pP~Pzq~Ah z_coyVEdr#DNk!CMMMApGYhnls%;SkSom^0oGmY38%DM)Em1D49CaRxqz-w44cE*5P zO>9e*PB&tk8T2?2kr$0zJok98A!}#B&C#f6G8=Mm5rzN}k0%v(M`PzAvmPf$L!QCd zrIVxaB#4-*JDNbp+0g_#GEu$10o>w~m80==GmE;TvYCrSh`0h!$^|AlR*;>gBr=ue zBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz=W;%9a=zHf(C(!Y6K!zo0ICm2j zMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvcu}Y-m9;-x3GLgB(DzbKoD=CL4 z!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z0>&x|+quUojs&qNhnUCHaTgU& zM`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{-s~7@AR1UFgmiPshgrO|j}@ z4X89fZWV+Ft$v6_I80!UiVhK47-G5P-VkJFGE4i722=_dDz-C&yDP*ri+Vm3xm<`3 z1)7~qj2~)1rC`E|k(xN_h3<5oB&9+uFa)Vwbf*H$N+vch=cNp1w&(6|3>_l6FvPr_ Pmz|kJe82GJ&o%x(Y2dEVUa+1c5DSBrEdA#r(FnS(KmSoKctqQ`K6(MafFqm|IER&O=EGrhZw zdb)@1o|Tq7j2#Csv13l0<6~o=lh_U?Bu;n)4j7v#*f9<^B*w9^Q0xFUubcq3iIbCK 
z6UV80>sH;Wy7jtMb$eDiE6y^nzJJyK{{OG)R$V=Z|NF%s{DGx?b8~CI^|kjseRFcj zsd2YIyzceqdc)~z`^eA#=>K^1TT%AGV|%}K+rGPQKbC!N>2qJ$3vY$1wnu~h#`(^< z?s(D~otpH<=epDWXc+ImXVBf~ogED}d*kK(uxE36e!CYp4oA!12M0T&o$2<@v@Kcg`9-iN`#>!FE9^kbj@{h|Gi;g^dacKM<6&>mx^vi{_Pc}rTYKZyxI5e&ZFP2r z{nMlIRtt-B&n=_jWV(D4EOa&o-N__w!J+ut!DwT3&2EV07sH-&-N8;TzG6LIY{v6Q zU5%y1CbE(>^p90{b2%xgdl42^vq#`l5xm}RNF7&O{bB6S)0Kp$1M9FD#dFK^|LTUH zd5YyFT7G_smw9Nc&V}Nm`(#~wyw;A74>#a$e|_AW8Qp!@=Hg4ZACpj4%ro?ol41@3 ztS+Wu1pQlE2AH4pdYf_M>bd2;5S>7QsOjSXD4?bE+9^T!Xd{FbPj*hl&Eq?#k`Nxz zeKH7{{*Xv3@{yLxZ9W}Da9cZrxOvykAPH@{Aw-C@8bpK!1BYK}z|G#C8#iy>+zi1r zQb`apf$YyDyw6AQGCSeLaI!n@cTWv^C)?E`&}Fb6W#4gcvoq{&p+X2H7>@+}JlMA} z8ctE6)*ENEE+@4T6ZPN&4LD@SSL^1}@FW|3ZK99H|u5j1jJq@!#WvUPy&`JP_qn5Qk=PV4l$7xgn zACKS_Hi~WuzGfwy!qc2N(#`!pTjY$px>Hn_#0&UqqUx2CDj_G;-4wNTlkE|Tnx{}y zqZO@nYrV!3>83;gG4y>VWvn9?MQ^>Wtj)m*6UO4yI0cej&vuH9+85YHdG zHgmajl7 zK4cTi>1-68e%j@y|I$!xYUn5}F53ve1k=KMO}E^{KmFlHEt~jYycBJGcwQ_U`Hwf? zjUg)pjxKAcUbwE^v?`9eWm>#V6edmp*OQ;je-O&j_!9)eo>Fof8q%12O2H0*PM z1V|hE@TMF_lxcHsvd00fLU zM%I{Ci%tbMw(Y3|4B&s2fei5bX0Qh7sDuz$gvcBtFrgr<1uT1l15`@z53S-2gA5PQtR1}SJ)MG9P!DjCE80I*S2i$Yi-1!Pc5q<}#{luPqYWjC1zva)=i1Kksl z!U^Gc@k0vq(+r3dkb!i1@k0u1M#wg=5T9!LDK-e+V1*P|aYmN@jSZCzwJjT@z>1J> zXGR9`LJ9~Go$=Wt1vb<;Q#LobHYB-i0ABIf)Xvx-1r{^5L<%e@wbIQ1VGJADqk(-A z9^Bv-!i^PDKt9a1A(F*KFKw6F8d>wtCgI2=AuFVS@Fun-#$t{0 zpxaR3qYNvgfU+g@Db|uR=`WCl+aLw)tgGh~#32P-{iW*~UHU3KxCT-H8>9eJF}bM6 z{7Me$j5}B%1-VLu%>u?I7(Zr43Ucx#Y)IjWI-Tph&|ThENI}kp;DMGxst+l^(}F*? 
zKnmI|0Vyy}ex+rH6>#xM(&iW8#RERD!V20BSOJlXamPf5v4MgmPtd>$D`;nm$oCW& zu>vZDVv@FUx7t;3q$*ax#B^;x@6>*g*%&|ttNxnM;?k3PR{_IUSge4|!v`xMG_<_< zVFhFe`7{G!1!N!{FI~|H| zP_x1ctO)sH1!NE}tbh>F3D6!Zu%X78vboK*A?b}3kU>nXgw?JBLQEr4ORT_xk{woH zMW}A9fK0*)DH6mW&rqqhe1}h-ERmKX)gJ!INd{SeBX(_8*ne-B?e38_UO{}m2 z%9hZn7_kEKU{N%BacJVj1}ivXzzSSiK1@>s0r0?=rW>`w3h;28i*f~ohJI>&;!6b{ z9G5&<1Qo2Xf?Oq4u>$;<87s)im$3r68z{J(1y*pR z5>`NkV7;Aya$|)R9Ic8KxFj7ihyem%BfA!du)+$+pql!j<@p8z8A9G)2Yds845a17 z4=W%;$ax8f6_9~+yl@RxWvswvgx$mn$o#5f1>{Mc?Ch}u8){ZqffXTNtbh#Sg%uDY zIsw{a1vb<;Q#NA-HYB~V0y0Pttbh>Hh}04*u%Kjz6<84pW{;*$%H|COGGlckVi-l% zjCiH$Qad687OeIa5Z)?d1>`|9RzNC!&t9G9Ha2`k7|QWYz}kD0N8oO~H8 z2+`*kmhCP^E36>rqAFH^9}6){-QEN%tl(&6tN^#DW~_jO8H^R609(r$1vc*>9Cg47 zh+NEA0b>J&MrVN)9Ib>EP$3kPw1sU_Lpr8@iR^BCa^oAi=h5+k6;B*oqbCtu4@cv+ z;UogdNE*~2J)+VcH9*HHz9ATo40PbnVfmGVcmAWFwZsp?a#er`HF0H$B%o8`P6pEz z*Mo_0LSVC(pE~F-Om<%ZYx1VOzY+YGMnJQt=t4kGLa#0aq{jAj{$f|1V))gNLZyg9 zO(q{uyd3u6FJ66T5y2lc;EqsX0HY8AEe*Zm5Rh6}yEO^+E7}*}C0UqjdrJL>1RRJe zMJZW>|UDas{QMIEKqjcR*^IA1$x9OOiXIpN5$V^S#{NT%b zDXIvlMwA}9Y-@p;ngnmIB9+9?9uy9&hKTge>nYmwoWH|W>|1c$DuOGTPtQqffR6~i zX`jEUzEy#n?!?y6@^(0tSi!nw-<>JF08NzJB~Z4$&Qg8r;%SYxK{r2;p|Y72O#2}s z5&W8Pf-MqEf7sgUb%&kFTXwQzlzZdxXxuuEf4a4Q`gCvH8*cRC%SyjWDhe#&E|$PI zJQS?i>J#1`@keqidFW2iwHedSsqv`0xzU|W)8nOEPxSg{&Q9rpyk%HmZtiWbhfB(#Syv}CBG|0 zzw-s1Z^~U2pd3$XU-|S|L2?>Y2yMA6?FKvw_qnSS+55D=fi5!X4!Yyc#@XHmy47T7 zW21*ICYe7rx_cWwBZNF9qbEkU4MyE*i(I3ToP4a7d9k*xew#?{vgv9cw`iy2<}Pxt z0e8x?^5Z3|;=`WuIV!Zm3avvN0vwmEB%IwilRIo}T9MyMgK*)wYTz(B1l~?V$9J~U zD?7FlyN@GW`Apvo%4)dQ99Azd@{SdgzmCcI4Yn^4Y-rRlm;-| z+3IYMdlPh!ee3looeo;(>(-RaHF)@5=~jUHGdqVU@fGjM*sJOn{PJcMF{<__QBMQZ zN59(2fmmj;m+(7-{^Pw1SxxK3BSv zWRGWBSi)aMuv*zLsrKUe+xmk+;;nx#3YmlOi1kk?iym1}s*j_Kbv>Jvb~eyTRXsWH0-&FRZR4+no8!A4zHQ|{tw z#SV3ML*)i4zxo+AmXC}9+)DBm4sAymjd+DLWcG@`t4{1 zbBeb2UeGw;uh?2x&P}{G|0dzqHEo>H5;3G=GwLZ_1hCWmvjC9&h^>QUybut!x)Lf0 z%&daniTDj+N<*L(3MtXEkX2WyEx{f5HcP#<`n%ORnn@n@sWQ2+aa-03k zPhs6ARiDDCQd0{m0yTV9*qEi9p@1verJ^e99Mrt4$3?S~{Tth*w9Wpxc<;Tv?%>w` 
zIm<<|EZ_2P>=>kIp6_QP{u(VM!P%|BFmPox6dzsB`9<5%oZ(0myta$9B%RtVej)`Y z+zn+GqBZMl6aH4Y$~-gfZuZe?=7bB_mQhp^f8ovWy}D7-@jB-xox^z%t5{QEC!o|M zyf=btbc3Y}D|v`Bv!2+^yO49anNNDY9_6R4*La!<<{)+~jC~L}BPN)qc>tNL+Rg)YNqXx{9dUpdJ z2`Q=R&NkZRcV;}=K^r`J+xcFnlDU;G>LUnKlULRS+|@(Vbnyv-~6J;|mD_o%MQB7r-_-)z8L;mi&7vKToI zFXp6fuh4?d#D>%j&o2eGExd;aVF}qcNmwNXcXd_hFXVOr7EWNE8i$+k^HE5%JJvfB zH_`u9L0pQNshW!KMsPfwYvj=ar3i`-1=MNEJoic)j<%9sH7~Cy)~DkBhZ=Qo-!8bX zL$-ls@WG(Lk)DY#n>P%@%I3p3iJ9r8o)KzuSxhb(GY6+FWLP;@j^NM5WBk)lCKs~zBfEhm*!Gqpjl`Z;VI3te%z zjellfh+-E(YkVhAB{%J(1qk$B>EULfT1D9a?~CC6AW}9T)4<-z9j0sBX-WwWiBb1-v~3;zXx}zQJ!*1db<` z8;O06L>WQnm{&YS84*yNNJRm% z37sm=CKTo7YX|F>deDNQKTJmFlzxNWtUzBZ-)1T0ktDDqlVBBD=Uw>ipdv}LfBv#b zXh8>nc&X;3&WRviYF~=o>;^v#1M<>Y=I)6EN}Kh7Ld{ea0OY09&0Mrdn9(xtq^c$^ z2*^t(A!35zKwdfo>45`)ydq5m@_sY~$V=&}z{8**FCC3YCIE0qX^ILhe6$vjmyVnB zVjoC@Y&)dGs}zTVL9va^AKIbg(O44#^`%rx%2Nb^LaDW&!E`>9%k-tgNByBA@sv+V zhsnJrH0({E7>&_Ef*YglUW+^DI6oCt*FY9h5+Q~UMFBRMm8uhl8)SAlSfV|IRG62J z8Tk86*s4nd()_M!LhH4I06m3XF)n>6v7{!vvjP4qa*7G!vopyn)EoZeP(M~hNBPSc zYwTK!ddJrn8t`yk2DwE#cYh&hg`N1X=9q;{t7PlG>)+KVT6*70#YihVPC1#0kye-A z=g?ucHJWDHvLUr6b((*D#JLM!tWEOIYcxp;Pt~imWVZQ}B#p=s#e%Pg*b1)*e|cqB zbjr2aatdtKrq?hq*Z-<9TrdbvZBjtlva^zm>WX;M>JXtnZlDi!3;0$8{I?#ofm{?6~#>ThK;jAEB)Ms6UYslxC!t_<)!Re|$%SLJq?ML5__M{Fc z+oMUZqqyOZ$tl0yu-|p9VZ1C?-Y%0XwYplRk;AkIt!ypT?1_a0FkSG{+6 zE#14!Kbs0Tf}g2?g;@$wk(XtcjRkOm5IkR?MuL5b^+oyWzz@zcQ0rbLI4#SbA(OXN;DDm`xav4fHky$u%a)_;w zW;Khzf5K!@*=qCOCO2ognlmT1>s#4jw&&WPLT(F z(^XkJ6%KYa&pIc+0w1ZchuuuJ{uC&Yi~SaMTLzep&9XCpwnM?x(uay`7*1UT%fonxB^6SZjF_=ovvnP{D)4L}VW~;K$ zP(7K92hW~N$^ny+XjluKXk@%tcYxsAlS$b&q$IDNOc;>0YrmdMAS^m%5immcWWvA< zN4553M_T3F9;PP~7*aX2xhFf)VtX=f5iRWriWM_!doM2(&3X4^GC@}9$=U&WGE0#c zMtouPWW4A^m&>=k`1NG`5ZL5BdomeG^j@)VPsWo~c}=(!SayHU!d}L}Y%*Y(73+xL z-IK|o=nQYEHaXe z7yFFl+mlJzHl!r4o=g~!wQIkgOdu>eWf3q!_hiDr3`e#0WbN8|GJzqLGn;#|HrJC; zRKqqqNg6QzS6O4{-IK`#S*0gi3DA>SiriG}@Z!n#HuAsXw@P}ilkuVxT`ovZ#t(t3 zKF^*^MlwK8#*gb{2OMZfwfA5}*C#q6gxOFmqJkrM0 
zt2(NY&^?))8B>_@UMIsL*S#mpL$+5>#@B#sYf^=juk^wq0o~+7e!X?1q9hr;)CjZg0K6InSkBix| z$0U7sF7l(R)E2HIHThC;Ng=hfeHA8}?W5$)=MRTi6;z!_PC& zeLk;xDP@}6?L}7d8q)il_J_z>%A#DzGYKfL9V8EV2}G+&>wHicW0$_6bU)k|ANPGj zNfLD61AZ_3@usB;BV6jKB|6lqI9fKGq560f9*!UM)(2-VwmsJ;Ju^?a=%PClt&a^C|!>5b2Ir~T2;{stJCSU6Kr`I)9_ zX`-V%EZ+vB@dtjS0S~a;RL$kAsVNehUj||az~B1;i&D+Pzizr7LxxKlc9tsDh01B{ zSh++V^vY>0K~aa)QkFw!Dt1)-y>0qz!oT!;e~+^PmAbf$=JWb4H(Jj1N-_(T1@3{R zjFOZ`xf!hmJnc8@Kwy{+CLC__YXLtetWUC%fZ*_tc7N0lF6xjcMHdi65v@qD7d+`QS{YTb#mPyekb z6QO^j3CTbxj2Dqxy_A8=?we@3YQdrST0^>GXR*xP;2y<`sEo%n=g}qZbMUPQUc>T( zV=wy~1BaaVRboowsvdln^5o?tPr@o?8ot?pTUq8Rv)d9J)lYb`D`MH{8Dw9!uEHfe zL@5s#CL5Kc$L~b2&azWo+Cn_=leVdt3?V%D;|L_TaI;b{D7CXvRx1e_9!k~60lsoI zs2ct>g4eMeRn{x&e%KmU=BkxuBr9<}WF^KD_)Y_E^JG{I9KHgSU0usg(ZR}6DGC*C z2$xWl*(M|V-;rmiMvf$e4_}!Fk%o!y--|$U54lOTX;<3PR~$FcBH|ES8^6M;h{&4V z7r-9eJYTwga50MKGbAu8A9yEC;9zI8Gu_^q;%;gZFT%oV_Hp=f1g~a!unQTKD=u7c zRfc9zSyXTqf*2D}TafqRgvBp4T(TWyQ4A11;!46=kTx(lOH^KFX!Hr+h+xgbh{SN< zBjF+xIP%QjHWH3yMp}?W+ zWvIC{8O-D5YBYfDSN45L2!d}qwHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8@C zXQw;qOwqjpqcLeTUQKI-6==nWY-$BM6c0HxvwESiw77=ghV&onM@`M<;08Ps$!=Qk z6;7|7O%eT62NNg_F5-ggMR zFhjRWcv;}30R7=+@5xTSHqz9z{^=bRYw+hTEiJDPE;W_XiO}O_qq43LnWiYKW_^SN zf8o;9@+^yqs(ORP({pHQc>6w;Qj)Qy%G#3=!x zfs&(*D}@athZ3NEL8q`kjAUCo{dvAHlGsl+;W*2SRa4K~vP`2fPGhq$mgK=2=HJ@g zRQ5k3xY?U4E}30$xvR7$3xC;wJG_||)0w{*lRErE(hC*6hfi{AR5wI$jC` zLQ?aW{fyGu(=JdA;-2=Q<&;Lc6Ne>?obYl8u;!~SU=7O|7eq=Gu?V2@)v%@&mQ}*; zqJnR@z%?wJV%Vq-Be2vQ;2QSvvNaX4!Cgp-$Y4;Cu$Tnjb^&ZS=7b>OC+Q%9(TE@g zFsiQwv;3m(OZDyr5*)tn*f7(uC#(T`*k-Mf1-|XrCNqqR0m7+aW9YfH$qa$ARRwYI z5Dt~LiB1hN%bpM>eB~X2FAdVUmcH}ofbG)L-sJ1PAsb^d)GlMYWEu+m6E|KYK)O`I zWB|zqZvLBeqKjX8PbN zS#JM;s{8RR%n!#jHk$k23><2wo3-Zj_;qk zIC}Lq!DJ{$Y?nNqaW1QMFX$BJhV7bp=CEBZ)+iPem%Rdtd=(Siuw5f}?%1x8v}i0Y z*sggHjO`jCK~XSzsivID+^}7Nt7ls>bHjE8rp0vTfG-LZU9eq&ge0YfDS|>RdV(9a zE3zphHGkO$axjA%gB!Nnt_$16gm)3!WjN!4NU12@b!->U)Dhcd*c8Kt1LDX~bHR3* z$0gxHR{^^~YIrn7{j0+|-LPGjIUz{+NxBFS9#YWp>xS(z{L1QG0$q5_ImE(`8@9{w 
z#)St-5A4BVxM1Um?Q)EY0m1=mjrMD|q!*-c=QMv1ckTQ9fBxF&n0Pb z1SFNMS0hY_D+*ad-zQ0!j@T}l2bQ`};rdFwNL?-{6PcRP#oYGo67m`6vRYS+O{Dhq z;)Q$5-Q(30H*D9;Gl%VZaVm#x`8`w$yx0-jHFD>U?HWmo#^QqQng`k4ROW{53S4o? zOwxCG*<7jflI17f1Pzhaao?`MwAh0=;EMu97i?D`AxUXEu*OR<@=nYodpB%XWK&3L z{<5D@T64p8kJN?jV#2$K?J}HkL4>P_HO+mfUAeg+O0h((YS1SCO1S5tUcQ03U;xNn!2w-6?LIT92F&!qrK(a%>D|7{YF?K{j?CF{-!6F^o1j0o%h7X5S{#9jNy|rmZg8(7w#zf= zpSn1DMe6dBhY133!*;ou%4%IPHj&!bip|n7=ESIQGTtI$DDU>-!8`+7aq78LnBmKec>SrV;phZx63gq1_%eN zk)!9Ldjswaq6wia38D$ZUdbq+MAI{2BzNbs zqT$c`R#}g(Uh&sE2g-Ryp#NL(96>e5TovVsmUg_*`pKg z5lO;FNC$jZBTP;}4k*N;K>7&CtSPXi*P5MRV~^>mU(SkXm0NCvNiF1im8+zEl2$?L zE+$H^HLG-8ot#H5_LNE$0TL0)6wT~pJ(j1?;JZG#wos~2^8}D#Qj4uAqKrZ4$_;hC z(nsm;_5dy=#FhZkFXD5h!G8b$fuWLSQe|8AaeBRrP!&JwlHaH5nn@;H&>dt1#b%y*~^tn5FN}WzVu}` z)Msx~4|?745MANk8KaBeNdZ!Rp9-G-#aHtvf?^#Xy6~cMAkT(6)th!#)I*#;ecJLC zbo~%*^$_+Kt#80f6kPPSC;h=_s2Sw5*S76M^@%-jOfkyN_9oIthy5$x;j;Hyw{G6v z9-L2Q9PgvwCNHylIH|ry!EV`1s$<3MND(3>B&pW zbjlW(pp!n(n(k~5daaGoa5^3h zI-9-i9=gD3xN*L-vK+&Kv@!=*-7@xOyN>j5^X8_}Zt0c~ z=SwMNmak6xYvm$j4nDmH?(wo%R0NRK44KuwoFy)?TKc#{)TGa53k!+E_LE}nC{sMA z0`Dd(cN~VSEf~qwGW1dtcG<8rpqm#w zH-m7`&xCEmaNtrP*QV5{HL*EBATCbTbPB}eaTgOL&pHQUj=;r0+-qgcOs74QKum-$ zwORcdt+b}_55(k1H<=+D12KNsB@mNg9Ro4*_;bpHy#jH&WgCe7t68re4mn$zM*{@n z;s^|x{TeG`^0-SNCeJztVvfLb%8I=z;v=?!SXWKB6v;#%F0na4MNHnmmKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*C4#c#pb3np6SoTtL)s(;WKoiCLDl+>u24eEKOCToCItOBo zz;nuqy#n#k-3r7+A5zOU6v+VsG1=TKGh|~R#t*v$Vlu2_AZ8waPMNS*AUn8PKI)Z2RfaF4U2SdkztSnYv%gR?{<|&0Fz-BEEN5M=W zH+fDO_?uY;*e{H52EJ~&!Y8vqb)c1OSY}qGJvRx#FYJS)%z9?qTRVfeiB{UJ7I5{u z1x`Gs7{j>y%?X(IpX6j1swDCgzfA6Ori=HQ2-CdVlU!+jo&@Q_5@q;}eltV_gU^wG zrorY&(ClIhP~s~`f+qxBX0q zQA-P*jX`%Zi7$uC;^$?DHDv^{gJt3C^YFs#$9vIRN~5tNelJ~u_I;j1Znt{NJ@>61 zUcMO4x5-Y}i{U&o?YMDxujaM+1MuUhR}&0P5z<9tt0%($K?Dc+`NaFE4{#2r^N+;D zlgP}iaIiDlnQre)I~n_vxDAI_8&^kF{vkoD1Im}9Jaq}cWTmtth(^(*s?)b*$%r{* z$`cO&gvMTy=?=5Vbomw2<)yp@tge(heP-JJYrpar;!Uf+$W%FY+Z5EWDsdB$%eIzF z=a>HE%&Bc;4ZbKac7!r!syynUmQM#`j7gL&&9 
zvUp>`qKgJGRRx2d8oY@zXVf6=cSQDXl1Q=O*hQ&iCrJ zz8vQJ!~Qg9=LomN!lN*f-9(c~|I9FXmF+`h*%v?dO3h;h%9$TYm0V~1G*8|5`MjVY z^becpqt90zf$YP<{F+A~8HCKq%mWU{@|k&1BWA_S?U?)A7;44rZmC3%^*&}& zulFHR%*BNa!xRzt?GCeH$au{MJ&czP8Rx!4N9u ziNLF|xs$lCMuI33buk||LJVs@GAhC}HzyYvk@)RtM+_N185Q|~nURYfafFIFxyXoQ zAZA7`>SxFl^Kl~-$c~Cs$=cCUWATOWdD%^8KmGE?T>9Vp{?0Q8_Wi{B@BXc?z3=Ip zzyDW%cM*Q%%J_>{z$!G+iCFOhL{Bb7C(^Iy(boiheJlF9^g=+toKKf-RX^!o#_H$T=e{b?V}r*FZaZxb{D1T3TP&;*vQDJFg;T;9GcYBcBf zEG+KZf8dgXm&KPq|Ei^{U_X08@cgOKXpo>2>Xxn+POYPVfhQr~OqDV_?w!VCr8CVo z52TYoKji2({reIUMlQr9)ES|V_OtU6Gx$3u)Fah|qMwc;_gfJpO1e# zl6HI<96*D0PIWgP-yV;)>mUDKxN?0w>TYgyC)0EW=`n>jkIzg}bD11Q<7eNjq!U5E z8K=L=zDa+xbS0I4(m32)U4Z#^qqz@m1bq2Z@;o4-+mkKjETNNe@ota>yp?0=62O1K zt^n(>=k;&d=?+S6vj@4&)v)wEbimasAZ}k;c+G+Rm+Xc3-AJR4!TfD~)BwgXzuDiK zEI}U~oA_iWbv0JS$g0WY5GRZ!y#&}NG8g5zdkyTp9UVuB4qjBvQuJ-?BKrv|2k+M>zyHwrvWbwd}tcP zS62@Jv=_6{P_zSNHR<+3xCQp#Gw5#g&W;9~y>Z!gcvD%r0`_1($6u29beRje(%$F~|8yueTXD`@?C{xO#5sQuJx`i+1swt2hH)U4(`9{N)Fu z#?oa4Ke^q%6EXi<*n4lUJGixfjtC(AY7zOd{NUmAdCV6wh}=n#;K-5EOA9BmzpBgy z#lid&*rPbh9YL`Ui3GE-jUs9P$*cFGWO0DqT7`ad1wvGWZ82BxO)i7I_;ZtXzDkH< zI$1A-L4PuRdFp1bczJXXebs|8|`&ywBDe%F^w;QeQ49eW`Eir4X=h& z1F4IoUwf$?s9fzH*mFE(n)6F!pn3RTOe1s39~&)R1oYPuiQf*F9Y^tO+}pghfBJN9 zjE-;ZDGfvAA*oZLYU23LDdnSL#KCbfx2YeC(uXrwAkij%0bGVAg+d3CPzMFbbl=m` z{E7bNle~1Ugnf_qdfS~ek}{#gTm)PV88wMa4xztvhi3^dG(Z%|WEEhsy^!*N!ow;o zrXSC;W9~|6O4{UnO7ZMQI*9?N&-g0Xhjw01`{-<4lp9Xa?|HuYd{BH!!p{WYWOv-} zo*JMnE-1!i^$X5wX}xrik5fX%Lt!yB)oHgsO!G-zEHvV|r5B*J_Oq$wWZNCtKOxUr zURb$qrG4Gu_6yG8Zw`$&+J}xDI@7&=^TyMsyS>9V#h?4aUU(~9#lkP6QtK*>|E12H>xH?uW@L-IcKz< zF*XG zDLl<}ewx;04gRx5&LF@l{+g(IW!-yKrI3?uaVvn>Q`yx#^mTRaZ{6%~mThmw(D#{? 
zkuN30WKF~P#!-H>WMZ%8LTy(HD;;=U!xkV|gLhbjkqjxy@S?<)`C@|BqJ28zZ`hUu zG_)oeqn(-e_=UE4^r2dG6MDFPkPl)$|q(?aEmj))mkA?i&UlfTtQ^Y??OoVLSx2^kE3G zO_i5dsoMI_1ri`_?8BRK7*TS~y_YD}9V&jA&4KQ8)SOk2b*RU2!62b<9zoS7^rs!%+l0#|V z1de{Y7}h>!#iG{J89W4czp@J)A~;Y(Jz zMF0ehI7Zr@>JC!L&;i6^-a4kT;qifL7y$n)k1fFOo5311lM+H;5h8PV#00#u7EtKS z7bWAq8X(Y?tMa6cDN{4QC8ez{Pw;GqPM> zNq2@5$n;Wx+F1IM4{Ld;1t}mSv$|2E9yUk;p{V1<4=J!20WbUKSf?9OK!!<0Gb5yc z45^cyJyKvpZOaBJup)#yGnHFW$l0Y4B7=hsQb0be6QDg(U_*^FWpk5jL(&^5AcJU@ zf$fn3LQEr4OQgVpQY+mI5GGtAdo*-FMauQEOBONxG8_8=E@xp3)D(o6xa#e#Nzy>KGZ^LFs3fLnB_%Sn5kdrTITDI&&u3XE6*6+$t|pjZKY51Vyq9ag~f)Y^U?oz9xSYI((a zyhz_3@B$UcWUcyZ@?lN=(Aq_;fJ`qOFSTF=WSV^9QZ#*FgB1{pI=yfWRxG?AVBA$; zGXlm67-O!q1%NoC8^}QB$AJu1y9&sYI@#G{1vb>IumUSWs5kS%1jryN#K8(HAVhQm zw8si;sBxxjZgXu&dSeA-5MvWyv#Wp*(}>g(E3lwshZR^63TBT6!~txu0y1N+5vdfN z0s#wTBIQ;_1}uQht^z``GFCtyG-CzilNuY;b|@}b0U5JKtbh#B5;_%Q8B88DIe3_e zY=sr%M-Z`PCWjSh-V<=?v+!_y)uL;`NkKnJmZ7sBQZ z1bQAB>(V-`pauwl%z=&US{%Zvza~^Q^#h9)1l%$}MrQd@uf^N!D2E5V@GK z0>%akje3FxR#?H&N>~9ELNQ6}eFeynj;UWFyBl8t`G)TK-k7|1!uquvU=X2W3zq$F zDP+ndJ!*iCQ+z`(9{sl&yz?LZ>{I^iLs+f~5P=b^615A*Y zL^vU^+4CibzH*9|nIi~@(>DF3#~~8%Um5|;o}vo@Jqf+K5Re+%*ZBt*)vlPYh7>AA z9BMN8fa2w_2Y>Mz`~&`=0e6H70~mz}Xldvbhk(?O-5R7pB1GgLA>b?87vLrNo@;x` z4Tt}ftPqE&Qh0^WG~j48^AVaM%^?92870ANRof%^yPdpMGMFErD-qk(q$zYw)ub8F z+)bVZza7DAELqbZq#)Vy5amZBdR`;WTfVv}i~z0sX^5bF^K4;vg5YnOQ6(V6 zvM_9gbt)c^{k^SZ`i7c)!pqE6_GpXMh!ELPlx=jY5OplYh5@IsQzV*Fmkh#@p zj+IMOxk`+VN%ojZcG1{`h~;>Cu}{LC$E)P5WV3~F9b~TiL06t>HkdWi^G&&{0+i!P z?JJ+YUXTqtK|TuixvO+CMHj|wbS4|!L3iBQINRHJyffL^Ko-gmQd^FuUwz7HmIp=;J^8CHlT_errGk*;#T}lg!e60~!F|(-Ec1gfK72+p_Z4ZuaO&uVw8EbH= zBrHv1U1<%U{BQ%-{bLBe{L-~z<@YW4D$(|hL}Y{G?#}+#LLht%c>>#C!8^5^i17#u+UULn=0< zddQ-1`qr|Y=AQ+C>_==JB;$pEu+^1NNnl1-f!~Sv4Pi<{pcTq&FiFNID#8=LTb-ks z{=$eM+AyiMyAvNBF9+F-ti^0avt3MHox0 zD7flz(d=aZ#&#)fvwtq$dvC8hxV3-Ia*-^{xBMGB1}U23@@FId8Z9NkXU7`M7i~jx zh9kuUyta$9B%RtVej)`Y+zn+GqBYysCj6~(m3d~|-Rz^)%n28;C4jE&V$Yl5dv&9v 
z<8{tYI*0QjmL*#`;Jr73YjlI%)*lS=5NBpRv72`xXZuBxpN;spabUMUFVK06@_S91`fK9iGWX}Qm}kYu4(MKcHgq5&sDj*aDn+90=3 zQj;7KqXx{9dUpdJ2`Q=R&NkZRcV;}=+3sxiw)4GC$#DtL>TU{H9m#z)5bdnMFEwSW zLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~gCzE1no^D-pQeY{*cj|P>FsMo@>da%&h7@+mf z_+fZ3zJD;bk0VThHQ_9=CqTiEjs&Eg3 zsw@(?Q~b>a+!fB;P%n#-h=m^TIyCui%S1eVB5lbh!B>LeUpS$QgBxUD_0Ggi^nXNI7Zd!;HYlHjEF+4w$bim*6p?hpdA78e zT3s>9O1lIN9}4V$Sxm;!2WOPfYO15!Q{X*?Xt`_yKh}VU!s%6Pf%Bbs_xktQv}+*Q z{X}@02-tXU6HbK|N4ZgE+T@^BAGXqm14>3zXkkX1v_pOf9*9qd>5xS&D^qO{0E&*L zY8*H~BWQBSLrXFnU7e=7dITPdAE-xH8ZhJMB6!f-tFlEu3}@sqfV_c=mtx)b`Fu7+ z-Y8Emt=q6i&LE^D^$ED}c`kJ*eKQD3#E3IfFIix7*YbxOa8F1hJkcNY&}EP4{}cbv zz_3e<#y>MKM6rvgY6G_|5m}KvilKl(tz6kCQBDECOX-OTI*(9QU?Om}e zGkwE1{g>Xe7e;36R0}5#Zahg{f>k-J`e>&JU11!-$3kGvsnKNuJf5D55bw#p*uN%S z%w2eyM!WMLss30vD`h-8yg>!Pk$4f?Z@kvH`KLmt9=+u*QN7(3??5MG1j#)-AH@oV zd+rIcLP4h`?Vjsfp`hbpSN3-ao*=3l9Tp}?oM^(^Lm*CcO6VK>Rz=`=V!4sn=SYs< z2r38^WbnF#KZ?W(H^7<%B`5U*)tN+BXXorH56vL<6u~D#lo51}dBszd5dp=CR1_ea z(5YtbP35r67yH(4HlYPWf0&HUDg6e$S%JP-zRgm~BS~OKE(ByPlHU$0k~I70FPnrG zbO4B#YEJ5$EX1ogJIvsxVL)Cw%iKMYKxwlcP^g)z0)V`9x|xfjgh|>n!ATYUT^Nv; zPC~>4!-2eX2+{)w0C`242;}`}2#}Z3Re^^=L0&ob)2Dl5 zv?a7B=`gvsU535s6QeOYNN{7c-D`2@9OtK^7Uo?AV)#%LV3S#?I$^j$W|xB{+CxaI z%=zy(VXH0;Nb|d@39Z)-0`wGm#Sn{e+d}uw2KcYYDJF=|&Lpo;Z}^Wx{aY0sNNlQh^H66*jge9mJ$4UjV4LqsoGLYrnF?X`IE}d5ygV9hu8|Q z2!DCSS9Hp?*>VbO)uvYrb5*uxzSV~RRb#kd5T4qkfUSE}S2C7XhXK%^Z#BSw8=~I6 zSuChF@ndAje}BG$6$4IWYy9wkqWbDJx&ys_g*xOew2%30UCtr4>^Xq;NNt5{y4C$n zwNcOW0{m$cw(Av{b|+dA2cC;}p-p~Ay%sh&yZZA?F z`pcYR`0)n3$<_;OMI#1;${E6TAXe;esX4xtqc{9n(>}}EvZMm87*K!a29|WKWm}ES z;uMKzNzV986W&~px0KDxEmznofOVZ2sFxyIKB3=%H_g7w2tMDC-T#zs|HP*-v6|M5 zjc=X9SwXm{&$8rv+(zcu|d)1%&4S#Fs7zXBc!1RVcCa3&*!+zJbhVimo zkxMM(cl_C2fjBb_lqE7(;9;D!(z_HR<8`h-QvnOJ6rv(8%Pt!W-~`D7A+m2WC-ID< zBeEvF&e2wSV=Yc{>33OQwMf28Y{UJm!a|`0OEhu~{-VOcgK`ORl>k$~@%^Xun?CYE z%4#Thy%P3scBkFW>Ct$rJB=4mV|T~>w`gjtHTVhV)|=P`$F=ATd24yUW2%vk!ALfF zqbK3AG}HZil52HrGKqAys)~?FT){0Ou?KtQMyAziVQ(8Wz&!}}$M<;zI3l{TKI~J~ 
z$C|Q>#DXi1OH*_hd&}Y){6uV*H93-)p4nL-uW&cTXl0WR;$*9iS((6nSB&oG@(` z)FdmGbKX4}FFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i!ll5n`+FAlG6rUo0n4n| zXF%VcOvbe#0ebah{CGL%zC9UFRCL55Y=rK~_;Cp{wfAH=OU=|rk z#*2MM^6kl_Y#UONS5GDk$lA4EPbLr+opN`2GGS5-x86NjySAQ8n6%27%{^KBS<#cp z1X-mgTM5vUS&F>eyPEbk%33j+JH2}{UUZ_%1?kE7A#l~_*^|jg2I$FnvMR3$mjcU; zjAu_KV_-HJu*`~m2K4R8WLz5(pjS`Ek4M^=8L2LGPbO!k_MQxfT=$+V57}Nl8PB@P z-D#|~H)!9ziz~C}$z&QRc-@7bOyF9Pi~QEfgfWksmKmN5&Hs9kI|z=#GpZ zmoQ`O$bPEAwK$SpgIT~;_G5P8+N&Gm*;d(1A$D8Wfj^7j1e<;pZ9XKA%^;lrl~3_99z;UkTpdv_C}7QWoVxo=HG~ z?I3x`OCVZJ7B<$UFCN_w_r=G3Up$h;68qlk1AZ_3@usB;BV6jKB|6MTac8LF9)*YF z2fY=?*^6z@^-0gnQ!cvb&P40u0-ar#zY<+WhTWAI;7ze6-*9AG8{xfvuN_F5R;Jgw zip_qbQ zY(S+hE~ELpzRQi4bG?$(s513g88{x$m>anDl`;w+jNsaGggMolc304$qfhk4t^Tk- z?RN*AbKSuXy7Tmw(Qq=AT$FJ7^y$|vzn13bqtK2Im+~_oSh)*KPW8JJyNecnErJ(o zryRq{?zrDQHRzperz9uYZLN*fIV<{U3`b9ldz+nMcdHjS;yG`=o(KEVD!SVr_Qoke zK!Kg>F>zpL#eVZ-(mykd7x33a&oQ6X%fvVxrVQ?&LsgJ=9`6s&aHn&5b9x0FM26Bw z2Z8iDx!lolR;tUp-XFn|R+X)B51pV{Tv?0D73=Y06fdG$8t2b> zbYJ@1@=Y`owct>EtsxV!K{nE_|~Ax3bJtX166cs-N&=SH!Z@GswPdT@tS=yu?Ela@RK**LNaVXW6MP zZ6O}`Njr!nOg#T_1d?00St%Hl+F2>9^#ThTe;UE-SdJ>|6?H#sjeEg|jqfzzHcy7d zz~L)E+10h|6dkPW!U`V$JMs+G$dQEb;Vbi*1d!j0KynYcNwsPF+0s`WFCZ1irRxV# z#WA0ucevWPfmR!b;M({VR@Fwi;Z+~WzXo59;MFV-b|HguWeOM043uh&=aCv?VHHb= zt1omceyQP-?I??4fbbDl64rvWfx%hy@|r>?;crB+=3zu)IPj5h5el67*KzuJr^bWY zlCFl>)iCfUPVEN67Z(VeeKj>`5hHWfqZmTqYfeoD!=xAr9LipXnoEC|4ZtO@mjpPXwB320x8WO-u!L-!9cc7>?<_EOcKcf^S(pig&7J~UJB43ZuXw+`7d0WTApPwQB`lSczO;^ zElo3MNo3Fz9+>IkI3q>#3JrCua1AI^+BUJ)87Ioh~V*g$e90qPfY3j4!I zwzbor=L;i={bUo4v%FX}^}H?1G#cYHHVb1(9;{*hEu6|sA*QBO2mdpIo4vW>k{Jhq znAqBt%m;tjfIGaI7Sox(7?V2uqqgLvXzUCchwnxX=y)j%2uaOf_A^RrPrE=hhb7(nWyqkb;h11YlHO3ugI6->u^$ZVEN2}{@%>4WW( zL9hu5I~^T@C`ZpFX>kN3NlTt^EL5U=f51mKj@T~Gq<`w-=oP8Uhcly#xnaB9Ol7sM z7@J6F=QdA=S)L?wVMDw=g}Gt7WSq>$q&(M!?UIpM4%JD_4cjGSD7kaTc8#P(V{yTD z&4cW2Ds#hj1+JcL$;=Jg6_^%#Fb8~5py-0_3M3>cjjjrK#p_Us(GlAf*%XqRzwBp} z*4(h&whgu$7_!A<@}XVVak^o<3}@;fq8V0J4ZDChIk%-+ScH_p4cled6vKuC;>b{Q 
z!FHL)CE)_K7s^`JL$dPtq0y`(w#zan1PMP$7XiXU3Oat>uw8~ZwaE;D3pq%*VY|G%g)reO@7e=}8@5YtSFDEZ zGSHN^d0nodgCGSpnKUFLB~xR86bM)PSy5)=)_ z^K-;@S>}Ww;V0=LKzJCUMvN(d;jnL);a6H6Llj2ZoufBXq6V2{!haj{4=Sh*r7f8kf{UT%n-uoFZB0L<%O)`z--oYgXyHIysMA z?2(|fp0g}9L+BUz4Z--vqKGg1G_r*6`sCU|sY1;YK!!;zwx*32zG{&dKsxE8ba#6I zml9%20O=QT#aAdg_P^rOUs2<;u5L6_sWYaa_tV@fPlih_s#pih-3UMMLq5F}!?IY# z1d*dhFU3x+w1w#ci!E;p64}d@OAsAsF8zsu#UAL(Zm7@RrXKXV;~}~^zB5L*ypsZ? zd|D(t{fn>WbE)d3;4ZwV9LTewPW7hU74;COPoI8Wh#}hQA+n3wxqzYKVgp{H;G(xZ z=?_Lj%^;t>wr$79C-%TG#V9-5n@ArW_OE=0%ieO`x_Ntha6XlBypMjHyv*+5r1}~K zyJa(}j$N{kcUF=mcAmJD&MXBn{{*@yJWc7U&#*V8Coe72DO-4=HBRkc+lj#>l2hdw zoEdjF`@P|G(qjK6Q{`RV>0LX6PWnJ=y0bm#wKhh>>3B5gZ1%Q$==P`K#`(@lIke6d zQNm%BDA&bu3OG8Gz-i4{_qd%fj6*RVclgiw=`dzMP$ayDN`n{ z^wGrU(n=S*XR@BkBv+n&W-EE2+1_l|ksfZ|-1M?r=i4S_R;#XU*WlB8;2tlFMMVHv z&5&8`%UR+QtEG=SL{0i^wy=;mY(FXHjxxn_D)4Tya>rrF+Jcc>1B70eT}?zE6@xE6 z)c99@YX`))I{cbyhx}oO7)pmlHt6OB&&?p*^D|-FFkCTWhAua9?FR_N#i^Q3ftWn* zVuIvZ=RnL6xEP3gt*n{pw0A2I6MgJbASOq;$qd;Ti1EWNftU>I7>Jq2pHn966^Pp{ z+dy2}BJJ15A?Hc+qSS)xt%!>wFl6>?tcc0uE`gXl>l}zV0?#Qc_Ns`F*al)*HKE@* zv8JskT&ifIL-4PN$@|wbLpBCt{IE+PCc`=gV&?JZlnHwU;v>5jh_O0$ArKeutH|uv z7>LQ^E`gXl>l}zV0?#Qc_6o#DZ3D5cns5oKL?2Q!HbnRUftYNTmKm}!5aWkk0x=oZ zF%UD4Kc`ICD-a*uwLpy3A+=y#Mf~+Wa9<2nNlOyhv^8O*#nXb*&PfW1G2JMeJv|rjhUwu zk^q~vJRAixf!yRdW#DgS6=1(G!WsB_fE7O9y>qfgQys)3_=SCtlv&SgduwM9H_=MF z)dH@5x4?aim@N;qjLi|*4I(rgzKP>vshmYn?*v>f zmEj(MN%vfjzn1R#u;*NNu+xh#S>M07H=bMHvo+f64cJ#B_AaZ6#owQYt22_v5#5vR z*0eXAjK-Z|cdIucuU@=ko{aF<&Go-(qA}*ti6IBptDsKn)sWcrm$Gq^N3@DAdEK-| zkvEpc&*eSsx^eN2w5n!`xC1p)B;I1Ch$|B_MbZYEDdO(VOp#b3Z&GSp=m7j}coAz= zle69JUZ*?U>`Vszjl#07C5E-MehoFNxpnFP><9nGJnUyij~B3QCyehTN~2G-6Hltx zd{WKk`4?+cHgEm9k;yl5n@_B5-Z*l;H_A+r*u0q{ws|u}V)JH-*yha?sm&*_usJ=y zjg0nEIM_ZAG5^8eg{v|{E5AV$FW!mb)U-c6&zaqU#~8y@1!You??tDdEOa&o-N_`r z94?EWmz~Cx5y%dfg|E-U3$q{ZMXwW$#)|m8bP3w`c@DYV>Mi%&w|aQ_VmRL>J7F(| z^USp4#^Jr1_hS#hkE32qFf>I-7mcl+2>%BW9OUN{@1s7zIh@Wv5))4%Gq=LQ&S+=4 
zy)*4(>`&r09A0f)9aZ^<1g#DzUykzBB>FR^w+%bLeaw{qsx%EYhT{#Sv)qt*HaX~(x}3`SG!D1^oI+MTy%)ZD)_0W zLv%DgF1HAK#iE5#iKN9$N(` zAQ^APnbu*;eveLJOXhV^2GOYa>ZK(m|Z2NkC7c2xr2Sbgqj*F zmsl*?dP#~5( zRLR=WQe*LjOD}`PELC{djtu-s<7@E$ZCNQD@Kg9HWKO(N=7_F@mQeFm+2= zJVyLzpkg+wVQ%a`obz&{iI*%Ru$;UVopCqG72AGcLe{&AM0 zLPg(u5eQQT=oSUc*wcGX@+)uq{=3 zGo5W_n8w(PMlPPaJ=lZrm7UbY)EERW0W2wlj$H~!pVP`3cOr<%=MCRmx zh%&WWCkJE+5K&ckK#q#D19DVkB64*=Dx%D$oE(QCLB!-u$HxKL`GgY{MJw{k$$>?V zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABB+#WRZeiK?Ch4j+;oUl z%rvqaWe@~jy(;JCA`_8YtYYTo8ml-G#G;%WnT;_%4#?68Cn^ftxyLGwgomhjI_{$4 z>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABJiwptYQcdQ8~mamP#%rhgii@k%`DH zRxwmvV--h&h{+*V@pOC~u%1{&@oV>3#gXt36;H=qR6HG-Rk_6~j;?F05{M9SIm9ZF zl6$NYDal0U7OTiQg>$T82oO;@#447Gd#qxq$VB88s~D=Tv5F%>#N-gGcsf1~Xo^*z zXh5a;aSJUxX!Qdu!eIh)RCI{Y!Vt?P_dt-D$t>+R8c-=mK( zXC@Kf(kd(rMw1EJZ+953gIAW7wAJelJCnEUbjQ8TPH#LOja&JW=+Zy90xr*DT)JT| zJv}eqx%chMl^Z}M3M-}1D$woz_$FfvW{&8Cs1AULa>Q~9pex~VjLR_;)I96BI^P+ z#L3FCiQ`*)*RI-Cwd=L3YWIw^My%nydjD1b`~SbHU3K*W{_humB z_SWQ*6O-Oxbp0F7_D8d|&M*JpTX#JEttk88y}Q44$DXe~b}swe%IChY8{Q6AZI6e8 z&2!zey~(saJ~8c2&h}=5@hIMV|FE~&KRq69^(U>D!mjD*-gZB}eCE?L5-`?tvrv3Jl{mH05Y~MW^%m%&T;O+fMd(sdwAo5Z3SEY!wd@i2R0MCZ8&b#BnZYQ|dE!L>qSDBV&{$jW zVf;$M_`VHTj^c&Z;y=CdXPybgNAGFEhlg&YQ@n-MNz_tE20q+?`~3CsKxT9gVw;OE z;eJd)S#fJOM8{CXX!@uVnai}_-|EDTt2wKUS}!lz&OU&(-BIrhDs@oq@Cfpk!k*3XXod>4{v@MyIjOLiAO#<2z(G5{+P6-pgHxk;34cv2 z#gX{iTGis>JP4tQde;6ts7|(c@VCPUu9K9 zrbD-0Oe(3%Huj@RDy;|CTZ?I*0sk(7SJ+ISb4H1OInS-+U6&H?!iDQP@A?|~z|S_! 
z29>Eoa6l^oB#v5E_?(81G<+%j%~j6u@d#dJqv*EaYgWQ3Jk51}n$~3<{)0u%xTiNm zbxFL0zb2|)IjItI3brge<@Ho|Gt>x9x5p@A_E4-`;D2=@{Z-1)515p(fjk$L2=VUx z8*|jlS+`en!QYjz7bS&WH|<<|vv?_9y!Wn$ZnFgl*5O?iVdPkEbUI#4|39&1zL=n` z0^r#Q?ly?FyIQqk|8{YH{JPOsOhK>%(Wg7vQU|M)D zzRJz-vmbu^8I~V)^BatpqKyyFi)ADK@dmspgvD{ayV%NyvuR#I>hfk??7fLIHd|hz zW^n*`rV+-bX+s~zLr_Z}h7fA16b(6%X5=MJ_*@_X(#Af#DTfiIzPZQ7O1lyt1BKsg zz-MeY<*(&uHLidg>hwH%%5`vF4pl!>W z*z8*iB?)1J+E&6#tW}YOOw4JwX#z=#5=Ll5tR)vFvYPw^J{iGHW-G}0O?H)tYyBNi zDI}kirlzOo6TGI;gnw`DtMcJE7v-EqLqD~?05}}sxa7$qQUJeT#^7>dqFb6Uxcsri zS>swy^e_XtA8M2W|Jn?@5g`d4Xep$^|2+88aqwlU+#&!1MjRt+Oshqwf>Yb}lm`Qp z_`n~S!5XBa5<*}RB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqZZ26fm(- z+s!*w{bV)A z>NdpCv&&i$FE1-KBQjXQ3MnAGi7kn-m?ICGkpl8bjSVW6DJ{gRT&nbuuN6{2*%JB` zYst;Y!flX(PS(|P3gVCgrml>1Yo#t5UAlO14WwX&6ksZ97uB?MGWT?89&<)Y&gp~{ zYM?-YJn7V+5%Ew zocwCr4lCf|6W{q~+W-poJ>Ubd!3sJKSOJlXamPd+KC+D?^Ti4}nIiH%1xBoZ3c(s8 zpbE0uRdBc}R^XB#$sh&@fW?5dID{2eKnCT~Q`lVvWC(eG9dK6x8A!`ZffaChSGAPu zrAR3_atqk(DzM>&Yp^QsDzF&=V+EX`Ol2dpqo4Sg8^}QB$AJu1SOIxbX9xCJfeke) ztiXzpFIGSX@xlrS5uE_-u>u=voGF{zTpN-pR^YYZO$ITxxi-5B2r-RFEwKU%N_JR* z6`^4EXowqNgB6e&bB%~(CDJQsmpTkt`_Cp}-7b5)Hg2=4fbdosD$f*jSZ%y ztg_$m603YQ(_|Gky9y{dzH)c&YYK0Zx;W!ud zn03h;#eUy<@)97zYF9z7lB!q%e$0#&1SQgTp26CEox%iS3trH?xH>L03FU?g%uojzzT?5%vb?q1BFICHyBv$D>z&U zE1*IsCTXh^E3Dv1Rjhz%ueANV)4l>S8w03dg%uE5n);#5yn(>*rF>h!k0)t={Y*Gs z3ar4T^daJs;~oL`6_9~+df^(Z%2p&= z0)vpO-at4~16F`(&J8QT!*MR^(gNnQE_vhJv4UJBRj~s6m>Da`$=8n+5A*YL^vU^*%RMu zw)_Q%Fe#@O79#GX!vEL^X!aCc2FrHDgKCLd6|9QNQZ zUVUc~!5=o@u25kBqYwct4ZY$JkQ%aEgEUBJiTpzgd`AZI1+*@VA9hLnMJG zA~xHbwlYNUnFbuGW}Ph#LpgzSnbKP0=F~>Uf30tp7VF8ihT=?TSai;IM?t9 z-?YzPRo|+>O?P5zXn8xFN~~brvhR*8ArO?UueVg+hIm?|ZP3jRWT=@<#WHO$#kK&(hADledpY%tY{rEuXS4l;ICEUdl_=bmq zHCuhcJ0t!`ZY2-BDY`ae);%#9_qH~B(^-1Fbo;6P;MD1v{6JnsCn8y|Us!m@>)}1Z z(F;x!;qQ)Mwn#KvSC`JpUxLN8^n~jJy47T7bF+^w zCRx09eBU;FMhJOIMvsl}7>;|hHn~P4IsQZ~^HMk(UzRBZn6CD5i*`zG?jiRYaHl*g zKVGsbKI|!XaPokPl`{>tN zIS|WC_7Z+~IC!#uA?xXnBH*r*vJ%{Q)SqUYwX=j;H-F{&gpJ{v$_D<6A9r(!ht6Jx 
zYR12aV6C!YQticycMOKZ#9RMf6fzl8S8H7cFc@w1pDv0kMOcNOY`~3`c|Y18_BYWf z_4uo0CCGX;j!iz%Ao0*xAt#ko-qC=5WvS4frknROZD}z|{+MmWy>OlWe@C!Yw+NVm zytYEAT*F&-O#h-$p8x{(Q?&_>?rW*ir~Op~n{`=Dxr?if_p;G6_++UH9H>b^SC168 zgjy8>mVv|AJk})+_^Bp55P&P1GD*SX1tiqPZ8EcKq}hDtufZZGRdKgJX#`fxJZiP` zBw$&K^^VuGZmn$3;FH3(2S>N24v^Q3H8@ogmL{$%Z84Ic z%I{n7Rif=NMiCVYpp&z!_w)1cNyTQv>Uh$3g%vj&A!A%AfOR-@B0}S&$m@Fa>EA~1 zYAe&u2J5a~xJ5tW#16DWKp^6y0v?zmhn0k+#FKPDVBu-jIQpGv1#^bB_g>IA;IG+Q zSk6tnJO3u()-`RM(GoGFVxuBgDJ)$Cu+#jU09jcf!0)G47Xlx#b&!k~0>V~TLM4Hj zRq(qJzadO%2(&^WC3+UJ>MFG*Yo=^Hro$wEuR2F_$)hskvaK@FDh9WDZ`M*?M{=9} z%uiw6CRLxpsZy08DO^x4{k|@2%u>!!z!mLMQI&NLYTni3qS?v*x$RQg*5GWs`+Cb2qbe(>z=!1ZeeDNAh3|M7yi-D^1yIko#=0NH)i#*ufMPCqwew$IF*$9}g-WQLlw%_38TnkHW+8L#6iteElA?qsp1^p(dOR zZ@^`3A?GjjRZDP5=B2SyRAZ0Bqw&KvX{@`d)Y(6X;687ip~?%VSawSdTcc=tN=eZK z|1je37I(5+eo_?wlZraF6P$EK;pgq^-NEa{BEC+!M~+ zP%n#-h=m%^wKU65POW^R|4A>-a~}2gzTFntdfGeI+a6fA1ocib4R;(RWgNt z9ECKyd;6#2Ci=fBh>L1!loR+~1V_WUMjkCtilF#VK%J({bFZ}FXe;Se^YY3V7=E-- z2lt(V`#NM>O$LgXSXjUZg9b->Cc=F3L=E!oP|>Ko2T(opu2Bhs5;5Y;)JqoF+_n7S2HYRg2#*bheRSC)`v1f~G%)NE zqw&uS3{mVNG$5$Tc(hyL7%KH<1GGYc-YZ4#lD~9e$7Nnt)4uku*p->S z;hX+T@7W6@Gj^(l69+e*q%Of~&P|c{E zf=!j3|48-R364&{DV;gv+2IW;0FFc_n#nd@x#yj>kfH5Xb@)^W)uXrEC91dE;$1q! 
zpkShkAq2@iJRcX#3I&~(w0o{^g@TTYUD@9yc!H>IbXb@m=Fxk>gk@LNFzfr1QPm+(iCSm6d(lc40Jen91+I-Kb0NRJ|YB19QM=a^SK zMHvxLoJd6hvI(6k&L*@*RC7}2L=dmy$S{LfQmKaldFd>3_e27v&3ZtgW~vGR^3v&ME{YPS z!bufv9RvY+=_Eu&(0*TP;dH=L;YJ79px`)th0MH zm2bYl7aH(rT?ToEbngB_&8U(GQK*|xW!u?+rIjiROZy;O{}vg4GKxfm%ekZlDC zpF@Y))@Yh(%ZAjR)M@_p5$7&^u@>ofYBWg-Pt~imWWM>+yE}d^#8!Ak_{%H4qEoKT zmQ!G>Hob;{x&C{N;etVUYLf!Wmfh84Tvx=CR)_2cKQ|Z%zSRK#ZHRjNX0f2!#E+37 z|NZ$2Rtz|ibp-#PQGN9q-GN@eLTQy`4>h2D%%|&e4zXp=0kp?zD_qm9?r*7$dY%{H zPn)n^ugJ7JDQk14oWEpHn*>FJDC~7YckZ2rlkrxVoqLL)k@IQqBmcDtlX)wg1LtZh zKvIV}AL<@ab#K9m_${@n8}Vqb>YuJq%9{~5jC$p8+?{A+SiG0{A(?v=L72wgt=_EH zJvpA7>7k>%QAED$q1#%TyQip&!p9r%7F#c{6^$4WDrX4WfmpG>t>*Yvj^6NRP5Ufs z%aRJXVnF@58(7k_mTfgQi&G@1LAEaFobj0^ytN*0DVvvDuCP@A>pJsPuTPF};7u0< z;qwjI{ZHxkPkag!t7*;H_|`d`6@-iWtc!3B`CL<&zA8M-<*L7RbjrbAv^jM&-5yW- z-ClNe5BIAR=vNW^X2X8hwTAJsT#-vG;ol~&R4WIaxq@bwUx7F`4HOZ~a0hK!h{6?$ zsSp?7XDVP}mO@nIW!Yt80h}OtbBL@VGXnT@6Hc-E*KL@tB%5*+X7aA&uDB3lvTtoG zu_DW@r)h!y1Uwc$QmH_<931P*wlrUdPc>mMCslF@)%xv)N_-!!=HF$9kjFSzonGzOxSZ^9l<^w6{bf*Wu4A96TtO5LXE> z1svahTEFQdA0!nyuZDux%8lka{C(%vo7e@%wIYQ*L(kBInz*tS2i1lfD{~NHc=>R5+@{EILIV^i5Y~?Nm6} z)jaE*{3?8;!X9=r+4@tUL@xGQ*lih*b!_1?V3`&B42X_mwLZ$qmg{oM%_Ts|6d9n0c=lvc4w#H& z3v(2P89H^zm&bZ0i7uCKd-3bZ_#v># zd-h~9lIXo+-=2&otMZy~DX{GRo`t=PfeS1HmU&LE$MEXOWLz5(pjS`EkC!v#+mrD` zMMtEuvK6`~=uti$zWRbRB0tn4h5o07=8 zCzA=XN>8>LpeM5wd10v47sbk=Tku{d<3%UBT#%lO9|Bi>o;{h2WPqNGC#&+Ba4E3d zpLq6UG6t9oNc}f7*vOBI*|J_=@b1a@(N$^-w@${7N7|SXI?--IGb#uCpf-24wBpuO|}-i%wbSC3N2<49sw(>jS-e zvenvpGJzqLGn;#|)w%R!ANAb3C=(>plfA;QLo+}}W+`w}sl#_&=>ntd1vKgi-W?e) zIMLyPbY%PxK{_%S$p9T0PgdnK;Syl*j!eb?lmBY#$oO&R-I4L5tJD^*Bjd*l)RFN- zMMtC#AAFUJAD1v=3RCb?6|TjR>>A7huCgDq3)f!V7|*uKW~@;Ksvr2X2#&GY#{Tu! 
z>@^t6Ud;OD0Xx~#={qcw!QW}vU%@AvPF`$bYY+`T&p;3Qyy~TtX>y+z+4B2J@T*Px zL*y)FQ7+_}1Qgf~l1ID*qSa(!V?Fxf(L?ZHeAM^FBS|c=_FMRX-wS`dX{o{pmwIZ6 z4)ammDXO@~;nDbEZ^d!;V%u|l(lhgvi!Qn|(fYVRXV>MgM3<3q*p=8UHx06dFY18s zKEKxvBuy*RYhA@=zwwD_e{!}r8;nQxH^9im!kLQ7&oos_6WvZISs>ImS3lN(H?!PS z&E>4ADH5As24V-m-}?cJQq96YYq}mohD#cDmMYbS%4zIaxkMiJ%4sY?QHQo^rb%6| z6*~8^)5G7xKk<8ikFx=ly10zy^ZG6~TF%UBQlrZ1>Vc(}n~ zjy~0&vFwKq zE5Kj)gATD^y3DakS~Xq z!k)Bn?hQu$Nk$M|1&qc(*Ty#1761iyuE)fIofZ47)9K*UC|<%}6FtX#RxcCdbeM9x zgU-N0N_%oJI+Y=O)F^$~z5@0m#SYMkll^YaE&>o$mv{YY1S?uqwkLgbf@X1LkzNc% zl=EdnHDR^8bmr?WPd)P^-tLl5nR6Fk!D-Fj)fqMJLd0(8F8QZb?m~z?|J;Sl+|FI{ zPpjNAm>a>DrB^Sr9?vHVT(0FE$gNwwGwr)^_8Gh#Wg_%%JS7?EW}1Q8a4^2kkbzjj z{9Wr&yo@SyT%9kXtJW8IBUNu(%N~JmMesV79~^tx-xxULypOpqJ~E|lS<>?%T9G^3-Q2D z+C^OOkc5fnuSOubg`1UvL8;xyq@K#vR+a5qpWca0zNNc-!Y2n0Rp~9>F&GYI>%_i76U-!$95C-Y1YVe-K)#pIF9R5iw)#(ZXXnxd zi$hHf7`2|3v+y04mX_BCmzpFiGF}{&%1;5_%D7-%ONYNK$% z)ba!*$x2>ks!;|kN!=Qgk0JqG+gTlf6qpp!mao){eD1@U(ZvuNC^_1=QrJLpC;{rK z6vl*hZJO~5?=K_0@f8~}LgCX+kjZuMN0Dr6r$5hJ67$lUJU{V~M3ETbC!27T<;AM0 z=WSW0(HN((c^FIba1HZspWjsW-y*oxn=3Avc^O#v%q78f;4d0*mp9X5I`bD}Qip#i zx~KD&(!LitpyQ=5AS5+^+0QAhJ?jG1Ans`&T25)CJ8@X1$_WPnYrgIR*07v$L4?W2 zk*4j-_L!n}gWJsHA&64=DdD!?aDi)BHpQ?}9Y$cOIlwjS<7I17go|omOW{oJx`1!H z05%+RLXhy2bO@ewmYf0@)z^Yqe$n@(diMee4$nC@%rxxj)_@JRIoI&;ZO1m5VN?td zP7NDF&#g^n2$Zc#g0BW~4|(^k(J!Dj(WyaZ*%QKque^guLdUZkwo6ZYldt=#8e^Ag zo(y%Kv0X9^1^$T}FA^ZV`1EOu-LPE)Z+TPfumoTrESiZ6wrd&!H^im}#S8l-Q)rd2 zUq@`05X@|it7N?lt5lDA1G%zx*bUp|SPqc1IC?Hgiz6UOT45|ko}*0GT1JQ+v0a`? 
z|J23ND^i!2)f$A6CT$0}j+?2h))iwD3GGZT86Mjuvohp5M{Ji+&%%n7XAawCI90xe z;)Gz6c}QMm$`&Zyuw6n`$(=j4Ya}fiiwm}E9wY#^O9m;YD&rc88@5XZ%(cQLv$^CW zFAH_Vb_J%zbmo9B3KU(iU4g`fpVHj0U6D;8srk!(PHD{z+wIha?P9{`5!+=rtw#%?7h7AYAk)h^-?J|!`!bQ0XEnLGxvhtExqgh96mt{@} z5`K~nA_X14ZrCovudMbX^{t2ILhRCY!*)5|xbPs^4CTS0(d+rm3pypbVY?ioVt{bK z8aaAyZ88gfmR1acW4n05)z_6Av0Y9+;wFAydDk8&+^}7GyJG#=E*Z+u@3LN{ZMq%z z?UHF^O-eb_@z^d`a45DXIW-+*7OJ+`4cjGD6}-7)y9UCgme^t6u4#w>4KW$Qp&@qL zw@U`h^ueVn&ae!qm#ms~LD|S`TBXb;!-l~P+a-fw6BOQJcL<^!J(r}#5h(k65tqG0 zDlcR$6f2xCRXRFpFeDuOOa?O4dB%3hG!*#foUmOoi?G-(p{n4`9osb!F15rC*sf`a z01Yu2Vs6+j8SuiucFAmN#dgUcp|D+!o=ei=2v{er>ZoiH;9c5hGD}BnmpqQ8?w`6i zdZzX@5a(|DcDb3#YF#llk=oZsOTki@+nEezp1rYMGK3;O@r}1)(bMrv1~PPJ%QB6+ zW4lJuqOrJOyXHa8Zz^-cb_K4uzDokGczKryue4-qkGu&Q;^Ot>?v}H<;j@a%I8*JAFzQKewkS*_5I%vub z+hsW8f`~OxEr~WBf+)or5o315b{RIsu;G9>GSpnKUFPwM;le|*l_6tCY?ozD2oiph zE&_yy6m$;cwr`i=S6UtA*GhTJ`TSwK9B*8BF#4%=+>&jTau*i3VY?ioVt{bK8aaAy zZL%KQ)zuX4(o<9vI;U~Nb~*V3*tg5kt36Pl!)lIKIZ=$eBu;ziF6oBX=^?u}z>&CP zIAm8spKporO+e}Kl~GG_I={YRjNmUTp1UELPgXYSBZ#D{i1;XCn5Qg~cncEV)r21k zqHJMfWu=o$R1!^UvctlKyalL;DHggE_gTKP0e1({giw|Q(L_jz8Ds-!ZAo%>E-M=T zyl<8D=;{@&{x?jhb1rpr>eg8xe5TnzG`cc~7lBZ{R*f>*k>L(RTe+f=%{ zNKH1W%^tNISN!!S?}`le4)1A%$qC2-g;*3wAM=bx=vuR5Z0s=|^~+fit#ZqaFsX%n z@9mVdPtqz#UFAlAt~IN4U7egqF7|K%Rx`G&s__*`DQXEQO9bEZ$+d-2g_FlfkYD4;2l9Cd%1E6qJv;DL>H)F zAiJSHdz*UL?@dPN=J@Ue-SSQfkn;Of@a!+XmWL)(YwkUGX*rN*L!Ic)daLRoPM$nz zc?-IJh>m&)`-|2$;pGZ0`rFgNa6Hls^4aS;cB1;kF1S}Q%Fgx{(npv5E8pR=mtePV z-QFIaOJyAIq2DI2w0k(IzDB`r*-WZq#q8tV)ntX8CoZKkOF_&(g)RzDQ@ZLi>`m#( zE6Q}r7M^I0Q~TFWVlav1M0p0MCcUjee>9u6*}utDc~5V4&(5%$KG2@+Y!CbG&GBe9 z84tT#{p~)wz-hF3uDe=}Zwo~Pae%>dU9@7@mx52D{&a>eW?xOHpGXt%T^ zaK3U=W}@nrBD*?)$5R*J)4Sk)FN;M*09nnDS?#qfaf#K^#~q?3eKuQINF27G6mv(J z;sq6Ww^+I3Fl24PNUoNlS7v~E+)QSKMYwj^qDFKGzwTQ*AimY%*Hk;?4?DzAIxMn5 zH!pZ$2I0P+3EPI@(ki-N`H*W>$A);SP!1YS^9>{SsTK3{>D=;J&EV)FjA%#e+N z7(eV1h{>>yftY#x1!clsf%x$ACJ=KB{u#SP4dHe-s!d`*+h;<;Q2(iO5 zMiv0K&PHbLot;)Co6JWwp5k=)LHywYwnGKtir?+8KV1!dWT 
zvP5ifV@^f2fya?HLVIrng)i;`$%X6=hK>PQS**T}m9NIkQwm9d%~~Fgf|)>W@`5t( zx3UVbUl`#Ge8X~uPlw9+?gNt5H|n?^!7uEAq|AC|+h=x$aTBeyTP@(~cMBYQLNSJM z`I{3k??1`$FjPt8Cw`gS<4hOtH4&zHpC`G}{5%QLg(b@HoBU>o2nL@c0ZoI=kwDkQ z7NEpejs#B#Hb;W$1e_x&wS&!(G%v**i*;Jx;HBCr_pJzCrU)RyE^^7om@QI?Ce(%F zLcSGK!JDLXfnVCA+!=^sa(7i5mARc>x~SYqE=J`6+9`InH<`Ap@5l-s@x)yQ$;GJb zqL}W*sGMzZZK(ndMGriqb$UVi_i3Xtid0KDJ|~K}oChoAZqh8=NkZg#3fAzzsi%e&)+ja_HPTm2#XYQ%0;IEO!2gsU@>M-koQo%XCh znvN&kQSVHDN?yHq*CH9=ubLZw*+gS3qVqoXZB#*>HmV`98?RvFB#&qnUGj!$jUsO> zji1YV+70949cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBMBb#-xX?cMVR$KPRnybG z?S8j6+Uib+gU!OSt|NxEvT+SHtA!2e|Lh0<#v<%xMvs@UZ6}QHBub-Cv=dLN*nCpW z=J^+ER5ow@x{=8@a+^=AZQeL?zSqf2k=VSMBDQ%mMPl=2irD7O6sgT8u(UNhw~dVU zGT7hQ7cu|A--N3&Lo2^Q6ffV6;?!&~JI9&bz9$&NR0U;Hp6*6REiH97hrQ`Ez8nt3 zmu81GWdyQ=W#PF+cuDr-1L$?4@k9~7S1v*OKF=bzTf6Q42iFd@E{5}MvJ>`VIL}Nw zZXDXJd98IH`~>RN1VdAVbkW%AiSU0I!G3-|@gC{}oWtq-BQfzLGIKlZ?~Zq7+dH#v z#{M+!z@fFq)lrpyNYLtl@|7r0T>>y!DeVZNDL2634pdMa!KE$@B0y;DC7JFpi%gea zFYthL@qm8bvnQFCr6#tdR&Ju35*@4 zjF~EryecoTc95*R#U%^)eQMBN?nVlbzwU{Iqf8FPOu zF!vhDoP9kgviBOr)buVQd3TR-TbB)TB&(M+cmrk9WTK>$6r+Mq`{jglSU2Zt%JzoOyjlv|NO|L!0 zkGZ z{LhEPU#tpGbcCM|3qKzbetwPc^9{n!Hwr)BB>a3cy!Ucg9E=9DoSnnm5(|&QNOluV zr-M_Y<6)0zZBvo>~@zXqYOZz1DUF63*;Lz~O>?-pm7WHS)ywb8^L71(;nWrjL;w8M%Y~poE$lE0w_ahMNFrkIZ#A%-;~p}VU~2+e{aRLsdiXL%EWS7UQ0abb-FQ6%bOK5m2<)_i1C zglTS0E;1tV+tZF1GJY~D@&hv?7dzqz6?1Zt5y?Qzj9k>ukSXTlMktUS6{(W7qm{<; zORoEWzw??;U)xwn|9k)c@x^_6e&p~E|I%~sfA-cN{Po{kh9A2!{?Zk&22FG#R=fn! 
z(<{-j^y@kFH9=qBj=rwE1kf)R)1{lWY~^PE8A8VNY zv37P-`meF@;0xQ=P6TcEJ?;MC4&Ba|y%X{|jyJY`?`0^KBwQ?2gWslri zJTV>*6Lgf^%GJX0a`Z3oG~}D9Qf4RplX$FjrrG9!bQ0)?9NngWUqQmig}8z`BlOWh zc3xrzf5(J+q?%Cl(@_NfC+yz9uQsrwrQD|(1arTH2Sz_waDNc?qDl0+C&!aBy;;08 zolVfen#mHX;O~0qwzd7R4}XXL|F*jxLgVkv$G;OvJ3avW&|uvYz0D`LC*$q<$A18> z+?b4eTbsS2I=c(%-CHN#&n34mH=7V6oF^ z?tz;CU;dO_3Pg0fvZb6Ablx`J4YGu{a;#he_%GNMU;}o&;cYv;VaaWFA-B02R=$r8 zxON4^oy!WZ*|+zS-4MSQY4izLykmeGz!(;{24|)#FhIv9KHW`Sja4zSYBD**31dkw z2lk1~MLF(X2fOb?$5Ema5>>MleH**Tp8W@qcfj>P7hluq3`S_dCKak;Iy6o`t?Nm1oBs8BU?M0>GP?Pqw0M$-5#S89DCII7x+J%VE*u;LYRADPiJ>3%;s_o zuY>f>m=bgP$aNqKU8(%9hvvQgQ{?e9;H7~NO@sLA+CG5JayA-@c7UuV-ChW{!QT6a zz0LmV@o=j@DccTjDl1pOF6`&{OR|_Qb0K%SskV|?bp(IEA}RK(fGQh`a#adF!3 zZ^g~QXqGgtURb#deH#6uQ~c&C&Oq0eVX3ot`TnS}a-iTRxBGV^=5K-B5A=J(+XrWf z0Mf4(ksr$s9!;Oed?AC#odgMv98|xubS(R;%3M$!%s+uWinH7?6zh;kFiYDgk`A7} zdN)cI``E2j=r>m&L`B#ZbM@}z0PMz}n|AV5LKM@(OZaVSjTLUjlp3riZP;Y%m^O4XFlF7fHYNQ#(+(+WoNWXv#F_m&ic# z@V}f!=9E7+TD%nKuO$+{6Am0j@oduHx_xl+WPgH=Z|y4$L**fFZW>9M&|xkD zu7-@7L?#E(UwWg{gclkhie$0|u-sWnc|hS|4VKf7XW21#r8FgNaXzJZb~ByCkke;; z73@JfuV({vwl2yI$LRMw-+VqOJ|*F23UItP8T3vJ(H0jJW3u`MXSK9m+Rw)+A>*O2 zn40RWHyEY)Brg^k@xscB(OUcI)N-=zj_jY1XSJ4AuV3w4f2i}~v-q2Xlg-Y-!v|0G zZrHl%YUS@0l!$#U+BXQVa??it%t$*0x7p|`0v004RcgQcn2TGN0}MeqhziEdn-8E6hl z|ED+p%rk*_iBx;mLh;din&_r{Ha_CxmfHFFa0Bl1*9TuA@dZ=DUBKd|@2qm*V~TnS z2k&qIz?B#(!pQPj(^HzbkV6FFqm2+&JVp68O~OeCkLW!egv?+>q~*0MMe#Znd^(8W zP>RKGb%r+G5F$ic4I)B=fy1vgfIF<)$+O}VQVPK}Qb`apf$YyDyw68qT8__garLD@ zhB4CRbGF_>C74hADcv+C+>e;wozMWywnDCGzzDtd?u1+%`~mS!HW$P@Uy))P~~MVbn3U3O#F zdVUwO>vl}|cM*S)kj$>q>2uCl@(2QH#7k@G8(Zsq!C=_wBD$(GFF)Hb8&swW!2#{| z$2w|Ru@A-|6Tw#sOLZ2`$0K-^m7NN{W+fc|`RNQF{)0u%AiygAny7kZ-FsD~kdr17 z7Klyn(6Q{Ov(2W(vnR6qcgQR4DMLSCQbxX%5R)|x;~Pi$(UOV1nhUjEDXethbq#A~ z;awJCBtwcayeP3{zL=n`*x}iTzhPSv(9oJ>#rB;E(b^`Pzp|e8`Du@z_4zIv>d0i% zP;F}HtT@xQ@Xvnu@n>2-)8s?Hsv8N$OVP$NI#PlMt#YrA7{0}eVfc6h-ehoQE4^r2 zdG6MDFPm;j`*UBF(+-{2zq%}3PwNDDrV+-bX+s~zLr_Z}h7j9Sd5Ku%NS_NNK-$=c 
zH{~#*HydzU0Dr~}ei$=CEq)k6!R*l_#4ko*+?Gx<8`oP1&2<(gD#a98 z_wQvdVz&byYk=wU$0Fbhy@s_=k`Oj1d6A$fsl0?z6$QZI9;JN~NK!~nXhf_f7l=cX zg?}=Fo6KRN{?^~+O(MLf7XH1tuR`i5wf|hDWoTr@=H7j_Ezu9rFY;|8IqCqvV8-Bb ze)2u3))-v=SmLblz%YQj7~x-=VK*Wq!2?aOTmG1?l`nkLgM%+yO)X z6PTckLI~DS1?9#HDWLn$S@gF0LcbRPK<2tc(}?8+Q<1^J1}PvP)(OxaDX^i&nXxa0Vu|Wz5F^xzqkpc@! z?Q}DM5h<`Dl4k;-00~ZW*K&ESj6p#;#Es3$1BM+L90`f_X4TcFa ziY~{{N=s0RH0>La0?L-qr&voa5YqMEU%h0DF>aH7{jP!f11qE;=R)v6OCbf}OUJ=iEsz3oNqL;Vn`K;QM@^?4R=~xlg4eK{3cv;{pto2T zy9q3Uq%Bs!*Z^8!> zo7-F)lHOPW8N}G;+F%8Qm`0?QSb+s4JFLKp5VA*uOBN~DW!0HX0&K7X@?oJ7VP4Hl z-v_v0umUn&E3ANgxH48i9yDVGR^F8+VSOIQP&HD;Sm}#fd zDb#u%u!6%5`wEC$jBi(vTMv0IJwXF2tl)4Ztbhu^8X}-Nv%(7Kd1S0h>#za_snz!L zb-aN4j^sDq{sU zBVeo`59Lj#1#L1+s%AHQfPDkPYF_~vQYSlmtiXnv6;@zH$QLUhgLq*DgosXn_E>=p zHO`dHSb+^mZ>)d}VrnJ81}h-MG$OUc3M?quVFgx%>c$GlB&@Ik@?m`=a=~B)WV%*Z z0r_xctbjad#tO(MH8x1gBEMKKU%WJdPN;eoUZl3#S3ubkIu#>UKpr$Xco?Br?JLNS z;$_RsII;1<=46_7A<{;`51 z4p;$^iy13mY@o2zd0+)cDq#gw2*o6=U-UqRbg%j)vitBAkZrcpgC#+xV00a4n zgXxP15&GBizon3g!CFKD&~b`y4925&o58#P-p@S~mNR7^ADF8GL@2+UfVjG4URF!v@?HDhyy0BA}(AR~!OT3v0I~!G2A{Z#bn6NP>+-@R8U$B5~vbQR14UIj9fxKSU&g-w;l)MN$}y+GqN`QFn?jJn8l)lkuco zzW$_qBT7l^o$*B}QxLWAN?> z?qKcLZ88XDdXn6gzzwXBK`SN`-m|eAO?Y7~T1byc_Lxd`(b$xT`r$!(SbEb=~YMY`0ubthF*gjXcrexU+EdVeYQiI&6KeKa)5??XC{IRNE@XMQ3#AsF$ z>Z4z4i_^T!xmv!$$AP#Kv3himSc|X&Z)>`C`*;d>O*XjRv1Y32B zfGNmpE0oHGK9_sdp7Adl^$8$gKUJIHXwg-rPy4F~HtVvQau-({?`5O0><&?rfNqT* z4mQtq(LM9iwpAfu88|de8g3rz5(oTL6CMb_6-}9>;PCf$z;*)`H^KJ(Y04*e+d zmEel-Cyl_0nMbX5o&@YuA%0TW_TcE&)B*CEu?D9~!qPO>mDcdd-)g{ye+}6u>$hIuUH(e(2M` zjo{T*rkxGeUA=INe#D6#XorA6#76}@Fhve43CR-?w8qizM4L5cXnSw%I3R<+C;>ky z*uJm;zh-M;IXCg{{F{VZ*R*j)OT>_hO@%BJK@Tkg*lGTG0LXsC)AK^{Dv^4A3bknNu!cdj!_E!uRV&NyqD+n|2T7MXa=9v0Zq1Uj*0a2D@W0 z9Ofa;%z9!sZz|80eE|Eph<_^wtFsV&nFq5tH^HSqGaOX9uDe@*_2 zgda&#YywJt0jHF=c}2e`*;L^k)m2#}aHsg&4Y((qxuISbBgf&zoYd_Vs_12hR>Q9Z zwk^De2w@4?H%V9}1$TAgV^M1#EFHsh*X>RF$5BYLySINTZleFIg19KvP0}WOFM^}t zTqBPbC`C|wD49O;<|^T`u6 z$hTuQ8#Q^nyPW&0F9u@ArCFdd~|h^>gq9gBz|)}y3&9dKOe!v 
z-d>e0`e8UDj{)QjT)Y%pepSa3Qh?8f$Q$M9rF9$j$T@_Rq&@)`K5y5k1VM=yac1fz z3vBLM{%`~C4{3zQ2E#tO>=FHc;vX6qc8SsWX9k8Sb`iA3cMMf>vjJLwm}W1R(rQ7~ z)4V@|hk{5gg>~9e$Cbalo1P1qoejzC6w~g-OwY~_BQtiYg%bxio}@0pY7Tsk;>L2Z z1DCH6;bS2%=hWyj0Uk}yMTmE0U+iC#cH8-nRDW+cD`h-8yg>!Pk$4fOEB7j|w%(8a zsSv71Z@EiUZ@0xe(8(ALOllp-rhNRd_dvye# z2vJ7RIp!5lQAPw5CsI*>Y(l4+xi|4eaaF=Plb{7df0&HUDg6e$S%JP-zRgm~BS~OK zE(ByPlHUm`k~I70FPnrGbO4B#YEJ5$2;x;bo}J@{0eR^xbN567rOkRkp=PQI0P@o5 zW-eMJOwmbIO&1jB*6bO_P|2LO3Rnh50mXb6y((p7W3 z6jGFr8q|beL_2l&VT`C>Rvm*!-a#Iv$NRAy8jRwWK@>>C}Sy(s3y_N{k-8 zbojV`Q;yI4ynS%;WPgITg!UyJCik|>s6TsZJV6HuZjQJ6ZSG*&{8ZE;g9`@;=&%R) zP!wR3S*bc`d|s^``$g)W22HkzG`&_~!09e4zo4)@6`qNayY^1g)?W|J59` zkZBONZZiH=jiROZy;O{}vg4GKxfrRf*l6s8&o%1HR>(BdmJO*rsnh)HBhFp;Vk=wt zPK_o>;i=kEOQy7BzWLLx2Uhpc@LY(k@QU!4SA0dMT$?SYz*cQ~4Fhxi_Zq_mgYeWQ z1#I2px{|T9Iz;FX&1-Hj5PYiv{@W1s_RV5JwTT}iL;m~o6|4w#tB4a>NAUj{)mN|4 z9q9Edv@kns&e1;R({(wA*s|vU+GDj9uIX0yx70?Rpfi8kgzb7orrk+dn=|G7C4<@| zC>lgzuM@g+?=+l@x5Dh)Qv{8iPiyoK{%aE^^Hw+q&ec|cqz-dF)IFl=-hvbHTWVD| z;?Z8!KV6}eHzRNu^~&M6JJH6lcrWuqGWRG__?zPPA~pIJ!N(i$7F#c{6^$4WDrX4W zfmpG>t>*Yvj^6NRP5Ufs%aRJXVnF@58(7k_mTfgQi&G>D8@`}(#%G%F)_T09Y+i1; z!d3yS>&!vDnh;c|-Y0)J@C~1D$nJkiw}0YOm{?6~=;K@Ga8?j5>a#AwHRN+mVfw1@ zkaR338~Ccfb#$VEaMfP~zuB1gdmE08+q>24CbUtE}>e#y--6QSv##)@@((kgqYMFeO*oOOgg@r;1 zmYCc+{CS0g2jvptDgmZ|cVd>eM=UP%CR~(n$C)JQP3Z5ebQmC5ro4Q+AP9aK&+H%KBhQ3pulPU9SS^I@y5g zr3;V8j|R{SXE_W>hcPj4L6ImZa z^a=m6!tu27S(XJ^rKvldRSC&FL|z0JP9QdoounyVfcI25s>CchMIQ7`S7q%~IM~%Z z>zw>5e5Ar2b~D-fQ=mjH_FLF(8IW~s;S*b#73r>Xl$9;l<(8XEfYt}+n#Uv7 zS78IYnW*T9hDNEVjJ=M}HP4GM3qR|b`*6s0Z_M(LZN2T(%$myGi-B)pKm`cyHCzDCjyC)N7tFq8gJ(-LL&z?-m!8|gOj2HWi zSo=hMtI%N?sLic3Czzj#V_GE|KV3YUk$z&wad&Ryz z8BbQ_HQ`cV+5J5Wdl>`s$$({6>@%QmPbTBqkN~}UGJd?A^VK=2&gaS!Sz{t+#?%9P z_hdNay7y#x$oA^Vc-B?!UPSD<^<%y3j|Pu8ieCleS_IkUMZ>pU-dGMON&^kl06dNNCqm)lv>zDL>e7&awX8xo*bPsWcI zs3+rzijGJfKKMEqKQ3XW_MQxfT=$+V57}Nl8PB@P-3!s%7LeY17gy)elgTu2UiU5v zT#GIcWSvYHgQ@g9dor0cy?ZiYwkit^m5^jSc=lvc4w#H&>r#aihE82FroDSIDcgpW z^TqTo{4A7DBWK}*B 
zE&-O?7|)JO#sHK5YU{}OaWPx->&W=gRcZ^jbCDm9G%<5hUFcOZIWxwN?58SRiz8W_ z%L1;lAF~VBUfmeaw#sITqQbUxgg=Ym7@KYEUw_TsZn5metZyE$lYPOb!|K@IY1m)E zC!0=QP+@COWS|FqUiDJSG`Y`(LjF z9)btsqrNX5Nn(k8?)iY<3xB+6slo`CdTNOd^HJO>s<_AD(fDC+#c}q+^i{e(4?gLc zdCEl>-I-{8T%fb-@>inE$OPV%*yNjv?3FQipWkZ-lBSjEwXR~b-}uC|KRMf*4aOt; z8(?H&;Y>y4XGMsqv<0z=&ijz9d%=%2;LR*IRdYFOYKp|>mx0&;@b`YeqExf+&zi2s zkl~Vsoux{3p>i5KRxXi;y>c2$P}HGqnrRk>89Mi|)5G7xKk<8ikFx=ly10zy^ZG6~ zTF%UBQlrZ1>Vc(*(I;5Q<8nRd!C z9PdpAy%WR!@s1VAG(}BM_a^y>tt#7-J~~0OxUxtuh9b)OYELy`wYzkxxyw_}{K$ECmwd{c zyJ*e!d^wZGU5ME2+$I0C%3TPt=byWfncKNb{^|PN<;&8mmsyYJGXgHx@($$It=^gT z-8lOU-i|U6`Zu1E48+~69>vS3gvMp`BD!3Cp>;FOL~S@2UuVcf?6|ylvjMlW%vEN$EjX&5@MKrS zvePrjzHD9E+~^@)d0I3X*LNe>VA-iIZ6O}`NxO&_9+EKe{M85~w{WvkFetUVT2|`? z7Bv1eg4eSgRn{x&e%KmU=Bky#n5+Qy(6Jax;JXdD!;@h#aQF&Pc6BW~MF%Sj;vY_t zLK3#%e?*?48aa{>K73^!L_nqh^7|1;?jbj+Hf=xK`ikSlr02bCV?XM77cSM6i;VTimmgT`NWKgcSaKTj>nn7hz!I@JrCbTXk^~KU!CQA5n z!zJ5M7R3PJBd#Q@1-0Z(Z4kY(M|af$d?SK&4r01!UQ>i}rhe$(a~7Zniu6HSLUk%~rD z=`G$d7!GCY#J)lk%p{QhmgM0Y=HEWQsqDW+aH}_0Tr%?t3u4}B z)sY(EFB)){H`8J|^A}@Mhkq#G7(@J(V60&RKYTB8K*vjAKuBu-vJbZ z(>}DE(nxorL0}wKH}Wyr(#91eTfuT*E$IwkECPdq`GZ5^F$V__hmR!!aiW2|r2ejK2sT?h;A?jOuH_EWhac zQoVbD1c&Dw8)h2zgf(DyF_${q($b7|$dkk#__kx4%rGhj2&aaPq36~nGX%<3b>RdF z)FwJL$Siw8nDCW%h+v=#HjdaXJ?%}t?mNfEn9PKAG<{?2hV7DZ6!<4@yhwoL3JwJh zE7=I{3;>hKfg83<9#`;|H^mN100zRMnYduPrXgx+h{>SF3f&FcCFC=GaFuKzWR?t2 z&R^agvLm)jW`s>p*y-pHL^*mcNsA*;F=@Gg7GylP4%-piC4*wA`=>6Bp2<0i)aD~E zM{JjysjSu&W0NGbYtu{}%adf%j@T}FJkz$6=en?6GBTT_)vva=VY_4uC3o)Fu9388 zEH2otd64s)%G|JBfh#VVaaEE9geY#=_X!(zna>g16_^%#Fb8~5py-0_3M4N4l;(!* zifjr=&0qF&N^5S|ZpQ}O<@``IDj7icJF(+%5Y z*c8Kt1LDX~bHR3*$0gySG6)+S=*cDRhV8P<2|>b7(nVeCZarGa#tqwL_?6Xuq`s}i z=dFDZ+^}7ax93NAaKm;vM#TW()Fw07rCXcK5Re23-3#*`?om1Rq-~-Dw#&;~2ot{Y zu02q=?c1fdE1q|3m&}9>AKIqdao;W(2g4wxWiKAv4Y+TYjLY(;Zj0TpT{4D(H+O8; zK)BQrJM7yv4N*%&Oa?X8`HuT`3HeMPT&j|kpDrCP8NjZ?K%yOtsVs6+j8AHjPJGN^iEgFjpwrd{b{H8KDY**llOJ*F# zWC(DQtuIb*+>rU$Wj;r2mkdHoQAlSF_@Y431=|%!NKzVoxydIUyRA)1U?0bQyCR!H 
zQuCL6n6za|j^Kvv9=5@D1H(6XOg^-0AXGHA}#o6rNINWWtRBkx#+vOM)1B3(C$kB6aljn==;t4B?3{C8D#CAFPhzGRz%DeVJ z;fC!VaXXWthQ^r8gmqedW9+tXmyCnKr}<&KWL%a%bzAI)?UFGRyt!k$2EwJ5*a6!$ z4N*%&Oa?Vp=x+OV3HeMPT&i;Ex(KyWeyQg*R<|=5$Rx1|3eB)X5asB(BrT4Bq_XvD zgb8s)jZBm({vrY$CPq1(Cwsuw66Hbz!?? zWVZsM|g1Cx63gq1_-Byje%y} z+GK{ng&ZW@_U-cW7Q%$DylW2>C@>zca-tY_8=UsgU1>LruK-Gq{)}4qQ2up#cJ2*u zB<>i_&Xo+$R~7nFG=jgZxHnreU{kmTw5qrsCfd&Fsa4X3V-oci@dZk zNMDREDBax|z@>!P5E#tdMSowv5E;IM~_~Lomy!NleU+dd$@sj1c~hB$|Z;n<`Z8AvK#8Nx2cEy-eiPs zj_*#;E$^fNDZfz+&;H_T`ShwdL=t-N(sCfrhC0!o^;XqGoIH8*^&y7nsE5cdYUcum zii=Hnxq^%S_H-~DkH{c4)FcOMKK0q_I`D~IaIa#No$W28k1qRHzQbj2xo+ROy*)ga z$~fLbzfE3g_i$2uje^~>nN-Iv*~h!9$qGA9TuNt_f|!2_T@;?Cbk%3ro6?h4l5p5E-9onbe9pgr5!9`@UtCfR(-XhZs{&Z=PN>G zCaSKf*Cn0;pWX%cds!?h0?2BH%xbS?iA$`OKJE}T>9g6wLgKLfq?kL(6fda2yT!^K zhaqbVMsl?bz0`zVc9|04SB$ju`a8a1UVvZstsM~G>hNo-9rA}AVkjLJ*`S*jyfA}s z-_L|?!*FS z+N>V=m(~>iftVcWCNpGXAjS{71Y$C*V<2W8e?ghBS0L`RZ38haCLEeJef>c7e`>o?AKTklgC{GF?rTG5OV}xP*&_!5g(pQASS|>n(=uF#N_>JnIRhkF@D%3 z5R+jY12OaX3(ADO0`cMb1Y#_EsRhqH5Et*O$n4h`h{@wFftWn&9EdprFDNVa3dBcj z1F;Mns~$oNB-sVwAUX^HO_0sfGD9{7V*Id6ASS~)24d#%7nBKm1>z&tfjBfYfz?qp zHp6qf;K4E$W4EfoZGzGP#Ru6Dc%WE9Be|XZc%|KDWD==K-w}e!3(B$wWr^6}#+-^Q z!DF3ud+(2y8o56HNASg6Ai0p;!O$@vD~r|FvGUcJc}gJ(uvyE)Q7{w8O^U~?ouPrx~nQajijN%K<7v1o>TgO_TL*|#EinIeD)yT~OUW41^onouXfe8Vv& z-}$X#_m}o4cLt)E++Ed1Wp1aJE-H7Di&1%ic8Zv-BLy2KUjpTGm=LU-Q%71tUsELC*4u+On*vV zy?ECm8R4&*8-LkEV=ST*L-uV{L7g_LA+Z~;VB;i@Xcb-ZhG~r=Z!C?U%X``lR4APKSfd z!m^l`dUk6rN7&c8j#%Kz#x>Ob7B-~+gKsRtUS{-o3EOtU_)elU`b0bNq>9Za)oh-B zu|{R{)~_3xd?UB{#METC-b|6&d;&{bvvb?XXfK2P zoqZAWAN)1Ah!2oe z1@Gq;;U(F}9zd@XjVFruy>bcK_jwk%-P&#UKe%?NbupZ8lbx^^!+B=fapRCS{K8M5 zUQIAGMMxKot)2+~hY{@O=M(RtKEOGg&OZ_pPa-q7!~X7gXSTgF>t^gv;|?5JYg`>w z`G@kHI-qU5vAAd*&n><76qj3sy<*YA zs6^6YCM5=$QpQDPgs?(P29-%f1z(5&TiOJ7n%&-Tn6FvnEiL9h*^e7sB8}uy{^vvD zFII&oI>OI~g`bZIKfgx!`3B+V8-<^55`MlJ-g`ML4n~7n&dyZK(m|Z2NkC7c2xr6Lv~G9vNY(~cN2eljZZ12ZERJK_ixb8?Xp$w17ET-48yDdyuwD3Bc$sgkv$mB#W* 
zR{pOm;PNb2_D&4@=`OoL`pq={_T};acRBoS1g7#jcjL7~u?APfah`C4s}$Rhrw8V( z9qQbs&b}OV_VmsPS_&PX>F$gMC&!aBYlm)%;F$0pSwW{I=QH&}>uD zSQuOWNy3%M*vQ9op4QAfd8&wnb{iFu&k|;gDxweAs)#nzDk7h?RuO%`Rz>symLW$K;Vf>WB9iYdRg{0+Rz>;8S&piyh$J=} z6_L+ctB5{et0MXU%aEgra1mjnB2sQxswn@st%~xGvm6yF`ep>0Sj08OG$9>PhgQi0 zL|GM|%DMZ&YGbiXO4NoHsx)bYn|z+6I+o*Sxg_sgDm|0)$(>nj!qQkSsVY}q3Cmd- z*d1@j30$q(K)v$adtpyeC4TMR_$&8btP<&7XkPXgtHk~=JMS8m?-#4Y;Q)DCn|fS& zkFl|kCv}-kej}eHAyX5XGs8snRNj!;<~RBP8$u+3(x#BVSS6zM45Nx@(Pyh7`al2` zU91vqR78tDTNTj<0;uR>mDrz*P?E`Or;Al0+HzYeL`e$vuM*+sr7sgQt86Gyar^C* z%W#HI<<7*_+htk7Z(9{H4hdgN2$^!}d)a5}&Y zse1$t*Gv!lK-0gS{di>p#8Et{7%gZ8oe*>!D1VHMTR7BlXB&0jMCWf%U zJf3*d$ps}jmx!IAtZN`xIR@-yqWb9uypE+}XAHR2#I{uFgd(mJ$j#BHXfhjea1n+85sxPocSmFABC{SRM?;>$*rk)B@g#_tsymuM$Jx;YIxLIIc7R`V(5G4OefIsaX^M8X*hQi6-6uZ%E=+da3nlL#nW*Y6;DTI zRc^70qw5;01R_LS4zWt4l&*BB1Bvcu}Y-m z9;-x3GLgB(Dzb?09IF@tL{tv3ilyQnt5_;B5xK=GhN^2_#gQOla&ka6#`riOODCMD zD1Pl8t2h!KqT=bei;AZs6O~)6;^?}@DuD<0Zp;$6Ah>|KW-I-2d#dHML0}gj*1Qu zS{P!vdvZ6uDf8j|7^XOpG6CK&4>9iIJK(>V@uh zo+PD0EHDJATy)0+%t|IUFXyEUXSV0=ZwwtGx-i7NotK@NM0_i2urwS`r|7MWLvTI3 hx~!x#{obfMecMiN(%c^^Kvdv7D7_ zZPe2}boZ>ZsHn6S?RyxEc5F7RekU8`&HeltLN~4zx2aDw6qU>`^(>W@3Xfh zmz)}R`@`#Ad#*Q}uC{Oa{Eu%v{;eqc;Jtglb?3f!{sm;8Tl(x5_QG4?s_oIBzj3~E zt~;KzMyDpd@wx7_KN`mS?;mtGdS^$2&E9xy zcDH+z_)55Z^-#Qj@vq5$QQTN;F25M|WFLrSe}x^0*|EDD1>X#8YnvN783wKjXhNw4)}Z#?V`T6Yio(|&i*e`{~t8h3}A zqpi-)uzz|q-fCfS?zw$5oJ^N*f`!h;pgWnwEjS!sI~Z-OuGtN-{1Vu6t~=Q2#aFDy zi_Lf*sjIQH*hE&chW@dVZ7wGzbuYrgYW4_xGJ@CI4XNX5t3Ql$o;X8$L22Z9Xspip zFn%Rr{J=UaM)BP8{J*&2XP*hhNB3#Mhlg&YQ@n-MNwlnx41A~o_xbDNfz0S0#5NaS z!u^4p#?(rOS98VnqM zwE?$zdv4skZF4gO*GMHn$RPVOFdjm<@VN+HVJEy8PIkxr?x{iVWPAA#>_-W8+}rF7 zyIZKXLFvFFZeI%fHb%oKs=a#SjMn9({9ywbm*5Ptf%iAyupM8m+a{C#nPI$uzb2OA zNc?RsmEz8%*FojSll|eDxN%JsLZ{1Dz(G`^^f$ZH9xgL8$|i~Z`v|VJl-Rh}-HaFS zMy1!ZKRs_Nu^_yexcGPk*PFOF)$dNYq6(FZqY0@$ZlJZ{VYoJarB!X1Qe1u^DUdE( zKZpvXw9H#uo=@8cK4Gh?!ZrAh5xm@H`kXU5&$#4wmlD6jRqC4c#3ca%ey(9Qs7w`t z16m0nan!Qb=bQy8_c)!RKNi6&Z4})Se9cNYg{L`lq?`MHw#XUxbf>5;i5KwKMAa)N 
zRYFdxyD4hxCfg$vHBX_aMk`wD)_RR6(oKl~V(14<%2-D(j!J}hZ~l!r>gBB4tGQ6y zm9QU0?`|jUT)WeFA)deYbr0Qc3m>e(J1oM;iSF=hJfHr5V#~bs1Slu)Yy@{3MB7=Z z+P>dRL-Y!?;=?u}nhv69;M3+k{l`W;eFKj|wW*<_gL2tM04A6g-ixnt^ZV?F9)E`A zN8S7eZwO&=T<AY*gCE9>P>UaiP%wKm z3Gs^&Tw^OluD9?~ZK=Ny`uZ&BREkitKI5YeXxZ{6Hv85>NkZ75w)yaqw7dZ^Sp`b_ zCXl2kVT4A+T5_}&T=wG7#LFilxY29{S-;7y5^=4+1B8X-lhO>*{QoC&UzHEXxo8+H zjxbX4S!YZDe$kA<<)eyjX~N+0#}a3aYdz7!4CH>OYo`C58FnK=5?Fo4GgGLQj&-wf6u9hDFQix8P(1SS-OwSa<4UzFhQ zTg4j&Con-7g%FBK+N#0|DQM3EDIkN)6e%EsS|SD9T8N?g$WD6ey1skNmV#bz8fd!>j zx)~r$xYTY3zNz=Zu7N=|peTuc@W)voDkb-vB)pH8+MhY;^nIQ#uIL<}6s!`L@Nl-SI z?LD2G0g;Pw$3$)} z*~Ssn9(ipfpdtYqte~AKBHvSB#0sboYT8wBv?^A>L`Ut)zf*lhW@7*qtomz0i%U;o zcNLH!b`&26p?N$*_+WDrv;VTBbCVj7WJVg(kI?63kW zLc#3O0NYq$1!TrtBO+O{bZJis?Hb?%u-R2W#;=SOkO$3J0r{lH2Gc53*>8A>Rlb^O zvI-llfU+fYDn_h;JV+~3ziQ0Hiw#zAv<9pI)0`PrfQRE;l&OXzZDT0$WtSE)=Qs>j zkgKFBR)8NfV+A?+3lS^8Q?tSfaxMf9G`8FLF|A2ZB--5&tgwQkm9YZcq9#TxlY|-E zMI#fh<-G$Ntl+2vRzT!p#tIl4C?cvSXkdjE9Ib>EP$3kPw8dzw_7xnfiWRt|RWgVH z0$`VUwK#+oRzL>T)DJD@4FoRghul{{<{@vwp9#l{A6B5BW-uR2yhp47?C9xJe+W`z}45%TTL$RJ)=0U@FjpgmS#Lya?KGge?j(i`|9RzNQgRs&XmY0eBQz{7Da8VCZ& z88JctY=x}FCam@qOJw)qlN(>(J&%qTta#!e(^zYp`sFoxw!!sqEN&alHjqq7123d!S*FJ+ zzCIX_40PZ>@$xGN@B9ZpZ@JeXELR1HP!m^{NCG+~?qo1saXpv_Cj>TouBqqqwZ7Jj z63cjNm%3r8o3n^5JIMihF0maK<58h%l^d}K~ zz5%Zb6$UU05zx}mD-HpvA-gpNh=BI{RqeKRNpj`d9tkob9Ib3(MbbF=Ta(y7!KWK= zteW`<&5&lfOd_Ksn5`BSoWE4$t&)X;gsm0ug$UZ!q$zYw)ub6x1_o~7w@jG^N2(ODs?!vrYDXzX>AIihwQA08(=j>EW~y}^z!0U4ns^SXFYluJBDgU> z3mzZJ7^R0U+gf0zCc&GlNG0*J2Q6^uD=4`~t8s!TJ?HO875f$(w~F92`y-e`UOhzs z-?YzPRo|+>O?P5zXn8xFO00-jmVI~TM#Tn;>^%w@1cI{lb(ZQ|7f)-n4Z8V(43*8K zVA>B6iQw0T6Ks)S`oq>%uRH8a-m;UOx!fC%N8{FU{L>x%)2Dmm-f*KAA1eJSsVK07 zyI2C>@KCU3t50})#2?A6#r1{sh<$YQHIpuy%U74q<6nUJRrE1=6;Ju$j-FzQZQg-s<#SYMwy&TSsXK*Hm#rk6-8hpwY;9VR-%5kv-nkU># zAvFLU0&l0G<2zgFl^t7&-N%talM3Q0JYV5==^Ff#z20_bv%fXT4uVGKot)@zKAqlf zl3r@SoeZWlfZ@(oXM5b6po8pNuSMx}&^lkYrkIeeJdaoEWC*%P{h6IZl=zDGWb9S- z3x0XCiWrR=O#A4YtsIDDCVL6LGw46rt6NX9s)P3=rTX9xBH*r*vJ%{A*qdaWwX%d- 
zH-GMB%zyXeZYJ^2*~{K`D||PC)yjrRwHMFd*&hrNZ~c2w$Yfl&;AH^);b!k?T0p7R z2H>X}a6@I@kM{<>4RlI9{%TnXvR;j2lTS2AJTz9wNs(;eZ4KyEmJ01@I(a|Sk`|-n zkJ(n-3)kuYZv>lli-0M}Yb%t>g}zW7schh1H|i5Wz<#PW!D*_-!hys-?XM!(sLN`~ zU0iLvGufo63Z0TRyOA~t=(3XHmQbrgz%p728J|`b^3pyGRfwMuwmmqyHFbczW~{-flCU(5b)_{z z{G$z6_m3g?@=Mo>mEX7EtHdG;S_=W4oL#-2pM_5<_7GOblfEOYxY-C9<4OUn!=V!q z3|vtR>d~kF5W%agOgkH_vr>1_&qO-S>I7PF%P+R3>l%PKu>?dtG?P3k zGcMb;YqW~Nt=^lpl-H5mW&Shq>lr-W2#k1>8t*qEi9p@1verJ^e99Mrt4$3?S~ z{VUt0w9Wpxc<%$f?%=>k|ge}N!TKG)FU!$cY_}o~7`I2pD&Tu3O zUfV@ll1}XwKaqkH?uIf8(VFdR6aGfI$~-gfZuZe?=7bB_9)b1E@cp_`((yXyC!HgC z5v%B!*)Xo`w&6Vyyi7M(y0DUmI5X>s-MkAq+Xt|pkNCH8usRFTmwAxv-pb6blRnfc zIh2*Sq@qTP#P5-UK<={>qHr|_G3qnnUiJtlNprmW^DXHnsHrnNPW<1(K`!{;q`Cg~wxCChRRY&q*4MaOD@GDK(YLNSEu}C&X z!|8Z5NQKfv>$Ss;^YP-{=cvCv|%TF)d|JqlK;ER|4A>-a~}2gzTFntdfGedLg_6uy6v;9qry#n%zH* zLYm#Zy)$tW{a+QtrI@j5o#A^C91rIjd9*+&g5pB~b(%8Iz0!uGt)y4gmseCO@FR^n zxNjHS*CE?#GB5_m4+IU4^h|`=1%;q)rD!1y(;cdw0Xh6w`3N6fNlXl3Dz#HR}VLD_{%gUG;1c0KWsTv0k z&sKZ{Xsk*zyJ*b{#>p z^#?u^B5#zZm)33ABWDm&lKKR!a>61O@F^t-O2mjWQ!iOybJy~R8gPF|BRtU`^w4FG z=>HS{(7>=ujK)7RFhsG7r~(37`#XUuylEdTOrY0EQ4>n)e0XmJ4+W803hT6_jw^q8 zH+^xp+RMAogn$!LnXywXoH)4gBy|avYHyiyOwGw+QQTOJ;G-chXDYzs>A48;p6rYL zYtqHsg_mh`uLJ;6J$Hhm6L3mr&UkhhCu9vO0FFc_n#tDw<$aBte=>yX(Od4^asyVq z-4^fC83t~K5G42Td|WUq6m(kB?zz4d3OX*=}}jl@1ja{OjcL7*Um*CqT>Bv!Zq)+8u7sUN5gAi6rzf=9##BlviTGJ?)AuXu_w zBA_^tiUMR4I#rxaNaZKg4%RRApanyJn2gRT{RX{RfxcM2%~HxENnl5&hGLy};kSc| zB+dT$%O;@(9RT8`nv*&w3-Ky;vm5*rf`GhqmbrT(fzoC@pinba1ps;JbTbz%5@sYg zsiMCN1M<>Ih?rnFke3cYdf)&cuSgTYQ6C8b@>04g@GvOIOGhJ;2>={YnxaCB5LXMx zOUKQ5u@9s{P+vN{N^vL{6x-PRp&dFNjWr=qUrJT2yjsv*Ixgi#1rMe#9X{?49f_xW zN;*vLHKAc|`qXHQ4ielLZTDK-Imh{_sD*h~ffzm*1=wU(s!kYgklE#6iS`iEDs%q3 zP1veS1JeAiYC`L^g8)5+UNOXC+#1w(Ho$*HPBB4zb|!g+dc%Jl>ffs9$S$g=_kGD; z{F_^YFErrMx(srQbngB_&8U(GQK*|yyxmKnjntx>e}zL$!TR(70nG7}@MF2Bz< z>dRKhG}D$1sXeLF{OcpmUHD=vTlljYO_IV>wWXF!X~}H!Cv|5>6brr{Vk^8N{N)v2 z(J9wv%PFu`n_k1fT>q=aaKRuvwMhY6_o%L9EUgX$pg-Sgfd4i`y?wJ-P;KJJ$dLd3 
zd<826C$bIIMcm<5jNyMr_0?;12YUSqwMH1I9N+^DI9r!rTUkTBq9DwOhO1P-HKIUIKv+87q^XMRX#9z_a& zQ`}ypKK(a>k2T;;wq9T>8ZjVL&JeZ(v0{Hq&GD@qz2Q%r_F2}JB^7YRfci5xu%v4( z+iGkUr$`hwd_m`oPdDMs^>|C!yxekytpZrrnWcJtLcarVnthiMe6At8|0&(>h)-c+ zHLV#N-#Ul0f^bovbrG&1pKA)!SA_?stNzgamuRc$aI!s`^g5@|MLfj~Nw#Iw8anuZ zA(Gwx2fxv<-*v5FyewDb5)1hqf3Hw02c5Zs_{pz8oS6oS2xhpbwJb5=3dK|tbMUhj zurNy@D)O@IvatY8ki0QO){q$ie5wg&SpDlZOjnXkISMm*S8`We2r=2Wwv||s<<_&b z?tTIuiyx^}cUumQ^<^DC;FC@0&q$SALbZN-p%UMnl?q6OC1ufTi$L>cs?ImT6Y*p9 zsT1wlG6XgFCl%gL;u&YdHBNFTy2GvljNN2062${qc+#(WtuvczmTAdd5wm}2j!|+i2phtisqATmeK2?3RDZ5B4xZ=1p zWqq)ug`8Qto{<9kRDm6MJbpBQ0y)cJIMOtFYk~mSvYfYn5~iIx@IqN zw@pgrUSy8{72g%Yzo~FMt$dbcK~`z%jra6ax!7-Ew`G9o*sT4RzV8%1RhD^9k7Bhx%F34O za?8ynK*<$YRWfdw3Dlz1syeC>pJQ{)!XvI^Xw25&=R9*C4!L|s5Lal-@{n!4?bOV= z%H4~IZR6Vr0Lz03A0sMXsDh{ z#)D^1Cgp(1NHna4PBb!JX4jKR**2skubxa8khN>So=hMtI%N?sLic3Czzj#V_GCv} z<=h^oCleS_IkUMZJKADnE7lRgyC;)zZAgG# zJsCe<&be<-#uF7C(bOXqVI_3W#?Ol|>)n&#kn7%)?#YDNsw^~ALXz>|*^@~*U^0@eOBIG0x_Of^ z4ZiDSQnn2#$*U(524wBpuO|}-i%wbSC3H_F49sv;Yfsj$ttS&0QaQ7^Cu=_^dNP?H ztMp_m0eUh^k@GESws*0x>*{S9_^y-jq7z*%NKeKOfvY;to=iqEKu^Y#Re4Rg6j*L# zJbN-30~c5ZEc5K$lgYR?BtWm8j31A*DfOz30?MI#E;%!%9?-id!y(tbC(A>&S5L;X zu5$MxJlhtK-s@y5v*^iW8Yp<(g`P~{T9J$V*2#o1m`cyHCzDCjyC)N7tFq8g2}#C- zXHO>OfXPTStc7m1WW3m;guXqQlx;&w^6JTi0a?5D>&XPdqEi+DBXmzD49sv;YfrXP zTTdo1q;h6+PqxDKWYmLfvlHKeWQ$__ud>B9_=xA;MVTO(p6um@9hw0;GE0G*N*!J} zsk~$x7O5k6cVxWaM28E~k?}(W>BwXx19W6OS(VR(OMvC>#IvlIF)*9_S7yaN|M~7* zlyPlHe_kCKKVG1Yj3+8OB6axX&>a~+E@8$Lrr@V5T#F;wHJAllWj|&YuD!Z3o^6$@ z7h<=~ckrhXoM5w!{p+vUYcQ6*nDxyAcCs({bm+u>ykUO@pKLmLv4yQcH2gdRJ?QhQ zmr|z5eO_eC?<>Lkn)ZjtS<0du$(RHb*bb6Myab}vWMN}n`r^?;@L+u0_r)VgEV0i$ z@ArG*k2ft<7~wX}*Ag9Oqqs9vagW2J@x$JVaR%U_8u zBg5`Wv`jk9a?_xHQ|vu{uN_F5R;Jgwip_qbQ;U+CKVVU+S@@St*JH?VNyE-krMgf#jU6kO z$irSajU_1RkXp)e=#<8eiodr_X-)X&e(&#bHlR`$m(hG)-{nTjxn4C>lgwYq-+em#PhXr~;*$?mw{JvHc^Y^RxNa<)6}ZFYv; ztzO)S=e#F>DeOyY;%$v>@f z7eeg0=PqRCcJ7jYI$R)7xIBDWdi65v@qD7d+_u@>YTb>qPyekb6QO^j3CTb= z(G1jr!|}C-48#&%<}PKA;zd-M~9Pl>b>uBl38H2 
zm?la$o73}Yx;YZrbR1&-a~B zKWvS=djTQ6cbcz-Wf2R$(||iY85RSFuK;CN*RoS|urfCK73^! zB$S$h=I=!yxrf}O+O#We=_`&KXvJ|Du8m)5RdHm^?h9cLZk{h&Ke!mh^BEE-sWsFm z@vp&GBDk65!7gM_uGHZ|GpMYRcNRh^P+Uq%n}yX(BJkygOSYpdiUGn$TuE39(z>{_ zMCCPwMxXGF2-ZA|NDK!)5-vi4GyghHKj+kVP+QW~5YF=r5eELqsoh}sdM*TlKXz&| z7$(I~;86B5)Lfbj=J9eh8o>4|`@SRu!8e`S3zjvZKJb%s0I}#o0@@cNSzcwsp`_$P zfk2B!v@+PU)17pt=w5-*m^2zU(^_E#TJd3Wt|4q9{l@~#P0i-u z27ED+-L&A#akc;ugwOIK0Ai2hPYEW&Pc|LeL@Hucr8j?Pe=v}(6Z;BHFq1@bz`XAe zcwvUN;9eGZ89;xy*?YQ^!%R(0>%W$kv+!pwEiJDPE;W_PMWL_EMrB<=hlJq|OICt> zgam)?($w-Si;1dwgT>QxXli)^l4K=MHR>*m3R_t0DZ;y#)X}w_)e%U6Ng-|dN<9o` zMi)b9pyX)dN?`-Zp#-R_QWyg&Y17QIk2(#x4*npLZSC~u`NBwIKh=ceEH73~J#Wi0 zjm9{Q&B9ochijOB3#T$uh^Z;n!T*flHgB%DWai}|Gs%ST-3GkQn`tqf`HL~B!#^aw zP@oX*lq=zTkpntj3Ijq?^Ot=fnHby{o^^q05cjkXEvGcnoj5E*<%E|*fHhxp0c%*! zxF8}aUp@FMMHLZ_(#91eTfuT*E$Iwx+@lJmf8p3>vXYf)sq) z1+d|m6M}@Fq&48fS_q+~07mt-V3uF>eW~8PK!U^99UEpE_JlRya1NbXUVtqlxgOx# zj%_l-s2Cue8a9TWTbs-fC|i|c?YEkP2XPwICOS39EPFzj@RfH6zI4GRedo~u+oh+y z$=7{Vjj>BLPlht|yKK_cHrbmkgL|<++y3+^}7NX|V@$z!wFI zF4(R>;=)g9ZrHBKrjXS9Wj~{|=7#OI>%w+1;a$Xb8P2#Ma^ZnDc&6Zn?J{hNVZ#A& zWT?4dyUgQ~a8Z6KwQvm=xEGIB9Sw5Cc3I|xAmJzJAX3nvUyj%=!>_bD%3n|S&|HXx zA2)25Ws)9FnY}Y`z)Dk=F+cgalpdlth zI5fm=`*z8InLfBw<%0haYIfKxyGG&yA3N^bC6mM^=#TAk^jwk_M?jLayjBYcaYZ3p z9U*qacF8=j)csQzN3Td-URG-m2Dt6phjN`st zfoZV^bHEn`iZ0l$K;ptrX>QoA$fl6g{AE9*wC0BG9<2-8#e{be+hsW8f`|(il`~Tc z0nVFrCDF+d+hy1k!-fOm$WU{^cA3W|;UYA)n`@==R$fPJmt{@}5`K~{0)&TchR1dp zer2^Esc${zynEO##~T+O0%5zkZH9{&j@T~8s2Cs|uttuaTbr!Mb}M_ccKegNai(pe z!@gZkKH>o_zVfa;P`F{c$7XjX0~sndLP;|UJ=BC(2<#;9o z8Ct=cJGN^eTxy9OuwBy-0UBa5#LTc=GGL|;E>-PDJ4|L{hYdSoyJQe-g8tYpN6#f` zaRelltvqR2AQVB*WFWJ2#CFN!Sn5KB>yWxQdPVB;3Wo-fI%2!rOl7sM7@J7#>%|Lq z?6JqOLT=cundjMHyJQ}UOy{_7mr%`YS*B5UY}ZIyG!_?Z*F4DXrZP8dSK#WomdxDt z?FvkbJ(vT&C{T34b_EiWl%@k~yuzMSu5>(;L1a@%YW}jHQCf4uc8}R$yIc{e0g;wn zT4_834G0WwXEHFHaX};yc#}^%Ab8`pZyxJBSXyv+hrb?gbTX6!NW}CkwIsZ zLYVNCcMwVFct(fSoUC%97f)P#()YikWy$IMdMF@*zpRMgOXgFC+$05tKi8!? 
zL$b3Z;-jXC`FAwohk__u0N*ODh^Qo*)MST>KwyxScKyg(x$yP|+#N&{Lc9s0i6ZED zGirFHK1$Dok=&ijiiSV$TV*}Edc~g$4wUnZK>w%WIf81AxhjV1*er}&JUhx?NLB$W zU4E)`ZHJnDv9_sncafTGQky;28&llk9K5R$CMO^V6k<^zeatc%p=-@fu(8K<)Gud6 zw8|~lxTF^Hyx3IMK1r(}b;TC}y4I}Hb#-zc)v-r2wq$=>_-lTJ-FFdu&nMRwN)>9J z05VK!u{A{;FsMbj_cW=%v`Hm9{X2^O>rsZX_w5;B7%7d%1E6qJwa< z(u8j4%dXtc-liV(y5k|b!o4#_7r&DNq>f_4uTijDHk0buCHr`1C0SzUiA(9sQV{b`p^L)Pl&<;= zdsBMy@-m&Wg(q6$)c&=d7)&BLRi43_ad)%d8%`%J_HQy(-qW4lvoq+V545H`+k;+f zV>FzOM}y90Z@Y&sa2jr$@2nKz!eQAb7yNPz2hu`r*qcnzE$rvw#x-*q1O{KHUV`7} z;Qf2x^(O751iH`{fjhubzc zjdokMpTd-#Z{d^~vN{c}!Ke1X{azM}iU6{jA+y?RSmF|^rH?yAP5NxMu#h-xKPl#p zGR5;M@NTkl$6?6Yf{|P;L$A!*^bqP=xg4D<%-&@u6wx94ns4oZ_*REsQ|*vH><~lg zu*e48yx{p6gnND_93l)mR1~>3r4>cJfw(wT($6ZX2JnI~YIRY00aj%s%GoAKx zDG(DK1_;FDNH>`w8v`+Z*d-8?VI2c8^Z4`1guMcByR}<^7^`C!0&#H!hRlAA6)}0- zB@mNmodYpP;CW@mUKR0C+d!;q>RhT@q7SJV8%pf}ftb91Ei+_eAjS{71Y$C*V<2W8 ze_oleS0FyRYk?T6Lu$eGR>Z~oDl+>u24eEKOCToCItOBo!1KzAy#n#E-3r7+A5t@} zKM<46(lSFf24eiMOCTo0ItF6q@#mEZdj;ZSyB3JCI;0k?3&daF0}qz57`s&sZWELa zC_c=Nzyrk+8p$2($1CkNBa=u?`pytko>!LLFH6J*H|A7i2_DmT-28XYR>z?w+al)R zi+ezFA-jX2t6}78S@~+rJf)BX*sSH@D3}T4CeJGae>1B9`-KtCz-t;{5g zr4i5FYVeEuAStt++4k1XAa0_ScB=(k{ceF1PbkJPE`M_Z=KUu*8HOr}{G^6T?s2Az z_nHXPyw8(dX?~sr>B16a_ziwDLD@TGS1e+s4bpp_z!$XSl(?;YK@oxD_4N#$~Qnf~vn zrMeiDF9zjdwmi@>Hb-PPh|qBOCXS1xauz|o6L7gyhWh~~-E%$VwRC%xVfzKJ=UjKN z(~B=z-@mvwo?G9uHQMYA;P>a@>Wt)ZME7L7HSG;2qj6{0-Re!qs~60>?0YqD*Bw}X z$2=M3ubS(B*+k>aqk};XtXF}j)~i9b>n~>mDi3`X?ew~7F(dClji1Y#-*w|EAL(h$ z6mhF+rbyh%Oc9qbW{RX=G*iTFpqU~uS@@gqV%DnA=?2@qPItK3nGE_Hg=Jk!>}6^F zWz<^c)}{YrU-i+NA$(&V_A{f$3)r?3#&;5>(I?u8Csj;2sbm;`h=eXy4~K#B~U{=3LCjw-u z9J_4_YFJgSiO6MJfjOwPAbB~8k&Ej)f$$}Pv7?kRQ{|Bt+a=bHDp*sCSn{BNBS;4c~ zFwz4l*Gd|^o-%1NQBul^EMD)Y!TjX`c_i`F;0=^HqXudHEwXn*a1D~70%`C@%A`?) 
zdFvpuc%zm@7q`R%DHzl!wd8|0QRa*qr2US_-c4HeTzrrWrC`wHga3_`Y9!qsp<1;()DXDDV$s%1GUV$6&_YI$&&-1wF)Li7GL!2OTPP}PhHcPOaFWC-}>T#eZTbRfnWZ{ zd!N1S2Y>B%7vaaQjK6dRtU?o=h!rnD^z>46BK>+EeNE8Ux1z60F9P(-`E==KHCwqk zczM*AU)a;!w;1mQbnJ3*gkQWLnrI&%dgyIY6z_v*tN8X@{_SP3pGn&2muNXC`%Rvo z)8Fic#vr8<1MJB@%zWQYzdrzb^J5LupZ39g`W6iOHbE0Wz%u#{O4{_!7xE7!-P?&d~!GEHZY9#eSR_{=0Vm&p+{e)i2u zIuZ1nar&F=oAftJS5o;WjU&y~1(R#HTx{tFbCZR!t^{IAJX5rNBOsxhTioYhdqP=r~GrXrgMCqHkjt*>~^| z@(#ET=;CWSo&FFl*rY;LOb51M0l!?9j@Ct=OuwE=pFsYKY-Dq1BYi$Ka#Xz!C)*=* zsAHE}{~Z5^Q_LS;PzZBB>}&5$h1pz;;Wd!H8B=0TAGr=?p(~aDbA(GHN+q}vPOcG!Rapu5pKI~r{E#%0^#O=al{*n|BXe@W)kWiI4S zmpp)Wa*Vbf9QIQWOhuf1EESl<9Ooy!-e%nF52s1v>ba%M(5KNa+Qo0K;tX_k5f<9> zmmiE8ONR=6a=U*gV*VD``#`TdxTAlL2q67x5&5zF;L-GX%oj3<+)0q&$U*f>3n#L_ zs>}t&!Tb~0qd3bQL9q^r1hcS>B5D8WtM{U0ae&=gg?@7dLR5rpF<0+R4#8ghxk)?U zc7S3!*?<6p{$zSh>SnLJCOQ(O(ilL#yTxTR6bq}3b}JgKH|T9l<4a&4+Vrs5pY}(? zt0C1u>LTgaUTOy_SGynf98a0%{1O>x9{v~8$ei-WMvE5%{k25mcfq0KD4vabn|Jh2 zpYDy(@vS|jVW>PLbt+U%9N#&md{m4$I4IY&elb_;ROAj=bO(5#iu0vOaM-H z$Nlc90ovk%VoX-Q;H;L`O9%NlC1gAl7E@E5cKgFLpX9|tBc5A&AzEucn_5n`-I4th z@~q{BmFrg8*BxoU@Erc;@OYzr_~_v?-Rn1RJbk*`J91O}*)Qycx58B{{4zQ<>5b2I zQ>Djjux0mJM9V)=+DL#3z12hU{>8s0|3zp8tGWDQ!kX-_umdqWb~m#%|6wETu#q_I zpnIw}nAAV)?+I5|@YpOyZ1TQ@Jt-e$Y?Q1zJ2+e6d%wxfR_-Dxdb@iQ^&D&HpUZ8! 
zIe5UTyQ$eT+kj6-@LE=hZd`5kH3y~tiyMCSnLxZmsy%C=_~<@Obi?dAR*ovUj~+35 ziwCptp$6RNuMfUL;tQsPyMV<_-&y6r8d+pKSR4RXVyFls%V$jwL8XJ^LJkpxk2FG9 z@dV}HGzljmJfiz_5HkHCkyhj*E!8VQp9&&4lw$E)ouN%Pgb0yVgNRVQMwx8`f?sU_ zcUZTRXT>R`6oPA{k|1OP*`G;xpNqh>9G|B)^`$_DG1BF8w%$S|n2$%9?l;6n@%{#A zwiR+k14ihrcPHfH;17s*vbiAMA>UJEp%NBIY+Mt;zmGt(+mw@-a0{5F0R>^#z!-cy zg6mCbJX^qGOU1XMTRP}lPm+tc*77TkUH44#30t$%1pZ^hUnC^6t91IDGZF+NURX`v zyIQjzPUe};Jc@n^Ki4oDRHh2S0qyq3I%>HvLT5(h9;eZ&Z2!>5B6y{hoeI8YC7b|E zYS@AQY>_hvu!_GXs$N<5UR5dN6l|f4!XmQi{W_Mdb+#$Gc=l9!H&4pY515pZFD1le zO~d%cQGT>!Vy|Y@)$j`(csawKHF5?Tyu%`lWJpnl7bUjL7YeYIHar{gH*8A+8d{UA z*uFC@qPEHAudJtie%j+_eZI?vIx^WbRGS()E6%hn{Ief={F!B+Y4W*1)r|z>rD)^J zSKJ|9ZkYiuA8WuH49;w&7i}xg-5T#@)4YPzSfZGH3Gj8z1m=S96!w^FDXkfoc zxjsUMFGgV8mQFGo*IO{DRtGiW=p}{WqHo$cGV-fI$ zUc*`_NeCO1Gy-At>$tlgrF|1fQbHE@Q4X`Wi6m!$rCPszi$<97@WWaWfVdvCTT5X8>E2lKWEX~ z>I?l|005Z-8`rfcgcVXisJ1kmF-QSdmMNMM$s)NkoIs`*j+a`H0y0fjH_A-}V1pD8 ziaNdcAq6%gR9f~Aj1-Xhr82FC6p$x%va?4DY^ZJ7AO%*0e31e&h;rYI4N^de=mcnw z6xdMXOxfJz+K}`{3dkU)R@n+EAjC8xwL}UmD7Dhf07j(1icm0nH1$z7NCBDgt|J9x zA_0*CLQ-r=jKv&z(2Nw2PikyX+n=~#Dr8JWhhnk{8>D~?(GvO;YsnQwqZfxJUTlzp z{MAdg7_;3Z;L=Iu=fiMSovvxLK?*QUwTo)#r`8u>$4>qpY}q+QfWitXAaBEFM+(>@ z1^6*DQjn7`X}mB>55)%AGGK!gvZDVv<3z0{R{{ z>(VpE3dkJTe$*c;AXGK=LyN%*q$}}my9&t2EI;c0+GbY)p{V7hzzVnwSqb-W+j>Bz zXN47z59@g08mw4&LBP1Hz-ENq#0tp#s$&J@NuBKMu>u=vR#<@*A>ZDN4B~|q5F$DO z+G7Pa)HqW%x4AYXQ>?&Hz8BfTrAZ)@u)+$+hczO##0o4Z*bV+FWH zHSa4RVJ5>e3to2tD>&+~uYkzK_;v-k^)R)8g3DQ81xG7k1yl&dB!glF^gJ@wrFB>V z6CJhvd>wBfkjYxD#gh+f>W7-J0x~k6sT4hXI9>{@K#F^~!2s2GvDsHZDC+dWHCUCg z0-F&qRuJr^0x~4m{`2}b6Dg8BDA@*LS%5T*;hb5 ztP`L;R$xPoGi5VYU_;V-UjZ4!gg97X1%#MJq?TBL1tmMIz>1JBRzL!H3h;1z2G%uQHdq0s$+;*~4M*BVWC`x)(iZ1zl_G-^R*0Jo@Stbl}>-Ny=!Iba1u zE@rHNv4KLPv%m_DRl*9W5FE};K!$X$`X#da@D-4+@1F0C$$KZPU+Vw^`H6$+iwF_= zx9opQArphOhyb(YGxzd3Mh^t#>#T7aw?qo1saXpv_ zCj>TozU0t1{gM1sgCH(Zys+?J8Uf9oq6+~%3B9@ykQ&?9`Abp(S-{sq3Y8)bHJN-s z@p9OMzjzJ)0iSQc>q3PAj6wvoH1vu?Kx$#_)+E@kYL}`d`Ci@j7>1Hz4*rHy>VPEJ 
zNCcm5z_DuPBQ!&r<$Q^Zl3=zf=~jx$(_eb>?#ZA?KvyERt4UMnnyN|D0A4Ux9(xM} zemjEKSh806AcZJDL-7_^=xhwSlSzCG+VPRWXk&G)!kBzvp>7JJ^yD$kYt@|HmX1_l zifJ975y`yTm%)-x3#dkv9=dF6fti{FZ>}Ph#LpfIZ}pXm+*dXFm&A_roWCO#Y)yQT zsfysjajxMJzGCk6|Fk5T+_9&SR)_=0fIK#k2UBMwG3uvs7Q{ zEC$=4o4;gH*-Q$3`ynC`{JL<0Es{ci*xKrKhn)$&@TAilk4NKH`TCRcjVMV)fhF9< z68MIPf;G!M7ZJQY;*aE3@__O{Z;USGBKKsaSADde>h;f@ozeq&%j@`ab8owqPXww4 zb{klyCp-fMb zyAonDmwoGd(Ol+MBVjTsxu9c`J*JXfG&Uh(Ii6nZlW^zpDmg3JtY*2OQ5fDq<|-g` z9a?(6DR)(Xay+Sh<?hRVdMHsgupkrTv+mLzMW6@#T+I{eoZKtRhCE2Gc(JW-A9`naLK*JA?j{ zy}I=zt2)p^jg9)?437=mpN~<0C?|$6PBp%AN zXInFV7u^Y1*)Xa0;_Qml#9RMf6fzkXE_fMt?`j${6=4;AssT4t<~_d}GQT3yvJzyy zIt#stPc%q8G*-w-C6%`|pjTNcw5RFh{Y*<*Ymq-@TXE}l`m#5(0ZkJJ;QvOjS+@w7 zg1ojui3@uQeJ@MeD)`rp`UDWLpQ=r86x0!VF;%ei)Y!8;*A!-uP zt_h&8VmJOn2N$)hskvc)0MDh9WDZ`M*?M{=9}%uiw6CRLvj zQl&k{_%&f;mU4yyu4tEvFqT+RaMj}?btaX!!N0OyO55z8i}yaz>kjVdpR-&f%knM% z#*RUX=J|dm;;+$C5`1o~!F^fNx~QY{Cbda5}sJm$ikQztC4L z!6m6#V|1184Asoz@M!#SO`7SfsB8rOQ3UsS>kL(1IK{GCY9#$5n^|?M;Qu({?-qBm zPkvGq|FfKfWcKjI26V#7YMpGxzAG;#ZyTSP#8*mxorKm=!d-YBM83=I(vIwx!An;! 
ziT5x5HTf?Rek38P2`KpmoKoKA75$!MQ-ym}S7njFo#Jmb;GS^ihI(0y9ETTkQny!V zx1kanQuaacD}ikb?;%21LiSA(R!PBKT^0HZxgCIo6PTxT)Bit>LYm#Zy)$tW{a+Qt zrI?vw5dyv!!SQgekw*)ZA}Br-P^T&L+$(K3+Ddv=Tt=C>|B*%=+_wwv>yT|=8GImU zaHMA<%qCCNAm6sxY*0Q4Sw<9VkpZ0rDI)2H^K5A`wYp-Il{VqP2Lt1%;q)rD!1+$R2mJeN+BJ~uej+?g1Z=#g38zAfqueMnZF116 z4_oP@0VN|UwD8bP+95vzZ;Vfd>5xS&E6p|t07XYrH4Yr05i~jEp(UA(u1-^3JqC}& zZ>&dG8ZhG*B6!%_tFlEu3}@sqfV_c=mtx&F_kSit-Y8Emt=q6i&LE^D^$ED}dApPn z1SMj`nW>j7u(@mbLk+k;q!FIz4|?dbNA&-Re`sLXB}U_)85pA2MbKK_2~^2V`)C1T zlD#`hs|8g@_udE|3L>=>)@ex{SN`&DdM;#kHYE4TitgoIbas9inXywXoH)4gBy|Z^ z<*@3bouat07{NzFV9u%0Wdb~&o{JFg$-daXCSAHSnnCjpd^|)MLFbrPJVhB1P@G6b0kR35YUbWl4$FM8Z=Ffdf}uZ5M(31% zgWjw_Uo78dDdmwQup<`&vKGm22Ng-0{qvViLJK+o#7i|Nbxs!IRU8y%@Y66LFP&xX zo=BjySq~`GOjQ9uUOL^(MNz_}wGr>6swOT7$V(?7VuIm7UOEKnfdhcNB25JHek26Q zOX;e>!=NB99gRpP0B}fYiV7{vs}_)#j+^shdr?IcX4@ehUZpq`42o@R{?HB`kH(r1 zs4u0eR$eWrFCCY1qr~XZONWp9H|6-u&pZ03Pxr=XOK4BhVRCP~413e3Mq_l4;KpdX z*W%7O&QC=x%)3fw`N1f_CbLp?!f=DkE(c4rhmcm8^WSa4R$Us9=66*STCW`h=qdDy zAr{kg=~UsJ4e(!)Q%n$_ok?Dy-tZrX`nM`N%3sb{W4CXrcYJ-J0gu*YkXxj4_ZNaz z*opsYj#)gt22 z+Ror-DwOhO1P-HKIUIKv+87q^XMRX#9z|%_N8rZ(&F-|@IXxP0b*J$H3eB&3=yuH^ zzY%<_0dKPP0$b6D0ikk+upNjM`&(*`Z{_F>f7-OqvbHR#fGY;npSgi0U2EA^W3xC# zqOjpzNCozRO}+t|7bsDc$~wPhny;tr;8N zI)}4@a8aLi5w0PhYYNj>g@>eVIa&6qKldB{*3dx)%#>;X= zF0p`ro7i4~I5Q0t5sVW}dU0Z8yatwg1mBnVvlXx~OCc)qvh1?408Ws+F+|po83BB% z31?V)*PS#`NjBvu%;a6kU2!4AWZ&9WVnvo)&(Z?@33x1iq*8%yIXKprZAM*#Pd1@H zBUN$<)%xv)N_=-#Dj*e>ltr&K1{2zky_u@}P4GngSbge7yORD%h4+(q#?cX3lV0ao zE4{H6C%N>ytgl)m-&u$IS%rln+FPQLYw+h44jz!Lt>5&K4^mb`!E5FI zat;2WbL&m)g5z3rhP<_W$T8JO$6zFzyup)jS(@qoJ;}8?Hkm{^TUABKB(C5Vk=TR1 zawF5~w6M1g8o(ZghvEl40vHipSs(VP>Z48BMPk7f$E7LjgC#BG%-Z#g6xgQ%uHLvCHsj|#-dK9bmQC7BGms@Tw0ZOjGs*-WbOrRF6R@Kq&%jbApv+#&3 z8M?tW_&LwqheNJ=W0r?(>uslI)>ZCaID@VGc=u#STdH~_U{5B~K*8%S^kf3pid^K^ zlL=!mm7Zr$CX=RjPbSP(Wuc*ZG8qq^J(-jPCL__X7CO<$c$r;KCS}`@lDv8{VL;Zd z{dzKiu;`S!{-_8hq5CyqQVh4=J=xJ#Ik$)D$%IL(oY~xy9c{5a8TBA;u_tXT=B(|V 
zZ%VRdH}LJrWP+^HleGi%WR@Z?vtKk={NJqlHSeB`7oF&GIX3Cnlkr1fllSb&WF!%S z@7t5{WK~`hE(Mm|-?OlnF)*79SZ2jKB6#;?GOi5?(5olo$IChQ?a6qeq9YdhEp$)D zk4u=Ty(hyV*S#mpL$+5>#&e=)=*eUnsJ{Jjjf%&6olM|bbb%o2WWpFs zHRsur$)xGslL@m`S!k$)B;&!eCzEn8i;N`W#XckXu9HdGHl!r4o=g~!wQIkgOdu>e zj}NrO0_X zWqTKCR^$IRo1Tmpo#=8wdNO_pT=jYOWHOQgdNQ7@%4@=QCV1I zX!w$!Am-n@C*z5#)D~`?j31A*G4-mB!eOC%GC4D~_hdNay7y#x$oA^Vc-B>RQ#gaI z`*^RDt<0h)lWAaAdoqD*MK1DNClkhCsyWY|OeRh5o=ljn%0fdWBpDB$J(-k)3o0YY zmvzB%{|%5OnS19 zcHK3(}GCLj>u_WF!N0WIS1w&xA{W z!8RKQ3X$*pdBo zg==vny9Tp>tL(?@!nId7#3S;cjHK+?1_z1CH1_8Xm=^v379 z)Bb2^e*=t6ES#yR{H%x^oa9VvTTOIDq3r!I_^}4Ok>#dpE@w?mk=Xn)5IX?=-Va!m zY8L)w)AblKT+*JR(Ves|D0 z*B$JjJ5O&P4JT8{MG2=*pT2eZ7Mh=rK|4ND%FldY*TzlV$1@DVsNvq1%xQ9;AEUqlliy<6m3LZ4>(yrz%Pd)R)yY4Rel%Lb? zLd0(8F8QZb?m~$1?ozhfaCTP7gEa0!W^U&$`KQCNp>mfmORrvLJ)X}9xLnISklQx9 zTdlis_UXSBWg_%%G$9#?yIDPo7f}g~%jkJ@x%%AlO*9j=;Bb7cArrCV@)<*YuQ<-1 z--_ThEI&B*vcEBKsP;a?(6XfE!F4HLT~6{9tWx2^HydyV%Uor4TY{td2~T!KEIU1e z?90|ANm7NEct}_7=LQYKcOqD4*{LpVAs+ZiJBTDqJpW+?l3Tc0DHxR6St+ZP1PxS! 
zZBEapHP{i|v70!M)D~gjPa?RL<*2e=QTL;)aSa0Am9X)h2Hfe%uoyUe1t`0^mYt%5 zm0eiD<1Zr5P>mc(2p_&OpGg4uy$B@tkegJSwx2D1#qmPegL~l1)(CmUK15u7-g>a%wjizPLaT(tVd*b+s%;75HPPCWB#834ZQHe=rS%9}PE?C#n;RBfEKX++rd6vaQ zRlULD={YpDJON3vq88;Pe|fQLuqsKS1axg@bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{v4*dIi)t)2cnUl>X3rV z%@vo-yuw2G%q0U~zT1G;c{44hGk-BAb@+!Qtpp0;PFW7V7dfEgr7$2QHGkO$l8Gs; zJ?jG1Ans`&T25)CJK>glG{D%h2Gem^^vfp)UvmL#SkAa0;y#fQ!h(L0??noM+kV3Z zu3^~}!$x%&fu-gE*RYS5t!Y{A)jE69Z5eUbBYfKhu;G{!f`p%>ivZ!ylLo+8O971P zYr!nP==)N=dw~RpuRAu(H0LXy&S(2G|Hmv>?&*}Gx8 zBAY@|^OyaM(wZB#+qS`W14FiWOx~|FD2gMt%W%d85f^|UpRo~uvF1rqYJ(fL%djaz zHx7s+L(K)-Wgf2>EI66=7b>OC+Q+Uct}CVuN$_@@GGl#NqxJ^!NCpN z<#^-516Raqgevi1mlq*BcqZjiBoMaCF)9WKr#6|vF5TK>hQNg!B;2rFUfx2O@RfJ% zfx->jrMD|q!**+cY{^X6JgjZH9kE?9PFAm%jhM%Fxrm~`VMDgXWO9!CcFE%k-rTWW z1L0Cj?67awG(;`fE*aDaJh|=LCFC=GaH%R#J4^;B=O7te%es~kQFg?3$&9cG3eB)X z5asB(BrT3W+21AjY7wek8euY?8{F%N?UF&U)csQzM=y{(Ob~z@w#&^_R_ltfiPXMK zXxFBhI+iENq`?i_C68y?mh#MDy8#y{$;f;pQ6xr3Y?q9oXqqOFR z?H=6)Y!{EohjtB$;)v}soN+;f$;Xjyx1%gpxL|KNw_>C~l{CNS*Ad%g*c8Kt1LDX~ zbHR3*$0gySG6<`!nca2axNnzbP6!fyk}d*-J5M@GcHFni@GGnRB=xP{p8|uxczWQ5 z?Q*<5H^PJ4zFm${F+e!A$qY2>)+RFqBtb&AUhLr>m1B?NzFl74LYVNCckO|~4ck3- z!JNrJX2RAM+NRqP+a=>*7^L({1CQ+nY>UabEPu*vRF2p#8AHLFJGN^eTxy9OuwBy- zwKT+J(3-GaLO#<6m#P9`yJUb12iqkx!Y1gC?Q-;7k`_mxV$yQK7sz-s#dgV{SnB?% zi=$VhE-!i1gY9xNmDRdpY?9Qz+B8!Kwo4}Mi0zWcGi^(Gt_#~GBiki0w|%>03?+B& z*shVZXe=(+u6dB%O=WJ_uE5oEEt$FP+ZC7=doTxlQK0C8?Fu9$DUI&<_KMf?w#yL3 zC9sdGSpnKUFLCJxbRScJXjbcEnZ>cwr`hZP6!fyk}d*-hhQ`!aKv^Q zer2^Esc&oXd22^<#CAE}xbPrpm_4*&T+y*-)D7F^7!?DA1J=mVb8D0J*eVtCzU?dT+5-g&j3=v{D8}6er#*C+L<_Ez(ARho-x1^cg3`C!qx{6$HF_fN z^>8e18&2ev%oYB!;<+0V8d>*fiXh%IEtejWeJ&9nObpV6cQoOLf+$-6-zuGCqLOG* zlO0O>hQh1-)g+HRCQZD(0e1({giw|Q(L_k`m;`~=mLzxQvZCS7`&LDQ1WIYc$Hm1_G9cuQ)+NRRoMQXB1ZT1+_ubje;BoxBC z8ewt*azG=2@IshOe?y4LIj8+%Mg{c=`BtK4!UOll!X=R`^y-&4~La9Q{6F`PZEw(0c^1_9DtxO(vGztZj z?(PcUQbKGAApLMc(XszkpZ 
z{D@C4#jq?^F+t?$(Mz#YD{W!Az+%hWf<*RmQryqT~QBl`t<2rLk!VY50PEe&IJq=7aQTNq;aJY6khtwQV~-KE4O;RgAK;y@~YEVgJf^xa=+0t=qP@2j^26$NT8F$t&z0 zPO7g_uv<2h>ewawcxNS9V&{oV>C93P^G~6R!qb$l`V4zhdh+rzow9`|TI1CIwVfDD zA~{u_!I^P)v)>y|CoT4GGF9Hwo!+xE=%f#{raRk%UTb4CoQ_9>&Sr1Bhi-owZk+F| zltb%W5kVY~@mv?nF&s!2!-l=d6kW`IE^b^ir`eK-uSM zoHAwNN*_&pF0FL2dnW6tOmgM9XSR~NW$ev%9qHk=%}t}-$_z|h5wB_O-1T-!_|zV_ z-^*fA5kOWmWLA3(OI%{L^l^u%NuSLY77~Z;C&k=Rrg&Zj-c45SI1E`^Fp_J4(Cf0% ziRhzZ@Wtmi|C(>@fcRF2UsLUnKkN`g>9EKK-MrxW8H9U&CcGPA*rD9WwI3i57pH1E z1!D5JiwTlvodYpP;9?-|wX$ZW)3y!7vRXoq+3V{Gm*7ow2>yYX9O))AWMd%454!|n zGOS}DW*&cDnXp$NZnvH*ff%bVKp-xTz>wLmu_7jqy98qLtaBjd2t2Q>*sCHwY8!}4 z?^`oqgiBB*`q-t4n7n^2Gh|~R#t*v$Vlu2_AZ8waUYW30AU^t>3B*`^0RnOHzKYC# zje(dv?h=T}v(AB-Bk;VkVy{4a%r+2T2o*8WVStL5Y?hW8vM~_jhg||O8P+inGmk&7 zOxPlPCMX?He3%`92Z|*$k~`RsSK4hx zCXt%-ogt_^uPnP?mWT~*%&Eu{Jgy$0BkdZ9_eNg$;vSG($nIe1Y8d%iR=yfDPbnk; zHfwn}3T6Vi$@9v<-^?n&eqn?&a9c!!zM$p1t4ii7>WD3K6Zpk_kd#@^Y~mcl_S9ug3XbjIsxZMO6_2CB+W}P$6{NJZ#+}&G5b~oFHr;# zVHdgNW6Tz*L=);1hd?Q&q^&qS#Q^_opK@m)ipiZ7ZB*uVdg-EaJGmH@`)H@wx$bz< zs=gyDc*GNT86+2@vWsFm7o&2v!L|OVj9yasK5bM+k!k_Q=R^^g^I)aiO`3(fI%93M zs1MI9!VBaq$Mg4&?%PgYs;Z=NIlN5&chpi{49XXS@-SN-Xc?O$vKvHbID8Yw#Zozo zpxz0%Tq?u;0F&;y9)EN(^Pa`tWmUoX0@!n|JJ{*Pm#pt!+#AoW@7Wq{_6G3#^Kf-W z@;IV`Vszjl#07C5E-M{xWJ-bL-Oo*$@7WdDzd49xq_qP8i=w zlt!OuC!SQX`J|f7^Dow@Y~K2HBa?6BHlJAAym4f-u`o>~W{Sk-%@ncCn<)~TH&etm zZ>C6XK7obJ>G^GBw3orb_JN4`5B@e>l^I(34WfAQZWO1c{poqm><&D^7$&K)_o8!_ z7CIY)?qm{Q4u|4PvxAhf58?x46+s=oJ`XR-KK1~5ooF;x#P6j`(7w-e$n92dzyHD2 zBg+@V`8L@Jdoi46rX4qq=)?I>qFzlfG(|`kjjf&t|M>_G^7D!JQ6JzOPUjzqi6@bn zJK$hvv@_k_nRYVvCvh8&tTwKWs{BKFQyoyg66L8&046J?9YHkZ1~?*-C-ObbL(u?h zR*?Xqv6p1J!z?mge#LZo8E*lrl6N*4M!nZ@6|Cy_!$*Kjm1DO}K@F=CHxapP7rJwO zJx{@jT4|D9{SkagVC*Pm%v5>gRe6cEqXlaY`%rSK3g*;m%HFY+_z3)0fx%oip%b1lGRHZyq+>?GEq{> ziY#95r!HI|E5xKxmqbJ*4cgEvv;j2fiz6k3ib?w4z71HLHKW^R3qvB2<6hK!n{{I zBqsVJ3MOZ%Lp-&;pbz* KzFzqG2I1!$g`aPNcV7Li0+am+*we}9%{&mNAy0fiCs({xfZ0`I`WV@fkvrHA zN~o!^a*4&Ft(RoT*9V}5j3S?z2Q^|=%-rsEpBqE1nBDD_=&|0%OzO2hM2fk%kYSi2 
z0>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322#he^;mNyZ2H8yt=7uHA+ zMWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9F((%pkqpGl$VL4OnPNU} zgaX-7kt$g`T52r5XlWG|2BXOYExR0n>)=&vio+>%tJfWNCU4p4j(eM(-grD3x3U%P z)gw!P?+Um)`%w4PpqK8@>!;sL;%{Fb|G$^R??hlK&GQhjdL-6hk~l{YN#Od!_T%Yc zd#gvX;L-XvoHU7faUcpOuDSQuOWNy3%M*vQ9oo>t8~d8&w{1sfHS&k|;gDxweAs)#t%~RaEJKbe!WDpxib$>u zMI+ioaLxc(KjQ|#3HVtrU~ho zI9zBVdNi_5}P zl}oRH#jFhMjJD$hu2yZnUi!|xu&<~Rzj|-{m3uE%iF6+|FZ+vCVsDV0m5s{xi&f&F zkG!o#Jq~ONc~Y0zuI z04lmzCEBQn7JarVq7MX6(ZwpUHy)xSlh;lct3@EVqioiX566Wday(~a0>20czhEviU2_mNIjwaA?b~J&GOjI9e z0Jr#LEL|g$V9~uErz5i} zw^+r|b&XX55h5;!SS3<&k5wWinaJE?6<~?c8G(M}kESjEwGja32>A})tmB~o&a zRU##s$lPKTS;TjaRSW?lDu-CbQgM$}EESoE++r0&)wQnTNDwhOIUpNjd>oLa6HZhV zzjlvR90?Cm@pRlp#nX|A$}LuLbX{YWK!k|PAy$c$++&qUNhUJ4SVeZTIL9i601=f# ztYWFS$10YJOhj(6ilOQnt2h!wOb)S%r{m**rdajy22`3Kw+h08RzJid940VFMTZD2 z46$5tZwxXsnWg<^11bd!72BD?-4kM(MLi#iTrR{%0?ke)#t%23QZV7fNKG8|LU%h) vl2RcS7=lzTx|0EBB@>&M^HPR0+jI9fh7J*37-HVe%g#(9zR$kveTV-)ZVZHo diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587789385.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 19e8b20ce62d6ee2ec51efec446381bb897032c0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267629 zcmeIb3zTF>eILBHdvXtC;@-bIh$0Hcx6!$uFZs}*`Sy)(V5 zMm^m_ch5>ozI=wn9}r?E9NWjo#y%&B9ZpD`7zi8)Hc_x+9AZd}V`HH>0c)vc;quUl2OccpK|S?1OEU-f_g|G%nRb!Zp<@0Wkpb$UDEr`Dd%kt+-uDiFEc@K@=fAiI-Ue4~kB5Ve zbKSGO$+SH_HSJH%_GW|eDBgF^u(#2FY&_iTPg*a8-P6Z<+x__R)r0ZArN1KoMR8-P z+1dxYH)rS2UmHiF)(^vh?s#Xmy))}(k4)n$LG~2vK+KNa+b9p~LO9Y+HWG&&_D=PO zQ=Z#8hW-5rZe!UU^=D6vCy#fh8{_SMd$T{9_S=v5C!_wbeaC1p8}x>QxAiCONpG|{ z-s>8y1lEOs}Bz3DV=!{PXv;doVne6Isa``o}7?*-A=F2M1h2OnQpeTSU=;iFbS2?w|2iy1@vhdw zKfV6vpJI85TF)!-vH*?MU7`5sJxTcBOh_@Rj{>F4z7{^xfV=(mac^dH_hFlhFX4Vn zLRoRDm}$Si*@+v6IE#x~FD_|mKfvlz8f(zMwHCla`egRt9*9n$xYP6yCVZ?B!iuLm 
zr{d=Eol{8&kLW!agv?;n$^{g82uSHRp9vzkt({@qymM!mgf`s}B1BpZB0_^r!>={q zCU4J;n>TH4hTs~hBnX*6_Gc2_7b1AMo$z8f*_#Y{r-uELoz~@~s$qi*7r6}sw+_NS zl#(a?&F-kTh3X%abv&~2rLcEnJer~Etv^X=!3P^~*p9FEP1EV%%qU*OUlU7lB>pzn zx&jWMT4J!-oAq(U7dNgp$8aRMooT;|ik8O*qchnf0U{Orn+UG4l-Q)-+l-g)K&97g zFgs@}F=KKH!Nn&dxX#4IsX=eb6;-HQ98XF8aXqaK55qO_E39h6lw#}oq>8$1{Q#<> z(lT$YwUD+Cd?lr>3fJJ@MetIa>2uELJY&&oT9*>H##QQ?^~5Fa4DV~04JuQG;DA;F zNF24CPE_eV&VrOXvQE*Th~O1Aif#+OW+j{=?K*R$oBMyT$QgI`W~eTS7xC9b)hj1e zLXK^Nyp}3?hWfwh_80}q9tx4W_+OpMensL9AclU}q>Ods;;2N3_vGK0qh8Lsy_#EB z!akG>dfl{P?akuFc;T+w@4wleZOYKQEW*f%-srJ-A^rcvmifZ2ETM`ucshbR45IC> zRBhjxLPOhRw_Z(S`3kh-!#1&;&PLJcr(J&fFAdeEhK|zWvW);tFfBa8m$KpZ zN;63F|L@IxRX!Z&qR2b~AZdh=lFvC~0`N;_3@#s4bW0Nkmp_&`YrHT@51m);`X%54 z{A)ApMua4Iprw#BtB`9U!~kEm$}IvQV8k)9#U%zYXeP#fu+OK!!k&S-u^EZe2itMnI&145ZVGA5vg5LbiE@_>>z` zK!!=pw({lyH>7|Jsgs>OQeZ=E%LXa1BIMhdkwKIj0Bn!~LPRG(d!)dI8fVJpCf9}} zw++B6ZkbvD8>GNu#+FEd1*LYn8Ni4XSP??@Xked&2e&?pOu`B&ARp%15MfII4jJbI zJ|MFz!3S1I0eM<%NsPrDdC-g$kWXsdTGAVTBY>wuC;#T5^FnH28@XQqak| zdQL$cQoz}@X06m^PfHgMu7MQ51}VT)I2W~^bs$I~XT%68SRn+m`kxB*Yi3Mt6B5IoRQNQM8o&r*jrz#m&61)a8l6c{JJ(ze42xcJ0( z{``Ib`vrHTXbdQ$HyaH+Y;5c2*yAXY#I((>Ym6_6q1yadDw$Ur(?xCX28t^%78 zFjl~&dP7Ade=EStY|!u&WU$&*V8t1EO~BYv*-*2>3akkEVg+OnFRXwN(HWmTR$xPo zGi7s|YeUi-D5UhaA)CwygAFhlQ zkO#wr0h6CtVFky|Csu$5_r(e@70yLVUgF%{r;SsHml^ORt@ahuliFjipOm)b!D zG;~?*D>&wW6%e_Yu>!^h=(K!61FLrsj#a`6s1S-t+Ug`Vq`TBFk=>2YW_&~M96DaG z;)#Q6^dy4o;8@%-oJ1fQNrM`sM^v(XoBnMCbe!TFg7L^e2mTzE_wv8{@BV@%eh`+c z0z{~ZD@!B+of3C4n69`UOoS5xo4xZ}29bO-g0~vrKQ;oIJw+D+dJ=kdAs{stXYdxD z5^VTdNTE{1p(c|LC|+)#g1>n69e4zP*nrzZg#nC01hh2tibFtZVeQr=*sp3|fR|)p zuI-Uv0TG*rP55af>vsIDN$kepvkf>_&3uGrNV8lfkx>%NR#W*1-AYl^`3p+E(rU7q zFGkR*CQYGhswT~l44YW{od{lI$(sHk1rD=QcWzPZwX`fg2A%jwm9kiM>_UXkTuj&f zG_O^2cAJvoJX?Z7_|8=H%9enU2$4f0wDizrTMNw8BzSWbsU&{(pmkthiOStqCpdJ@ z-;pZzEjVr!!D-%uhzghX6ajqGK7Unxs{%LOiLIgK?QkkF9@%hvwIWN6;;R)XTVHFb zzIE}mM%$p9AIMPIOgw>*!Y1H1gcEF$6b7UAR=+puPT#tdotWI8OvaP;as1OQgVU$` 
zlm2LJ>+P@O3oX@oZ0`{cX>^(he|N+|a^k?!`eJ&jK03IXNq5cGq0$lj zi?Fboo=N@g2ySKV*KIPB-so|1R&h+`vUhzCn#-=$NSKUDF6fwKkEvuAU`&Zvj;Gg~ z@aOR=IV%}xVO$4bX^d_@$!|l^V_4{XQ|_t&<#g;!``I3@mPNY-D=G}~9um@th1$Cj0Zvm0k}hpkO3@>^*T zF1)K6IE)T~x6{z^ovrlBj;+M*<4B=N1#zL`c<o`jYs`y##uW{sCDz_ zUdH?vKknud4`p6vTQhz?g4N1~NwpU*+&UNz6L0-{QNXvH(KvY-z+kl5f3hg9R8INR z4Y#byw;x`peZkuU_?n1>(dGv_n83;-dl{m?DRjgj9H! zlTNI0^xM!1<_vA`y`XWxU$eEaoSS%0{!PNIYuY%YC1Oa$X4F&q%8#Aqp9O&IM{OM> zkvg+fa7EM(PHYD;hj+er0$)j66=9`&g*(JBVFdT-WJ zUPp49{mf5c-6mC^5>jQU#8a6ed`;MxrJSLFE83-^D(f87ysO7WbDaCJ zv?QI{Eq)>eC)^EX7NRxlYZLxjxyn2<>1__sYUY#+*dBrPE$}kkDCv0JbJOmTyoi-* z!*=22{SmxGH(0u`l7~1m>xtdG3pv{duwRJyw{oyL3(=Q(knG;d%&wb0)Gj%cREoBX z#P64aK<={>qHr|_G3qnnUiJtlNmcft`i6hpfD>g+`N|2kL1Z1QnxtxyLt@kbm554Y z@ZJVI98yxVoo%$s@62SpgEn~dxAVPD$#DtL>Z^|Az8Z*jSKwEgveh8>*A-w&tcmmJ8aipot2>&n&X?Az@&%{mi ze^n5dV#bO^2>3w+$HTcs9xYIcp!iTgou4o!bX)(2`tcJA<&Q!qg;lTcv z#bg|Pa7GEOraG!UMR1~0YH`^Hexd;nhSRIq0^`@Xl6(F8Y}z%D?0zCVO$2PbzX_*8 zi=*5qGi`Fvst;S~LjfftDzq@8P1+$p2yco{hUt(+Eh~$mAOI8{P1QJXfJV^dkcXCJ zKDs(hb@ebj7{93=U1`9KUyR@ZZ?DQ0{V<%7#{lvME?$at-{Q!sofvq4dolC=nyhOub}*&0WhMX}~=pjqt=^*hiN=qW@3)Lj%JuF&h8Oz!1eQ zV(MU8Xc6!PD)nXqv_gSiJU!Bc4@7W(5UHiGPFw1@%rdw3wRgp4%k&N3^j~^*ei)gt zQ!Si0xbY-)308Ada}+n0inq9hTJYl`Fz3|hG65b>&qauLXJ71JlP-cym7V`c_1p=L zPQWRhIpf*k4JrVRL?@cbw(PWp46V0fe>#Nf(Od2k)!S|HE}daeFj2)2g5(~aj|*mn zf=)}?J=eEFLC3|e?C%mhK~y(-tx&u(1mZ-egucOVRRoSFmK%wSBROcUw)WxQ3MvQ` zWbnF#KZ?W(H^7<%B`5U*JFG}`OwpkTJ{h8npmWSCo}!EhC{CoJ0NI326=xG_cYLvL zZG~vT&>tqFb4tHKZ&si$mT$9^@<Y=I)6EN}Kh7Ld{ea0OY09&0G{EOe#d)Nfp>{7?77vLc|2afxL7G z(gOzoc}1EC*^WS1Aq! 
zgJK(-KeR)~qp>Cg>PxAXl&1&+g;HxlgXw%Im+9eskN87J;whh!4wHLLXw;uQF`l4< z1UJUp{Wf>baegXlA&DxT<%gpHo6Jhp3BwICyBsXh9zt4W&VQ>3TXktbn%`ATXuWn2 zpr_C)hFDC~rBj7>H^6^IPBB4zb|!g+dc%Jl>c^_+$i9-O-uESY@o(1}e6ax!)n$-d zq;vNdf>zjx|7wm|$h5$-?z{d~jiROZy;O{}vg4GKxfrRf*obFG@cBl4*$SCv+Oi?F zCv}>CeZ;v7UuuOkDLhqMYRQzA%r}3kVJU4>Tv+kfLu`dtgulG9D>~)cY&iwC zYSSx*xonM?idI-F`yVxi3kKn-O$yk$$8{xRX?2LuAKiZNLexrV2)@+-|80nR`)0AA z+Qg5MA^-jP3RVm_k*)E=|BC9X*XR!P`W0GOmNn;SAM;~%IfvM?=K$KnwH2=ER`)m8 zMm^69@Mlffu2*E*os_jXQ_f#9s7->RK@|2np*!~;gVXV5n4Noyppo-w56Au2CQRn7 za1NZStpG_K=6tAoMAf|sr{Xu)s&2%iy{dn%LMd-X;4tcy!*REvjbZUV=7(hNQKay< zD`DSeZ`SLc9#6J<=uB@Ens2}VX5DR8@QDVz+13keMI#1;${E6TAXe;etvSAxqc?n~ zX`f|nSyBO445&YM150|=vaQBuaf(D?!xwbU_-qs2QjfQk&C4xU*eZZ^oq4L)Cr3E& zrj31HXvpq=O1FRFQglov>n!@x|;lb&uKg)Jv4IOT<5A8`E zO}EF>ez%uh>BIdhy(X~OmkYnyu-|p9VZ1C?-Z%0XwYplRk;AkIw#MFcZk z)LNFNa9?6}=_34m1uV=`h>E-{yKF3g6C`g6ku_vS0H0~X8CL(g4bzomQ;xz+-j&=H z7eY+-t!*V%WV!V*TA)7)562HyD$p$l$NI7@&DY@5O&H8cm0Uu#etV%3-`$l8NQEV3 z(Q7sDN38MhN4$lq`_1rZ{BV8hM!S;!euejwc*faqjg#Do-srKcuUaDCS%>?1g#~K^ zr)dHHyu!hQatU#j08_y6{ipStKJr0Qk@ISxETEGOEB2Rb@b{csZ(yiW zsYW^mBiZDQo`lQNO!x0euGO*0B+}WcDncf41-FRA9_*DHnO3KTy=~9{_5j=;-{%p) zi0I1tu=Zs5cvE(fSa8L0Y0COwNeel%c0DHr4yXdV@JRen00nZE!*HZ&^wt2KQR3+X zvGG@B|ym)SXDA^nF-XQ)v7wG5uamo&B7zDWatLh;C-IC4~Ja7BZwvTUCz)?8#&raA?J-c)WWufonxB^6SZjF_=ovvnP{D z)4L}VW~;K$P(7K92hW~N$^ny+XjluKXk@(1uP2kTZAeL8J((~dYuA1~nLt={O6vWs z&^?(jFvC%;J=xKAIk$)D$pnT}&TQ_6VNCxQ1 zc(N+536}!P{fTE!CS%|N%YbE`y?Zhl*M)syk#kv67Y)zQ!!x+jw}Q+rQ_L#}&I zmWOPwo{VQ*?99l6d9`Bw^;98N3{MN~YF_=ovvnP{D)4L}V zW~;K$Pzg!KgJ(}B<$%dZG^~XN|75&aqbuG$nUrlqO7iN-gaKK*_Up+6!lF~^`Xd?{ zg0GvW8aD31eKt zHti3Qvy??Sk}(M=upJ~1dI?0U$->5Z^u?q5;lB8|?~6y0SYn@hKIr$tA8%T!Fv4w` zuO&Lvs<;FWbjLfh?VVY7n%=xNjhA3?bpgH4yjCdn3{~nQ@KF4Kw^BJ}@b=)9~XBcoWM_)m+Y+nj*3JWgvC{{JkHr zDAg?dv!?4YWVobZXQ@(MsGP=*l}qFSubjpb6m@8uW}4KbTA_0vJ3ah8{1dcexO}qvW$1cOhc8bC>+nDt95ocz2-z+gFQ~ zd_&@;8h0Txw{w^L)4|+A<#+I9>D9}u$MZP=muqT(V=wy~1Basb73s(@ 
zbZQ`CTJjJ|DNkNb@+7QM3BflTa0|;^Wp>+wqxuO?c10{ZJ%jAa)>XKKhbZNKZlO*g zE5r99SZCR(E^Q$m_(|JTOok91{BZ=5Tew*%7?j#wDXaAY3mShG!RuI#D(e+>KWvSQ zlJIg$`Qd}SU)8YaGf zKLW`;O;j>B+G{0ggzBkN)Q!-Zy0S%u*&gi?KRDXA}VBp<%qaLIO*MKM75h${(eLE6CJEKzxxq0uLNBZ4&# zBND@bkA#a*;LN{{(=RwR9@Lg}HH7ngLxh1pacVahzMc(%;7^^J42DTD6gZT<3^kV~ zgL%AMjb13>;G0hE1@cGH3{$Jqh^;gjn; z6ew@YOBm%v03?H5@RLo4Hj#>0Rp~9%_i76U-!$95C-Y1YVe-V5OJPF9R5i zHv3O@^R6_?DoD#;>pipx4)25OZ_;rk7^-J5AKo%xF~slz`i1FAKrv>!wc=y)j% z2uaOf_NIgsChft~E>I2Pp7x>Tlt#J}hc&#Ma4Q5@^EDT+hUJV4A|A|?LV$?9^65$g ztVxP!_=XEy!?G!cjp{H0OU(hUVIMDBQ(@;GDv(#v21$$S!QXZPY&hnGAmJzJAcE0| zAO$e0uLZOGqVG%f?gbJYzV6sC)37J30f%(ymWQE>u?D{F*d{ZKiUGo@VPojIwaE;D zvQ;T+3XOGZQDAokq-~;8gUqrggb81HYfwt&1s)Hc-LPGH+M9gcH)LZ>hC0vKE}4b` z|HO?K36NaZqG(~Auw60>$+lJxY?n|~@Rm2l4od(A!lIeDV7sOva6@eB9KEn#GKE$N z`*p;23BgPsTqP^nxaiX1k^xHFgp*y-pHL^*mcNsA*ONm^knMqb;R zthI~~gB!NXGwGkYIC}Lq!DJ{$Y?nNq*_*7^6=M?#?OyKoBC?(7iuD+6oKCkhIW14R zVY_CYy|G;~gd#uj&9WjfI%2zIXr)Hov0Wo+(O6utUGpFTuw61pIaQgw72L30GGMNi zXInCJ!*&Ix#U9K7Ulb_1V7medNlGiHl>(;l3VTjj4&1O^kxe0~`O7|#Obj{R5!>z5 zh3#U(=MmdwIOBqdOY566BjPQn8qVKuB`0KXeVhT15LTWXv0a8uF>E*>jtn&yY?pal z5-w;3?4bgAWH3lN47STMCj<#UNf!aaLogZIjO~(XVDPE5?8RfdE|oADz^g5G+qX-GR`BMI?HUM|T4INNyQU!mG{j^G zhlbc~-!2(2(+8KTxMI`L9!NG5IISm;*_85=6bN?2cF7>v1pTpHj-E@>;s{tLEen+h zm1z;+eS(G~w#zf=pSn1DCbTFJ=WhFUxtYppT`@M1+Sf-*!BUtTwrl3u8`~vAcqK8n zeY<4nEP}{1>W=LiNsGqfg6*0IIlrmQ4cisC;*wdQg-SAnU4G)17Yy;*ao;YP5ivy} zojKr(0!0^WS0EutX`!)QGNNMhj!E`z*e)5A$X-Zl{<05~woJ*<5!*dl7q*KDpGRz$ z;fxC+=N-JsB_jgGj@T~4rU>0QAdU<*7i^b#ToNwMKWu~N2X6azS>}Ww;V0=LK)B$f zcv@P>#%_bDk{vy&L71I@&YU?l2n=r6F2|b?9)$BrFOYcQi0yKWiUGm_Yvky; zwaFJcwu@PG#CAFPhzGRz%DeVJ;kIx0nA@2Q3^b)ZH^B*mB6Biy(2i#^5b79wssY<2 z19-K?ZrCmvTEUw;wre0BLZ0vexM{Ji2 zf=y6ph8=<^N6#f`aRellZRkdrjOW-0yY1U0gJP)*6|S$;i`3QoA z$fl6g{AC}=!3=Kfi0vM;!FIVKGBkXHXP^Or!R<^2hBGdRoOi4yXE+eH%djazHx7s+ zL(K)-WgeG=i}R1|;`upZyDW1;knodq5g=T!T|BKCuw8~{YLbp=sBomcHlbY;MdxDF%Or5Z} z2;SL%JA!CJh&MqrQ3M@-@fhqpeQ!W=cP=X${=9FM_2}vqe=fL}={+OR|E_qBpqgVY 
z2Kh5NmnfDV30~>i4mJB?ZByy)A~o5hHhT=|SMtuv;BAsn2=8fx$qC2-g;*3wA0ee? zkWYH8*$FoGn2!48tcX^*UMmtv<@+QJmfFBEN)CXh(N z8oWJ7WG`1PL39vKRvM|mKz2iY_BQpf-5%|rc<`?L~ESdzjhLXNhGJrGdMHpZ4UaQ*|g36 zO{U5_d$T)thTZgm_H1W+*l%x)N3+Rz*xl@J_t6DTqm6Ujm2!OBRYVYnWrkcAtr+&F zuL_U)(;2#${cPO0dY5Lu6uvI~gS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD` zr!vWvXP?II%c1zD@a=r6UYNG0#wAk6adE1qQy?af zyOp*Q%sl>_GGVVke9Sr!hpvdRI;2LetBAk88}2J(F?OpO+$Ja;P<)sjfd`5u zG?H7`k5}4lMkbM(^sOPNJf|#sP?m@dZp^8uDtPR_16NkD=!@e>7N(!s4U!Al9Sj`< zva(ox4J%)bnWq$z0GqWu90fCh+~hfB;BR3SV81ZJ8TfkD6+YjsW-|L$M{KEW|I%Ja z%B*L$y|pupn`ouoY5`ZjTj0c_iZP7K-<*JX|4B}Up-Lh@@yp~+XS#T=i7?H(J;{~k z=Sh$*EK!Ev=r=<|F!&q^Xc}ye1iCJ^042V1BzQuwITBPS;2cS*9c+%I2P)>+Qta9) z*1tUC9DFN+7byaWu!~&sF=mTYq6u}uW~wBz zlU$6-1GH1@Y;Q7cSKpBpJmQJF43djc*+ntki%~h-;2JKTDI!;TLHZAAqcVzAi#R?f zinyEyE9Gv|EZj+2vT)4FU%*kZ{L~UWPtJ0@aM$?m?c~L(N-CGbOZ0z7E!D-Kd@(4G zvgLucu{k2UL4=0GH*s7nm9q%yoq)@wGTZ|&?VauO*U~+YTrGde`o5(-@vimVTjS0C zkbN~`kE+OqKUjc68Oh^_?#WJj)*nsBlkTXu)t{1AFW$94M)=F-`d>8B80q!(``4?W zPV3c>*!7pPags;0iY|HGv__FPmd4NJJ?*-2@s6~rW{S82HB%(sVy1{I6Ej892AV12 z?#@h+SR!vyYN%*G{B?LCYgOomcI##Sl9|6v3N`1!

N-HOWE7=n>?N7*FpEr=Uol-?##_LuX- zMev^l29H@YNJ=YFcgG~dc=4PMq~KZYm}~GS0&}mT%-Pq2B73hYcyW0#8X;Mrgr@SjPk zM$-Ke%B4|-d9QX*O!P;zOuDEQkECExqt^6jd|Yl3_KHOdqY_DrnUomRmohF|Bx5O9 zG->gR5nxN3;7+sK8xHd|i@c@9{3i!+gG;25T+07^MEu2y@I*)W`Ka*oG2!P|2|r&a z{CvIe^9{n!H^O@_hlRmtFw5CF$}O?*D2!w`(R4aEGfG}%`w&_7#gDyG^H_m$=0{Q` z*BU>~Q#XD-uO0~fBPROj^HoP6`*1M7<`GEXCv!6MK-Oz*M_;Tj)L6O1V$s%1GUOWq&_YI$&&-1wF)LkmR|6J@1FSnXRdDSO8@)7|MkrNz0bV- z9Y6J*4?KO-kNo=YEy0gp8Gq>tScN7!5i4GV=*i{iMEdm{`kJ7xZ$n>~UjXQr3+d9$ zYPNE7;PR-ku(-RqcPZWj=-B1r2)}qAG|@gj^w8U*DBcUvR`KoG{M*Z5ACt7vFVS*P z_M1FEr@z?)jbTb92H2f_nEAewe!m~~?9i(UFqz!m06in4qKNmJbQX%hA8U zlaOzwN|~MXPvf!DnV#(pclzlh&<{DfP5-`(gpmtz8MQ&^ql4_c#0>tX<3_3pML!os z@PEUeb^K}rJ6g(pnn5u4i+Eu4g9Z18VIP`AuX}nt+3L;W#p!HwD43jX%{Z(cnB z`|)?^|8Ktiel-5ReEi#xwBv)Y9}U($)!TS{dotdxfBbvl%Js>(x4F@q&e9pA#}wW) zIWtYoWqJgSpMA5EP6YjClKv+9CjHIwl~n#o<4ALL5f(a)=3ck~c4te2$)!Lw*< zTt)|TX9|rM@m7xIO91}`y8^7k?$^I{r#CFQ&2Hp2hhX`K=zyzNK-{^k@S6SmF4+U| zdyz&Tg@s!Or~!;&VRNuGU4{WVHu1@B>T0ZtkyVq)Ax;=edNHt1WG>2a_Zrx98#;~> zosg)SrRdw(MfM&zh`a-?1-kf}PG>Me3pS}x71M!jSi~=vrK9!GC)2N|(kGC=A{*J< z*+`#HjT}|)qv`e-`CgA&|1SOyry1E~rvy_d7q8%WsNw*im&9LvDVQ-`V*m$_vpOkHfHjhKHe?76q!8{RTFO9YU9wTS#!e(+HGJdR(; zAaW-`f+Hu|FE5_R{;Dz;6bJK9V2|P~cMQckBofTxHj1Q!ClBpG$zngdwF>>_3WTT# z+hPvwNe;pu{JCi-UnN8_ovataa4?-+ox0g8u8xjGsWgU=?{0A!4aLH0qrDD|)*tpa zX7MGk7j1gj9Lxse(IH4Rkh)0vHA^KbSGxyxA5WR){1O>x9{!io$ei-WMvE5${k25m zx52^VD4tFFo3{*3pYBi4@vVKOVW>PLbt+U%9N#&md{m4$I4mk#i8O2~L9ET*P9>kUR}KFN!PM!akJ z`Dm^EvD9+1?T+l9kY}|PSFT;@TzjPR{ImF*!;_89;iHGo^sd{y;q>WV|HzH;=fAiI z-Ue5(@XPqrv_Co9OO+n8!Is^16SaQ0w2=T6daDQHeM^5u{)^C}RK&PM&k@Sl$0^{f)zxY`4W*1Hcb!F!p9mRtays@Z<>UY5FXKcG6>Hcqf|+y_(U@wvvAr@fQin>?)l; z=Zu_VXsD<5#x_2v7iu}PRPtGZqr{7=>1$$Z@VYWw*VSGW5eHW#mf8Qu8BFL2=H3|rJ-jjA2qWf4X)q$tCS z5?kgSX7Y5z->@wSXlP9`Nc*0)?pgcpvQLi8feqFA1|EfKQ$uIP8PmdZfn4$Om*iTw zr$6$@Q!O8+As8>ZHXcp$wO-R?b)yC(KGA?T8l2fmFWOd~yEWd+rg^5+^Rk5yqxzLm$RNP)i?%5ZhFFNloFL@cBRjq>X)eQw}3auDSOTrNQLkHydzs z0Dr~}ei$=CEq)k6!R*l_#AhNfZc8UAkn1gkP8>%eE>9VTmNgw-_98YO@bLziE`KZn 
zzR+t}3nd9*gKnGfwJ?E?DATlW0!a$V35|%gXfLE`Ka>*0|QEJcLpi z8-RaphTVve1P?U9Zuw({|9SAGF)R48Rc;Xg0V9r)wx_y-R5Ek`v6#1xscZ^PJ-~p} zG6awT{=f{@pqZ2q0*ert!y_i(m9>C^OHa4}{*G0=VQ>Nylu-zwn4~obV}lgX{pT!t zTYaJ53n?ITVB@+!Qb4G-HJmX>L3xqf83Z6Bv;3&LYa671P}K5LAO&3hQZ!a_)WZ!< zAkzaIq=0-_$BQ3QU^4<;_RseE#*HJo1+c(K0hwQQq<}oBlbtPVj7WJA_W$d+UaHhBT`^R zD40E(`Y0QufXrCkh8RYXwIVK^ESU)EJ(#_o>AI#2x3EGA2uZOeF&1;=K{HZ7KB=)m zZGYk=DS3xt(3cfbK-m)d6l=*9MWYvoCSGiig8bD>wix3!32-%wra5Y9b`!X?WBC~{ zHb?>fsM0Um!O}dif=;H081E@CVg*zPHSH>(?_skpJ!h%j`h$Sgm8SOKA^<)y$1q_~G03_wP<jW zdGMt(TN|w4XuikZ9xK2ts(D`l2{Y|fIt8yDz<>mBu-aE})L~x%k&E%|3Uccq&*eO@ zf}@qN0xE<;6+!nE(DTSxm)2ng4F0R_=g|!>`LRq_tjA0AqyeuD2xPKWyY|V4HT6SH zSOFQCkL-%|{cyb0f)!Nb#Re-N6m@#x8m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fM zgnY3AGKd#eK#1rBXpa@xP~%M5j1}0BOtAt(`CeoTmxhr{!fIau`LIT$mRNxWB|EIZ zicsBH0hxpqRzNmJhQuc zLhLKZ*)oC(V1pIpDyfPU;K$5ZK~BD;aRuI{(T>)4UAk~QH7l$j=R)v6BUXSP)0zZ* zf!~7z8?4}1Wvl?VsAjBygc*z#a1cSjGYV|>6&!QG3W!|HSOH@L;Z7>Bc?aQGC9Hr7 zp$PldSOGGmyVNg{-Hoq+d_(VCe?s0nVf|VM7^rc82pwC{^1r2!iNRV#0?=`aZwSVt zcALSw|L!k56_zvgUW2e)=|BX;)obU)6*?vEWH4QEJ(vh51U7rFspsnteMuO#q(>Zh z`v?5TMnJQt=t4kGLa#0aq{iY5-ojIY4POf>REjv%Wby&UYsMa!n^wKW4}aKz+e3u` zj6wvoH1vu?Kx$#_)(~sptJ)XfCHbCfd&)>afV@y@Y9zZ8qDm1JKHGp})yzj|hBSu+ zNMw`*vsFcpWYDU%Px5z0bR}Y^nly#3shTv4D%N}I1_UF5--+NgmaJ7iNFmD4Q1rcv zoT+nlSVIKpePYdP)tud?%y6D9>`oB;P2Hns7oUp=hcCxsL`V-^wza@aO@cR9kxJrc z4+;l12vB(Oq3{4tNv7xg9jRj9g5y>ZoCfn?N&{bpzGCk71|> z0*TUWeXXVXN@p?H2HpH6i^^tF7^&axi3ol}IKdWQ9J z8&Q&q0!z4yCGZUo1#6Z#N(AqW_#?TMJoKjM+6;6l7r7@Zz3QX=M1OGRu^By(x4h?n z*RHp}jxT{!+X~(z9Mb4C5&rIogJeg?()uFW(UHDv%H*VG>rm;A4|IvqD*70_JAzwT z`*oWPLYW>XcO}GRE_>Japt9rPr{wYtK_U?vxRXT zV6OW?SDtACXd^w}l)EZGIiA$M^6BdZ+2FOR667OrpSwz@Gjw6hMt8c=8}=sMjmP>M zk9Vg#8|cCybR$r9bJu5ukf&ty#Q4_XxHoH)Yc!IRk}FUpK}0Gb&AeEtDA72{X&tG{ zrmKD2qMeePJITEUj#kBoJ>@V|XoVG8hd2Z{wyZRt-Pe~pY;9VR!^;R+-j%`2T!2zn zUkyNqz}smkz4a=+`YN&eI8ta*L0pAC6n>YYgz1%9=qQwCy2xMLq=jz7OfNOyP6ksN zz-VWyi>_*$qMI4p+3kq!b9HNqQAIWX*|jU}&+Htc#8-?jf2`^k{PJcMF&Z@psE>ZN 
zl>@QNWQ*n9;o$Lp-FlK$9cZD(Mt$%{5v)~aZ9M8vGtSysLam!W_cG?c_;ELvcqr4J zZO!<7bSI#@_Tub{)Wlo=UKBDJ7cO`ickgN%G8JJJe!2nISLQvx8Zy5k(y|g{y*dlM ziBB|0JTz9wNhOuHH=tixDzvBR=KV}tT5FL%BGd?7kD1@@TUYVs?)3jBg3Y=`z!c=Q z6-v$>)4yocCxC$cRBeLORE>oL2|XPAWds{_Sxvc%tBv=v(O7nes7XM#Mh^!Y=ep>g z`Dxp#g0c)8nkEf5k9COyezpns2H>hx@OS|Ub#a@_>>6n{pZRN0=~7x);VY zj%(f+RdoqD!!z!mLM5ylcL3a)xwG&|Wpw_Qrx9Gs2!+}rOBZyB7mTqMi#E&s-jL5k-2 zelFs#(NYqe-x>@P4=S&feaSX7XE;(!z-zlmOVX*`;wMsY!rf42AzB)&Rf>a6_-o}V z^US2TIY6tKQ!ZdjA||_#rHQrh7I>L%lytoAxoP)EUc@R^{@Kx#nuPa9@Dkl%w+@EG zJj9tDO(M4pDh;2#&|TFjEAXE`Xh9~*~Yne z=??UDHkh50A$jiO<Z>&ld&VAX04>#d- zcmpnL3pszGuUdjjQnSWrIe&&~<`H-(exN4JbXQb10zVbO-QGGwl^0I2?3NlfyJ|`w zNznv96Y+P8J7s=;QWXD_jAuDhc%}i}aI#t_o3U@pi^-cNXQuI$(qAW`b(C-yUI&pc z2{+AFr?fTuCGg_aOX7V?e?|U_gdYJVzkpN9+q|ORlWeMR4|l38Qms>@F+f#0{dNQH z3}yT|V87LG_Wdk1y8XW1F2=mDkHORN4s_+Vtp?nguj40M313C**MA8fA+0tTab%n}A zDKy>EG<-O)|79^5M<1L~LaV99YAjRrFI2IQr)%-s_Sls4-Dg_@}<0LV+Ho4KeOXen%yFYJSWymS&G zCKwLnr9+S&H~`2i(nKKd$3lR-l&%Uq3<~nn(THRM0Ed*OsL;ZUY5{ZUxH&J@4NMr+ zmkzH|90~@-Ha34~hmJ>MO$gMNQdKLj7SxxHOSw@k2>^QO@Dcx}9H04l%i#3s{se6a z?MpgL?roP*fA++9f({bg7;pF6+&Rbjsi-<6p)h>`et-{00XCVHsuPAAWOlo8howD) zRG62J8TeaG*s4nd()_M!LhH4I06m3XF~lm|8q{|;z<)(fF+qHGCV7Q=!+#v=->T@y zE~+TEcFAAPSc5M%;GwzqoSaKRuvwMhY6_qeWPEUgX^`lH*wXJP#Ctp@mSL)6FJDC~7YckVp~r{m2qJNFbpBj?jT zI{eorOy;d{4xFp407)I@e5iXw)x8O);y2f-Zp5R#s(-FRDQ`yLFzS`Vakrt3VevlZ zhh*+ir0_Sz?M3R-e z)zX{c(fHx|w1oC-8LsVyzhB|~B%X0}L>5XR$J*(QwK&P8-(`K(68SE%4fpd33-*bU zfSB;-6%HPhONgrkm;#RPKds;Nkq=T<16mH>x7nNZx~IpJt==qNME%_D_us53t%^tj zf6uw~CU(JbEjmNqTHf!NYNTT@l1<*|Nw_S{bpM{@S{<8ABAuZ^|wb3$8dWO<5l-X(4CUuIp8x8gMEiLu){l z+l5EshXN>QS%4b;? 
zWR<4wXjUa8^AI^Nr>rL{0u$G-*|d0T@SX}sm6%1R$b-J=s;r#~2fLbQos(aIk5<^j zZYEoQ3Y5sjeha%T1700l_*7ZuIX#Ni`Y0<~uFEYqmjESKU{%SuWhPLIR;%hrz2EXV z9@i{9;!1|bYz^M$nfq|ab#Kh_kZrx~)Xcid-HY&S3wU_IOCeuK{YxU;L zyC)O4R^%eTo=g~nspdR;GMO~Jdop3RDhmzOlgW7S?8&4YFd2!4wa|%1#>@PAGAY}J zl;qWu2?Mfr?bnkDghi(;0!HY*OBk5pNQV%4_hd)g<=h^oCleS_IkUMZJKAP@GU`F4 z=D};(&ATU)39?F0)(Oy)S&F_#vio}$_A&N;>dAQ4RqkGher`QkXC6J7Oale4=bOXQuX^42N9zo-7a9UOgGly2{;)h&{KSY-JujnM?x(ujipB6S!97BENMqVGO34 z^X$oF()8}hgxRVrG*m*8@!;8$NjaEDMw0PjpOJifGAY}Jl;qWu2?Mfr?bnkDghi(; z0!HY*OBk5pNY@8?_hc)z^<)A=DrYwLWGi#&$v)<}cTpxtrYC!;VTWdbj?7Zvg$Z96 z9T_h;(cyx0Wc(08Ix-o_038`mR^>C{5@5NF@ht0Q49qA0m07XRf4&`=jB7*s^Xka> z@d9;ZJWc)7sRjzJr!&E=;od`~_ z*~b3$*X%VI%U;a-<^em|)9E|BP6mIwVSfdmY&vUS!Mf zE5WZe?GKT<np`F$pNJ9V8EW2}G;O>b}(K(YKK9hx_8=zHcE(f)0Gp?}b0!v{Ye) z+caNGbf{HvbaUJps*gwDq4)uBeQ@?-+jD)&GxL-y6}mIg`nW)6*X6H7myrp)E3sLA zRVsq_`@MD`Xnb+;jZaPcle4|qU_7$F0Y)Yk&Qw%>#`9`=^{a_a_mJ&8q{$F| zya8`wxv84VSyNLaHopwS4uHS+0~V#4g@4v`J%$XIH0&%@stc9V*s*enJm8hnSc0Ms zsig$ZRP3nud)xHcgn#1q{vKxoDs^!g&FA%9ZnT{1m1G_&3;eExlo3rEl|>);Pz2YM zBh0D(tha&=9etudX%9w&*`PPsEH40@-A{ga(EB2ORdP5PVNQE#gsH_%*t zXL%{?O>5%bVAP*v1kuIKXpC({eRY=|4=Av6Jthw9tk`dwP6ua3@gn}3=sD)IdYKrf z!&D?Y=uj1;oyP~GGu-K1-m{L}Tj%a^5BFS8!cX9QfXq(jmd5q;0=is%SL;TaiP~^DzQ&M=*m3y)u0Gic=g)6N@EVpM9DCW{ z7&zp-PvTe}jFj@!-vjMlT%vEN$EjX&5@MKrSvePrjzHD7u_w`VP zJcgL0{k;g*S$3*RTZjjK(l!;7MLcnr6#Q`nl3Tc0DHxR6T`8-T1dS@T-l{Faz@J6% zI+ml#dPUujp~iLBK&ytZ@x2Dz>dCMeID7>tySkR0qJx!PSi$3eMxLP>Ig$`Qd}SUY zK$wCM{C)(Id&o_yP2115zT$X3>3J_(KY)7Pg$%vJ)yDO-+Bgi?#ILZbHp&gJ=3q4V zN(8TFd9Vu^lq)V=TuRD-#Z}A%7Zx?MNDW+Urkzo#nW?WYIy>>WaTANdBU<#tWX65y0)`A0x2*lq%B{m z7f$CTB7-mhp@EX4jVpx>B!?29enF?OKZ;~qJN>AY#poB-*$$QC0j^;mFI!XL2OeUS*S|WOL~0ak z-*3ABHXL(8knodq5W#3fkOCOh*MeDo(f6f#mjs7ee4bCbI^MqS*f7(uC#(Tqu;Jm` zj%_l-s2Cue8a9TWTbs-fC|i}HrqEb`ww`d0G-;dY)F89$31Pxl-n9n`H*A-l_9kEV zRW-(pr)lw5egjDu5SUEa5!)q?EAUU;c#!}Z5Zfi=^5Idn$<__qC1WUf%bQ|{B>)3q z(M(*hUDFV?G{j_3&5jBmY?qMF^r5a~ExImo%YjR?MP^g3X9HrpWDsnE!cIqrAj;8m 
zNm?8MNzy9Um_?}elCr$qHR#$A+vS<`PhA{6lXHZTmm{{z%~V$Fim{1=_6s6~xna9z zp6kMP$;fs|%njQmV`LFTrcrln*GO7478h*SJjnS?Wp3E6z}2%YnYm%R0@Gp-=728> z6kV`gfy9NM(%i6Jkxe0~`O7{`+A<|aaKm;xHrQ@p_y&*3hjvZ#?uhL&oN+-U5M-M( zQCc1nAzOPk-LPGTO)+dZAdU<*7i^b#ToNwig{nsL!C@ObKXAi#S>}Ww;V0=LK)66r zcv_mxX={$yF2k?1I@*oPi}M^D+^}7aH!eId<$9zOs%#^ahx&|h1l+J)j!`i{IJL

55)w#zan1PMP$7XiS-5Ow^z?b~JemDPTvz734+=1R4NSiue3<#^-5gYCef zu-%+rV;phBb~#4H0O5c&a`fEVm0oG64cjH-@Ajyg8&ZiHN>46)zOCDEp=Z@_fNsGqfg6*0IIlrmQ4cisC;*uF>Uc%?sNl%(>f6BBE*_PwY_;2G9kE@GH!eI_ z$2!TbDR+}BR~KSba>RByM#TW()UYwotXrGR5Re23T2pukGen?q+_%fiTL=@r^42+d zh#-=_&~UQKiDKMsaN0w6Nwna)VSEKp`oenD!iVy&(L;8xgJW^WaLBG?cs_9GdxQx7 zqT;z5k^!@_k&7Uu0z(41MV00$D>S?%1@CIYj|Ne;0KQc^$wVd5q$WGmmK5GHb?Rjz zcxMCd2%-rg-UQJ^5m@}iWAaC|wj{YbmlX|v-nYtnboC0C;0+VvkAtZpT*3dYc#fc& zV=e|siW7hBg%51$k>HiC?NGBX);5*yE>e?CYO}`+G7`O~5hf=f2NYsaAbrd;8lh{= zPO!1Zbkr|rMYPH-*SMq>l5|d_4)VPVl?!re#3o9wHLG-8ot#H5_LRbe(8>t4k7jnV zy2n#8@B^P*TPRhic>>5Vsm0a`fAJKYqO>t+Pdp71rMue#xRelE0!UxeT1CNR2w(N- zuc+}^S2sH4)E9ixJq+%ZC&T5lD5|`}r*r}DaFa-$-;hgL3K_Yv( zatWe?U~K9F6%1roZf9>(5Bt5z2;CgtouFIZNdZzmEfSvoiT(@u9-X5MyWgPFN-zG1&dpN1SM!{~`OsZp-?Bm^)WSN~O zE~PU|LCimaE(%Xmy6Q9RP3g%?%XG>Xo@k9z``1olFp1<;c?M@Dz0E;?G@G{BzsXd2 zXK!}r&aj(4(4OsV5Bu$n@n|*~54)TF?LNBwX|!>!yHaircNG!D0U6JA(TZVzT9S?W z(;2#${cPO0dY5KPBEByDgS+7kELT`}nanNC7iSTf@p8(Pi7S0H@wv3p#qOD`r!vWv zXP?IJS?AH0#P?^=LYuh#W%x<{H%VJRxKvpwkR(mx|Tw=BKafhf$ zpUoB)5{K<4#oSS*cuoc0O;+wW3|U(+lB;Ftm06n}CtlNrYnRQeM2GO}zO@75TOEE) zwL|`}Lky+EA{%t`g6C!s?)#arZ5Wo>uqI+^?bo5)$h99J5ErLvIt60#xQhvrXPpBv zN8n;0?zgjMrrWU%#HDwEnHtz7s1kittb*}@`u>5K9O))AWMd%454!|nGOS}DW*&b| znXp$N?zC+KvDbDcEPJWBRtx3<0&#H!hRlAA6)}0-B@mNmodYpP;5lW*UKR1t^A(7R zKBSgyD76Dr#N_>JnIRhkF@D%35R+jY12OaXbIOFh0`XDnKzzO{VywOZfw*{IMP|Ro zKujKY3B=@C=RnL6curZdS0FxS8;EH&;Zpt*;k!g#NrM<55R=W)GD9{7V*Id6ASS~) z24d#%=adP11>$4Rnm~-z2VdU}_m#02yHyQt6O;}pKFp551H}>=$t~>1EA2KTlSoba z)(}*lQ{usIS`C*T}OsU2*Nqz5YI*i!8nqnX?{NPR1U7byaW zu!~&sF=mTYq6u}9#2+lmDzv8hq2cgN92ZOFEP{F` z;Bu)9_W(?LXZ!ro#m|G?XM4k)etgOLzNJ0!uJzqpQ%)AU8^hjo z8ea|v<4dzsm$DDy17ua5{?`}a1=+{$MXwW$CyMyJddp7uw|b;?F`RFc zov;_fd1l&iXPUkoL7sZmX6f7DP3tx-?TiOJ7n%&-Tn6FvnEiG1WasW5D zMA^xu{Le?kU#tjEbcCOe3O^qcetwnk^L4_{*9$-2ApCqIy!Ucg7>ow9oSmcG5(|&Q zNOluVr-L)2)@zXqYOZz1DUF65;Gez_EdR-pm7WHS)yEIJx4j0?e)w)5pk;jNHL~L_$rCl}juZ 
zZM`Hzz99fDWEA<#Jg5<~V&-<2``j36#q4gbM3418W>T;BAyUl6g$%ht!gl54ID(2*%v%HDGtFgJ0xUfcoC=zusA2&h_ zYd$h6!ZbH07a5WG?P*6089x~n`GJ{{iyd)XxZfmTnn#cQyfmATm9asJALa;Z_?lF_9v6^q@68;uO3h6pNr^l16)gw1V@Kgk*UMh>$h#w7*%;r4IjopWHUT!e)lI07Q zleeK0@TM69=<2kV;c;ZKSk#CK(;0vCvn0mZKaex?dfVA>hl@*8^V?=)L$ggqV_|Ih zCka<3V3<#RF0z*s3U} z&T>@Ei%3?oQ4#s9wTkEiwko0zunak>2v-0$Dk8OlrHb;8+o~x4ILlEr6_KQ9qayNI zYZcK4Y*j=bU>R~$5iUS%R76S@OBLlGw^dR8ah9V(Mc<4-6N|WpnkJ-U>d-2AfGDft zQ#p4(Qf(}jNr~Ff5{V{_aFfrIRL64sESKb6Po-yaKDqOb`P!&7EiMaFRW83Ama;Oi zJKl~HIHcNqz5KnqU~f?+e)X>SD|cP266ro_UiKHO#QrclD;t&X7puhK0C`)RdK}mk z@}w@a$#3MdBxGtLb7q*Rp2{0C+x$i!Fa%1QLjGcvh}J@kDxyW7t%~Ra0aSFcO0-cC zE&6O#L>~yCqKj2xe=20czhEviU2_mNIjwaA?b~J&GOjI9g0Jr#LX9wh{$VBAo zfK)_|nU0+p`rbLy33PlMkYPz0&fP>s(Tcosa)>b;2@g^6blgS7(~((~Tdd;fy2dJj z2oaY*WNxvF zEaE%IDuw_Nl|!szskp}~mWoV7Zn27?>RMNEB#4-t9FUDMJ`Tvz2`4IwU%SUDj)aG( zcslN);_1jl^-A11imrTLs}ks~=?%4ilK8qC$E*Ij1fo3NY<3}4%DVT6#q$ZAfp*x%>NvRME3_&Uv n-N^v6l8MdBc`3u0?Ya9KLx+ei3^8x#WoIT4-*18;L9mH}9pex~VjLR_;smh4kaYnY z;^cbS#PO}YYgg^6+V$F1wR;BL5o{DGBy3kx6r%F276 zyEVDw)TB2UUH`hX{n2c#bI*7Fx8dX8h_VmfyZ0M+?3-Ts|7M?C`ON3`!dv00o$+w6 zd9Hi5H<`A_r>6bM+1_k09>x3bANDr;PmYIM{Yh&-?Ae-~+v&%RBT?)7;9z&WJKNcv zb*E4EcKXx!O1OOOP`rQnugQN=+*oe5UJQG(55%&+!Y;(@*uBj#!$#U+BXQVa?^J&{ z<+;_(J93gm1iu@>oh-Yf{_N@T5aC=+uhyK;PiO1-NxeFbK7_{owaU;rS9giH=V|9I2>O$9B;0z+YQlr3G6xB8}9bw zD>mZgX1s{h)mT|>A}d)(|5z5At)!&xWmsCv9)V9p@H)F8bzE%^MzKFnR}!8MY`}69 zFSHi_>5V`AEXzyOdO?YoMQE%ogyN(3OkI53V#mh^8*rb$J|4)7?m=vG@g>}kNhm9B z?S<$BiYHAUJ4SJ7+V5|5;>OjS)kdwCmNb0;U~M^#OX%Nv3t%yQGJOy}+z4UC)7?{X z^Z4$mB!oxwo(V!`FjAR35r5<%e)wb%!ENsj=AyY0k?X4 zZrr?eYbylTNF_nYAp0{pVjc#(?C`S@yxdNBF`Vp82E9|m{>e`3QrNdS9?ej-)Stu+ zlrb(R1q&M~xZ20W%^}#2a`dFX)gAS=QMrVOx~FM)Ujq)?@zuU{Ivt!D#Y^~WVkwTq z-{x9Zz(G_?47Pf+KCTDj#brIG&RFgELxn1-IV1krt7M;kx)0Rz+kwbnAtr z+`4SzAS$=gdT_n9nD!a)?;?1a&Gb2Elz5Kw+)9ppDRFFExUTcnltw=AGYzvrWvUPy z&`JP_qm~OJbQY1^Yc-N+l|2?d8o?`U6x|kl%}O`{nAETX|G^?>+|!$(x+Gr0UlUcY zoKy)p1zVPV^m;1!8EOQlJ7W|vdni^e@V`2h{wihY`%KE%K%R?Agm`cMjXCP&tlO)( 
z;O|P+lYXFmj?ddNN*2|39&1zL=n`0^qp_?ly?F zyIQqIJ~tI&=Q+eBVEh@yc{oA>k|8{YH{JPOsOhK>%(Wg7vQU|M)C^(Y$dmKXEo zPXD!Vv;o>DD9g-k|M4Ojfl160x@l14?Z5jO=c^|`b~C~h->{F@Fpaml%}SqXDiA` zvm~?*|K8kJ<->67pmOZtR02o|8N%(m)2A6+SbW0Nkmp_&`YrHT@51q^XP}4cf zeDGhJVK*Wq!2>OY6ogd`Q}9Kr+#&!1MjRt+Oshqwf>Yb}lm`Qp_`vU(!5XBa5<*}R zB6Ezugo3aZ(DDQasFdLETE!a%Con-7g%Ic<_LzkYQqY+PQa}coD^frPJ>N(HnFq46 ze4hi|6JU)L5E@!u3Z#H5_Clf_WFB%}ZUGylfY8wK;)fL2jF4?!;R^{vJ;bik6K8Y- z2gv+5pu!3%AW!OKXO9%vP}@d}|HjVDijZ$-Mg~#g4OU12A)*tYJyKvpjWgviqVz@z z$h?Cf1%#MJq?Sm51*LYn8Q=o5M?>5I8>E1+&b1+27&nX}YcyP71u_xiegu1@fRGeh z5@Rt(9yB8bD~?Aud^3OD+(H2HUej3OZR=&nd_o zDZn)6h7{o8)Gn%N>16Kd(kJH}8vzO{q###`uvx&^1mnldNI_1%gbisu(L*Se!2_^C z3UV&0A_e%d!v8$@(wG(ep#@UVX$we!QNyckJFI|k4!fIE+(W+PhQ^06f{+;qKnT-Kduv&>Hw7B%7-c{gI zSdk&*t+VB;0XDk|$Us_N3alVJF9CNI*zm$NSe17b*o<&Ku>vx`>R17JQfCMDSb+^S zE3CkZ5W)&nu$9;9E*abkDKW4@Xa`I)YpmI|$@t?x7-Nk5y735q99%yX0@na!osoe}?gB2XD zj1}M(HK`V#QY95{5oE6n6ztpftUQrys{z>TD>&+a6%e_Yu>!^h=DV-pXeF$G3Zc5( zt+0Y)7Zg@NW)lJ{Aha~~Lz{U6!KGv(L)4BHkb$(k_+bTP2stkSu>vxXju)=Ms*Dxb zjDWEM=7oOZUv66k$dFw7UuoNd)xH8Uj81m;Sb+^SE3CkZkng?%GKdOsu)+!m5uE_- zu>u=voGF{J0vnRvSOFQt)GS-Qfk239L~4l@SWvRV3akhPvqw`WWwWn<%$RFLN*%!k zgB6hJf(=$cK3o|qAP<_c0`f_X4Ql%hujDUpeN0wigB4J=gighX6_5u>QghomL8+K{ zv3UdGm;o!W?z&xiLp-8xKqFR=&xx*rcv!8D0~@U1SY@mLx2R^UfCPwa z-%}B5Jb(cnE5K(I*kA?69Iyf+7c*AC*g(PMJoXhFtArI$ArzCeRYkf%m}*$p)4r4HSH;0mrJDkI)QhmdhkE zN`l#{qM5M}fwHsm)){;*f=)GQ3SCn*X$FiVrjP}{6~SvPS<@e+Uez)SKS~k3Ppo;ZnzP%K6zAC`zZ3us!6((dX(aJSU#8PTmu)RDQ05TQ z1Cslb$#~K}j(@s+aQbwA(jRU1<3puiB^3pha2HDqvl||4k8mhTUU+-NAIYucp*Ka> zX3V;$Cga}LW^X!6kC$#g-5;EJaz+p2EyDr}3vYWZ-(jj6ly?aS8#+ydzdPa(IT&Gi zV<|lmADvUpq`PM8>eBJ~OR%_xJ_hfM;11S)-6k{Xjh>R6Roqmq2)AM~;e8u>(S#S) zq6I+5BzsIHy8vTK#Bw~n$%H?TSIJq)KnuS)2+L!1FG_w_iXOv4=bLg@1t`ap+E+gP z0D^4525yR-$b{X>~3!M(ZwW-_m1z|flmn`Ps!+s@g2i) zZ`LN)Xe1|}sAXQPtt&4Dk?fvHT{hhr%q`j}xw(hjYrviIto(S%s`#*{e2xmuA;clT zv1KLU?8cegVQbTh{8k!-3oldyhtVPMb{aaqyPaOyv7Oj`94R!ZAg&@RC<1%hIX~6! 
z?{v2Y+tch=XS9jw#9-^0^jZ`^ml|*~P&ys9&(*CdSvKS0 zd!<{Y{h6IZl=zDGWb9S-3x0XCiWrsIh^z&see|oX9EfEmdkMcg96Z&pTTe_*S1Lpr z=g=wse-Ht8os^Z}#-si;LT&Q1-)M^i|0{y6x<$YgEw7-gAvo5PCcX74x&bf^uNJ!f3M!MGmT|H9V z5^7ZlSOyMb^H`TS;3u2#Kme|2$|MDk7m!dFx5><|k!JImzXpq(RK?x?xDi+}^QhI% zlYkkYu2$b89OL7{wg*SIrVfzTj5Rn_5|*a1uCzt`;cqu!!#{@L%P(CkR({`tuM!##1+Wf>P6Qjc4qPfCv!W1ye;dIotxP)`th;*Q z7X645JJ1dRfryU^cwmYgRuYmH@H6Yg8b`kqtzgd3_TJiYK&#(NBo8`r6JG?g_P)7 z$f~QPGOF2UUSF}q-Rn|GEc~_5%1%nC+z(2QLO4}NojrTs#?+tGsoV8pe%knM%#*RUX z=J|d);;+$C68!F1gZYAOXwGmX3SQeqT9Quf7C(`K6YhpG3(=a_&$i%il&j1$lit<< zt!7TSfNdE?CGi*D4Bx98B^|GOZrVMP7qQY6G24Zg_e5}wZm@J=B@c0C))TvV7k*I+ zR4Ns~&qn-PIar;A=*v7vc5h{7*G(U4mmEqeMcYN<_een?_t^c@62Spi}r8yck;bX z$#DtL>Z^|A!5WBmSK*hMveh8>*<={Iba(?UYYRDlp|4tkOENEwouL|g z93G7yu1RCvRi)1UK?L`C>kL(1IK{GCY9wWjWq|}g5%G76JJ~HiDT@DjOzZOv=!TQk z2HA{#XI@O+Iyp0quay2e39X}qyYM=Qd`TE;wmPM)+1J2J*Di_oFaI_9FA{zPl>7ot zDR1+NeowNg!ab_1vPj@g@i!ZAPdIZ!y(~tK!;3ko+bdMj%MPuEUkYqncn=Z660&cS zuu2N<>clUj)&W>Lf#;5P@2X@9|0oJ+cK7zr#7*>nRS=hA#;T^`I}scY=NfsmKq-Ra zLjiS~GS9uzhNG>dSIx^Sx&=tZ{SP(j;J#CEUx#c1%i#S%gCji?VLo}H2Kjc(PJ{AE z$TFf>iwx*2ND)adoM%gmskK$RyrGPc9|-J!Sxm;!2WOPfYO1l?v&||$+JHyG=~Zlj z@oQYk1O9zB?HWjSKM|fL0yf^$gj1o#QErr(HaTe3hpqI{fRYgvs`9-e%5Blk`4M)8V3&02$~%7(2|^AU7e=7dJG6 zkT-DgQfztGzAdcr=@5CNJiWAT!yY+@kdo9VV6)G24NMr6h!JO|Ub4W(Y*(rH3daW< zaDPZ6JTVyd(PfY5{}cbvz_3e<#y>MKM6rvQI+zw(1U!LCz1aY*P@wlp(YvI-7{Nn9 zq?W=uZK>mODpeo%(NOHlOyBTL|D|W=hmjdO)xwE`8&6V~V2fnTIi}|PqbNOx5IzzD zb54ye6X5amT!eT}_Qn1+Y0afLje;-Xt$;3I!b(yRyGa@C4B!Se}>M zRw&*c0&${KLf_!GDgwt7%ZYZAcNN>{81!UxB=EAC^@Mg5Mzo| zClOs8X<;V#Sco!$&M~ieiZUXgIFX71WD`17A)D~EgS8c+1w((BjLs?j2EAE!*fDv>pCww3Z zu*s}coiN-Wv&+E}*&(DlT#TkvfgkwcCT!QG0cn0$HKFy|L4ck@uNY!6ZVl=?8{oep zr0;ll)s#@&aSm650rq>@404-v?*2m13On&%%`pp^y0mqZ z@vmwWExqrhVx*NFr<}~iNUO{5Gw3ke8cj28*^t_kI?ca6;^~Diww6eOWdz@<(IhE6 zRj<;L`Q}e5J6G`Pt0A_+E5cu1@fDqNZMK{OTeaym49xZ4YYZ0*!c&_RP`2!@CgZvy zp0qj)fc|`=0sh+%_4dtTLA8k=BSZfC^A)TRoQS4D4iB;pb1 zdqTJJJqf4dtuR~p6w!q!{J%C~GH-=*;9PA5Na`?iR5uONBdWzMI2FICRxKhPt?dkc 
zszNDmM&K~&mBVp&qK#qke&&Z{?op(0BgO4SYV<9Fk2c^8ZjVL&JeZ(v0{Hq z&GD@qz2Q%r_F2}JB^7YRfckScu%u@#+iGkUr$|`)a$)S}BWHZ732&~)Tgv9;mMd%( zz`D*H)Egv6IPj)f6p7%o4cYxq>Gn^23KOeo&Di+XIh+-Qi~6jKa1HrfQ<%OgJUCtT zXW34yqr(mMqdlpk>CSlC?<#KiV=j>U^@jbfYYpRNxgwWX%J2Alxmr2s%oQ}v{0hXm zX`l#97B}4T(iE;xJe6LF1V3E?3$qlWA}`A>8w=nB$s0pt4Ve+ZC!27F)xU1TbS2r8 zqcD?qC3nSz5R-jtTZxtGxploT;U{S&{{%c1KT@fZx11$bEPSE~gE^^^OQ_awFH~X& zfs*yB(-dE5u6;Ar;+x=!__6x5h;}9Y!wT;w@gilzHBNFTdZQ<^zG|6#m)M5;d4+|- zRcwC zMzYBpJPDVjneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPvd)uS|>|uB)e$XR;5z&?P zVcU7G!$+F3i^PH}j!RS42TNMWnYHUVDR4j)*oDXAM*}F3vmAyaO{2FC=!_CiA1Ifh zv=fhPKASA&9lzQufm5a>|r;Ptv>}yt-If8bjxBtu zEb|QLC|2vEtZcb1x7=I;lw5&TCF7QvKrLFWs-xYP&#}2?;SpETycWNXUW;FcpYhCn zIOOsjL0q9R%R{#Hwo^0fDt9j;_S|~1qit0^60j$eX#nkG-oMqG^}46WlkFb5z89^r z-SyCIx+;i@$GayJxK`vMzn)AOgQ@g9dor0cy?ZiYwkit^)sxA1@a)N?99&QtNyfBy zPbOvCkdnN5GGRd0uKjv4fw1V5)caeZdop2Qh9g}x^6trww#&IaOiv~-q;h6+Pj!^o4^XSQB z8Yp&WCGWs3*@=TuO}16U@AS&o=hf9@19JUt;#|}B_tUSo;{hA112L`SCllq zg-%^EroDSIDcg1SWWs=~UHkQ90%6f9i+~ZjCldx{II6WL>(thh2@I*6+1!(LxSotU zhP5kkD{HE)klsC+OpsN2vef`RnWe}JBfc=!$#~I;E*GRHZzUyQ%t_=y$Yn_ZAkF+WEs?HQGsA@y^ zWO8P`domny-Fvb;WP9~wJZmatw=!tX=!{WCCH) zDRuo(5llk&Yr>=$ZoSvZR%`3Ygh{KM+1!(@ay=Q>VexBS*7jar2wH#-d+uG736kl_ zUS`;#8K5Jx6gba(_BL5zxu&v=DkYB3r=*nARQS$M39b5MlwK0#*5tmA?ADw&LHL;CaT$oTOBb!0qI(GiOn5xV2z$0f{|R^{L)D_n~s z*)^C2TxCCI7p}d!F`jLes~2Lo#USBNBRIik8~fK^v)5oOdok;q2kc~D@aeER_IDcg zSMbTElNVIj8brg-Gth%RuX-tEn%w6_w*0;l{BqO&5IIX(lp`6FfCAe=@`#r}w3;kz ztVdrwdI%njkNduOB#9;Vx#xX;FZ}VQr3xe528~*xL#>KaA5p}-m41dQ^>KJKe%M>7 zoGr0E*T+3GPq|W|I}@#s3v_l}{z`Ni8Fp79SMR%ZTTQ;<$QH%mJ$|npNSaor*Sd<$ ze&bWq{^V?LHW-iWZ-9}Bg);}0pA{iS-AOgm7ca^}k?lo;`m{eVTOX5pVTU5_EdB@H`EmFhy}GW+Hb=%a{;C;w?+=Xy*W*jcgPI-L&AjN&EyHPLg-XZ12MPKT{4;2=`w0KFmB z@8+UFTj5^-2fO3l+0O0^|8g2HLMkG}{VqB&3#s_2!RQSCwz#htemQ~_tt#7-J~~0O zxUxtuh9b&&PD~{Xjk|Q_>n=|}`-9%@l24g)m+-b8zymezLd0(8F8QZb?m~!t_qhw1 zxt+V@pRV6sz9_wVne})+QQ&ed??7(d>TS30#@T1^R+Nd*zwwl0pqpt1YQy39Izt9x 
z3G;WYNAWT$$Z=7=h^|@}a)P0bhi^pi8kQd%d)eO@IOM#~gQ-%UyPV`XSSvVW?xT-P zwSgq>!PgsbJIh>UcH4rZ`Uy{VMJzi#gY3)JrOkF8+L5PRlW~1Jf(@3P>e3eCfuFRC zc;O)l6VHDbf#eo$Rtg5Cc2~=4B|)Q-`Fk+=64-;w*OV*o6$r6&EfpC6&R_8Xm%$GfC`lW>$Z7_VE7UOfq;!&5Rn1? z*r~~2m=r^SL)ptvb7?Y|$II2|g%S?F?$lnetO@mjpPcPB*SdODx-Ukuyvl|{Ny&!- zffkKuXZUHiH|@^Qy#nJ2X*6C#@lGH>8HUQN`3oTp^{aDkXO{5}LReFne42DD5IXMhxF9R5iw))R>^RL>y}{z?IW)CAfr`n>M}C#NWo&5Z+Ro|-q`;(*wtS^t)Usak zP~!xcbA$#;jyA3oHjo@jfcgcU!u}wVZSC~u`NBwIKi-7nEH73~J#Wi0jm9{Q&BIud zhijOB8>cdZKLql+QmTXh7QwCFTye>atCCD?1x{1tC9h@Nge*- zmU*;Yn@`|}??evhcqt4BNzGsOrkXE|1HyAIPz~aq_MzpJM!FM+b&Z^GOO3(sMW+S; zg0HxMH7sZ9Afg#IwNr7sl7}Efm=?v8eE(FA9>dpM;2M@qF>F+a5m;&ta1Hx-*_sMF z_Yh-cuvQXo;F~Uh4ab}iB>W^DL@*jlP63SSYr!nP==)N=dw~RpuR1o&H02Up2T3RIW=i40)Z;qqm>p$T`ycFBye z2?{$M9fBxF&n0Pb1SCnzYqfw7SEN3Jh-LMy8@5a4fu-)Bx;T18jq7Ei24SR0+X1fQ zW-6<7#n>bX?b;>8mKW39F38e4Cb(g{W}Z20*NXx9+9MxH6p0bsuw5f}?%1x8v}i0Y z*sghy^P9@tuw8+x@3v&-hV2SWi#?bFz9>+1!FB}_l9Yyu3uRa36%6DEjw!IaVY?!m zLQ?aW{hZR88@Ai23){tn&m*?WaApc3F5Pg>pAoXPdV!JGJLXhy2bO@ewu#Y3Q%kV3!cS(Kgu7@DO!42Exc;mtY zhr}3cGsMylo=Lf{E-n4IVY?ioVt{aJlNs#NtxaYKNP>i}rtmPe%CW~0+vVjggb81H z*B&U`uw8n)V*S`I8R|S^yJQ*){FBFaT`FNRfC9GViwad+?1t@zwhIc#_)?4GzFjgSVv532r31bwP;|j|1rm~!rt8DJ!k)YnGX-`xY*%Dc z=*RqJKc}?jhV34;!FIX8uERTAnr}P<4bJ1ZZO}fl?MxrnL1*-EOSDT@RM{AAUvd?KFzoz0%;1?UDf$9WNi0 z6s@-7zFji3f;V?;*Fd<`5<6hKrXd0}#AFDEhS+W2E*UV>2bZcC2QoIMvh3v2qLJCy z_0Ep_cF7>v1cheUA&7GHT#^WoXcumF*f;NyJRp!W@1CUHa$9GyJQ-fk4bsvuw5^$&41snkvn&6*GO7478h*S zJjnS?Wp3E6z!jIwBxsA5&58X}>wICm0@Gp-=728>6kV`gfrKQbg-S+Y0>9?iFC zQuCL+X%Qq4_iotkF&k{xrOn2KHIOZvydv$kL$@;-7|ysL!d1j`z-n?qlwyqtB028c zW!MzMh6CcrP;uw8~=<3#I^;<@aYt;IV^j<~D>wIbyrK zyoE5~EAJ3|X)`T4tmb5u6UDeo;R3rA-fX#d`pb)=}6C7jPN0| zyo>3ffC&Dw;<+1=`FKHY*#)BF?tgy~l>(8ZuZiFtP56Nz$`-)4N++49B%0J@huUc2 zFL` zj-Z-ju8Q$z(kg~jing+%2EAHP>Dmr8`(kZV>Fy#m*`zjmblRq!B@E@e(%xE!cQwM~ z1mu81EDEF#1DFy8j8=fIH9Nt^9@A03oE6b3w_M|rTF4TKh~Ty!V!GOC)XBA6>6RUGE8c*HEq4)s}^~+qmw>LcXtMG zDIvB5kiMoZVq%J?z~IY1{S`GnU3GII#1yoL!M*Zixa6XWHDl)^{J;-x 
z9!{#SQLtM!lj_(d`*?RXSz+giOXf|p23+(Z)?yW&8BVkZ!%Th)0^G1JM5+pv}e0J!+v{nJep0$!|qmpr;jdh8f~8I zu9gGkLJ>h6mMC&vv|>1rc8{a}bcQZwKN~l$UC?w?*$wOPzCG}ImMg5gOy-v6i?fK# zcsXUt#Fajp_*`1)V)sneQ<>!Icc0lx4y&{`+YO|LTer5n?AG-fK&jQLtBiH{$2U9 z=tFA8dPIC3e$}^jKzysiuc>y(A9jeLbXa7AZeH;G48nas6AlrE9mx}vRS_Sx4aB95yPiRn=tFAR^;g8?{cD*a8v`+Z*d-8?VI2c8^Z4`1guMds zQR_ggvtgImjMX8v;Cd_K;(ZmF{Tc%?dE6xslV_a+F-PEeWyM~B_}KXh#6%xbGp;`n zlg-jHLpBCt{IE+PCc`=gV&?JZl?i(V;$zl(&QS$(By0 zy!PMH`T0E{xsct#(A6;Vb*y|fW}Z?=0&Ldua1_i0a+Bwkfxnqmfc?Sf+*{^kVC`%iK*3{?{ONez|U z<4hOtH4&zHpC`G}{5%QLg(b@H8~kR72nL@c0ZoI=k-*T!7NEpejs#B#Hb;W$1e_x& zwS&!(G(5!|TZ&y2SnR>__WD7JYjI-Yb#z3u*#yn4Y6phg1@wBE5u zM)|Ae#$PtkIE(0DkOLc4;HiyjknP6H*nrAIUqw5;VOq?{J5b~2^5%ELxXMR*S~Er5 zs+uVhw=z@2C5)LO=@-ouaT{o+NK6*~CcK!nDs;liPQTk5ZFQ%^!KQ;{T}KRSW#bxZ zRtp=_|H0Q5VLvl^yo7B#VSFc18hxUjcv8iblWL~SzgVL(W$V|COeUY3a$;@D#*xv+ z!ZekbDH2mQQ^clhrbtZLOc9&1nIbV|^t#{D*6iF4GTO`FVCO)@{0DyvuF4Fp{033H zd^ZX|v%%~fXLbjkU<^|ktn)oYu8ykw(*~3; zMS1EHfXPZ}M-WZ90Sg#de9cqYBp4QqSIx)w1WX zvoN0?{3n6IW7Z6k(n{3bvBE+f=9N=b2&tO>;g1C7UPYO+uLni;URAK?@}>vLu?hy& zR-kzCBr0j}2Fj$#L`f+tvUo#iW6K9p@T^g4NrN|1=8PJo^|#30jav3xluCwDFlbO} zNrN|0CXE`*TL+QFn-nwAh*~QY7e~fZD{1g%%A8SywBHfgyIH}WLC{(jUGz>S2>+Rs zY9!qsp+@@kibNqFpq#eoqXOr0`Wfb z#P@P?#ajiKT_vWEksTSigMGh*ni>L^SS;FlNrrrV09wc>@|k&1BWA_S?Oyk}G1Q9L z-ByVn>wV0mUgtxkn2QS;hAATOI~``jknx%idKfPoGSYFF4@;()j~gL|H6o$At4j#Y zf+1AQ$w6m%ZI4%Db0=|OjRa97>S8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`6i#`25a|9@TitxsOtSV;f-#sB^D2ll=9 zhyVO5EAM^o*6;t--&uwqxibF36|e?PbRt%~1kp1q(TViyIrKF_U*C$puDl4)FBj9L zo3(7^=HTT~V{vIubKi2j7tpcG#Swn-erTe7eCVOKMNzyDqV3|_v-!7|!G0!bqhF%s zpzJq!eolY07aGHqN(`_k`!Mr;C;k2a?9GogOn=%3i|Jc1=-UKM00GPBJ2ZinYl?|q z372;cMUCd-o~7k|`wv`l@KAjD3$9wZ3ih*yY%HD{kB135N^a$9;dnXv7kCEp%~UC~ zlm2NuRyxycUqCtu^h1tr)4#7EVdO$wLERPl=pZ{UF@wKjLOoJVDEg@=g8u{dZs5bq z*wIq%(+q;SU%~^UA1t^(4Exa}dfn6G$#!oRFHL6?bg*W!gev&E9=dJqARNHoq5r?_ zu7}Y0`}6VdMAD8A!2vW__f&85sh!Dqr~dIDfGam9*UNdHJ9lT zG=BEYYB~}0n@Rec?3?sAD_2taCygV`wIx{WG@AS1CfJiL0VbCM5#63_m2w3g%#AmC 
zEa9yjE0+NN3w8zAfIY8!%WiL2a+^KKZLWru@1X;(T>)|DvchW)?7w6$#P3ELeF7Hm z7@!6)hQ+PH_H+dX=-9+(x~Z$NDn?dKCWkm-Ea|1dK9RX7$K7jS@15v4N_0Y^YL=pJ zV;9+X@DTD2xE|=@YdW352rby8LRCx$c3=s=T$Ya3L!V5)o=TrU{)%j5Yj-n!J~eVw zy^p3lV|0RJk6Ql%|A$k|A6`%hb3g3s>`jH)T#n&2kiHpHVoo2q4rQS$mH+k7ytjXb zJe~%;H1MHm5MNz80MJ>^MnlmKkkzEy3*k1{fB&$z*?)38-0Dxtw!@ps$`!B&`#JuS zET+p`$ek{E0PW-$Z$CI1q#l@xIQv*CFo`)XPW%0>xH%ZjlE&2wE0>{9qhEB2-(1BR z=-M(Ybrvr_7&TT775wCO|4zjGEwJ~2es6gD;4Be9`qd)xWBI|O>GPN`WDvQNAi}-oR0FAtq+k1~9jILGe%Nz7Wt#I#WT1KYUrr-)${!moUJUft5{cglhmNCo zHtBEOJ~(~4KS9U0_LYXA@{rW2P&ILU_muKcG2-C3nA_BkM(M+uE0AatzYq?gNukhz zB-BL#GTryIvUp;!^$ahaD`DSL{r*ljjigNIFc$$=Lq<&^lf&pQz0s3|7aAanWU>aZ z+*wL_K;dBxmeY@C*)eyeG$n0uKBaheGo8ec(`S4Y>_a=RX9IM$F3Jrj==VI|d_E{X zCE;fZaI!ZU^iB=Y78ev_vib#QwX|M3$j2!m&0Y}m#-a)_b>l7`7c5{B%AEchp6>^rELbN43s`D13W20o%-No4o-}_B=wsIFq zHM8BSjg{pl>N(cYzm~(=P7WTh3e{f>|A`1*$12f{tL=g2p!9!w<4->uh?hvUXAJ`J z(R+sIrhITc;^UUu`S@T1?(^3NUm@`YQ^H-q;->Gca%chI;tJJOWa+)>DIi>%r3AUi zpuh_o6+ zgeDvOl?HH!bvt=hoI*+=xJD`oLI&BNf$Hcqg0FZocrSLFBiC8LSEZO$3_Vrkuot zTfi(0D1^P5d;&fe!40M~o-JUprQ%!Bl^pb~C(YgHu6ri=oT}Na0{<@JFA|d3RXTmn z8Aao75lACmT1#KVTDKlw=9R8Y!_PF#29>Eoa6r5Lv5s2S`kX7Piv2b^MSnDcS6JDp z;A>XG(Y)QUUU;D^x9}e5?DutX>cPtQ_-l1dJGiQ6Di)T+|_wSG+ zqEm*x&!mieDIq3n8pbz{@}ngado`mQzxV|XyqsarBW36v7GWeqiZZ+?v1Pun9b2)( za}j^Twj`jTHOV0Dd)m5Z?Yqm?IWh+}RO%ae6sk=PofT)=7XG;pKK^Vg6hG=FJ{T{$ zHXiYE%N%(5XanA0aAqsLXj^&i)_5oj?Atr1SUjMfgYqOqV|v0bl4ftc8+R zVK*Wq!2?aOTmD!ORy9n)7p-!O001Bk`EbxdVb8$R%RX0Qg$q=XPy zgvcBoF#)fv1%yKh{;pNLVQ>Nylu-!50kg0{3h4fG7QNLi^fR$h+s&gBSMu$XS8T+~ zbVq;}q(CN{g1W|Xk$kwV;jA8{fQ-!NDOH0LjF(!Ff@-|jAO(b?PA`5)fz1eb*&i7* zZPj^gnJUO&umPE@6;ePxtdpHRQeZ=E+XgAHBIJt{kU^CDW^9lGLPRG(d!)dI8fVJp zCf9}}w++Ch1t5bMTL2rRfDqG&)DkJMpwvz`1B43wMIBeG5N(hG!n(Q*F$JVR7yy|F zTJdD|dZv2`WH5ylQb0(GEs3$1BM+L90`f_X4QiJuz4R|{S`7M1OBf?kK-m)d6l=-N z$--@rg8bD>wix3!2{3hKq+M;*>?UyOJn_811}VTFRM0b`IP@t~Qwm}MVE(8xWA_e%d!v8$@(sA&I7DxfPq&&{w%`&dD zqo&giE8yZ&!E4w}1z>{}&|9pF-2@gv(iSUVY@p!Mb87+EUh!`jSh4VefN@uW%?K1L$leQ%8%O52tANZel_`ZPtgr&| 
zq)vABSb+^SE3CkZ5W)&nZsoNEP6h`Xtblx2CqR3wz=j%U%H}rLhNL%EKn4kd6%b+? zky>H}7L@F;0xLqk*cBPXC`22qfDq#v5tj~@5VGs_`KWGaZzU_ER#*XOYRQQ(% zUpfwKu!5uc9(#MN0Jo^-eFY@Uv{UJ%YAxO!=w6=3xS0zHq6b?G@{1!N9v<>HSO5UQH`fyD{}!T@As zmLGM0ZL_a{P}K5L3syizw&TSHDb`%~*jA$rLLv^>s9*xikS}5>{^@kPmA_YKawC zP_n}ctO)sH1!NGDD!>LSAjG&vgoCe53YT;(KB_PGEzbK22yd0K0`j04DdY(5V=)0`g#(xf(6pW?w;m6faw5a#%s&?E-i>zG~4mT{im) zFip-yFT8yPc#>9FL9UXjSOI>_j1}bMOBz>rqDDKx(S=+{HdsNx1#gY%?fB2k-oQKl^N0 z&eZrpSgr~Xff1{6aLMKg#QH7%tOGhF?qo1saXpv_Cj>TouBqqi4}Gl}$qzU5_7C`v zjeurP(S?AXgkD_;NX-Uk@E7V=LJE~44mFv4K=E?>6uiY~=q%7(vJJQ^R2aZ0L_kYJ zuQ&vx#^Mq63--&}ZS9hL&$T@gu)%-IrCv&cI7F2qDtxK|$Eul+&g4bBGrawr5tD4j;ZPb$B9Z_o$ zRm2OeTVScXIqXfR@iFMcM~36gwe<>Nu1H31808 zq06=wn5jwd<|$~Wm98Z@OAtA zC7i!t0o`P8RoEIDdkh1Gbdc>k?wg1t`z8Qo>+3DmS2~NqHt6OrSyVO?Pei0R(FcA_ zIKdW4a4>3b_j{x66kmAK?N27-NxOXgN%=;Uq@utQ?qUgi!-MVn84M3^kN6|Gl{}#E z-k+dLxyU_P=~W-?r~895PtNFpyk%fvVc~7Bs0`;)MeAvK5o%Y$<00FUIRy~;=`VD7%H^F(k!9k z2oT`dveJBZUtjL9^U#VG{Z*WElNQ`54xJtXZ>OR3)~odDtHkc(NTEpuait(&dSe!P zd#RbO!50^0rDs0V`a7@qT$zVzY80~I%(N%3zbTeZ+yB)E8ZjPF&=08X~s6VrF zh!S5hzWlMOU+~MDRm5mk66&L0ZRJ2LGudK!cQ|;eU$>rQRR`)ynM9#fAN)ZCT&rp& zxbdhz%{Xgk3AJwi+{>8%;>X=w;-O4?wl(8lpgRF88z$9WoL!Ncc42ZL31SGH_^`G~7JaB@Xz>COi;;E1EJ% z!Q%xa)WvNwvumW;eCDsgA}3XGw?A$KR?IwVweuukp9=Bg!nOxTx26t|*NindRT7q_ zv97d+PyTiTHvD4CPyb{SMTTN;gfuJQ?EcZOnB0_g%vj&A!A%A zfOR-@BG|wceSjS*1^+gJS6Z2NHduGH?xLT8EHtYVXu&N%0gA3`q@37+b_fVWd{n># zQ{=FckW?M3rB1AI^gGdJjTzeBTRRRY%RjdG(y!QBSk6tnH~%K#)-`RM(GoGFVk-(4 zg%hQV0Ct+66CmrOh!5F1NX82RVXG^llEBO=`0a?_5T-N)TA`2QgvXQdqzqWBiJ+F-ti^0avt3 zMHox0D7flzQJIg3K;WO-E~RY^&c=Hm==X-V56)UHl4bdp-;ZRFqB$shI^wUL8KFJGjqneYYM(465&F#)gbA}vX$c8i}#!3lRmnT2T0`r3lOQLZx2OnO@b zw3<2P0=7qBeKUNoZj^Mq?zw6ANM6LU@w%GI(Ds1$L~xC6usa6BVIJbltS5H!F63+< zzNQ@dVXR>!U;IWXBn(gkOU4CaKy#Xq z0Ilw(fYp&aSOd}SD*RGYwi@IE9yQ;Dg_y-Z(=dCkTdEpexZmIF8hJGUA?-qBmPkvGq|MQsE=Nr%sC#wyz8T-z> zn7nmzW*T29{dE#rM+tY~brAWsI;9=i*T75HE{XRq|26q95`HA*LlaQ)3pk~`%`5sn z$)*bTsIJN)fjh?~AZDU&b#xn9;g9o%;c?(2|kU>UqWXmF%wBFraG)F9suH4l~j0LmvJ%ZOqv 
zGN7{{MI^m&o-Hk=)|Az-ZtZ;3uBj6ezXCPgww0o0_QvN z9`Nt8Y1crq`-$*05wP)|CY%Z_j&h^Sw8=rMK5V6r29%7b&?1^`(GK|$cw>ArOouFL zSs62f08n%^RpY<`8bOmo9$J$5=;}1p)no8T{Kk58r2#X3E`o=>y((Mu!*E6(1IQb= zcqz8Lfrni%&}{vIPlw1G<>{q$8}`UKgp{N{0T(`Rmr{bDM2t8y^^yfPcP)Rg0r!VA z!V`mGA6@o{{y*^#4Gg=)X#6t+LlnCRTH`x`D!JJJEkI1OmrH52pz7#;F@lGJNG*kR z+ET}rUp$$z8bai&duJU*d_*jTCg3d9o zc#1M2pg56=0%Q|9)y%yM@$tpJbtXXzhW;=aom2V^db0w3v3#4Qlt+@lj$8=HS|q;} zR3vHk&tEnPE$9FcFV&pXIT6H5)4F0eyU9I+fV^~;xqBjk(q=uNP%~8p0D0+jGZ!rq zX4K-HRMo@<0eR^pL`*Oo$V-PHJ#YY!SEPwR-VcWWc`02Lco-DqrK1ta1ON^xO;MqR zkJbY6(s6TMtlu0Bv+a-$uTmTe2E{fue`tq}M`KM0)R$6KE3X#RmyS!hQDXGyrNhVl zn{s^S=k0^jr~4DMCA2T;FuAu~M*Z2-;|V%QaC5xVZ*vFJ=BJ|Sj_fE*U+6mF15toY zW~J(c;Rcyq4wl5M)bc|}Y1$O#rDFzuxCz@VEvNltiaAU3yQ>*A4>o6bdGVeCyVr zzOw=TD{_hn;{e69hH)@6{}q;vNdf>zjx|7wm| z$kg4fn~Z-|qiE@UFBK!L>^S9QE=F2iexE^y+16;9Y0HMxp44gn^%3VTe6cpkZ`Ej$ z6rQSAX~}%^rz2)Xs}}fbh^_F7@RwJ7MWc%Hm7_QOY12N-+Onhqt{6~%?gp0htYuq`&Egb^ z!iF#Cobjn9yty84DVvvDuCP@A>pJsPuTPF};7uF*KHHGp|CDb3#HTQ^n%0bsZ=J(g zLAa>Tx(L^h&ozbVtHMLlv7Bt+tNzx}!3WGWfv4iewGsS!!+zJbhVimokxMM$-zK(K zAkIw#MFis{(n}>`x+A(Q#0B{23Rsw>5EXe@cG*|}CrI8HB5TNu06y7-GpxPqRt77{ zrW}QtyeqjYE`*ruTiZ&k$a3qGv_O9X9*ZBTRG?c9j`d|*nyO_0C)Nfy`XK!Ep!wT;w@ghY>Wlj6tW9{_DTAbw4@3OvX znS5s*?&lR2ifC_%$*semRXBK1E+MWGU4l5yZsA=Ojw&&WPLT(F(^XkJ6%KYa&pIc+3LmPlhuuuJ{uC&Yi~SaM zTLzep&9`!Ciq-lkD_gG1EjO0{tq;yMk4LPp!UlFTQPB|%jS`dpe2&jG z&xvTUB!e?8#&rD0n>&J(<9@=mPn2 zpI=WVjKNfMo;{gNn%+H`Fk6*{hU&>=Jb3nGQVuStj3i?ke0wq}+lG|n)sqPWvUcs) zlL>@Hrz`?S=)OxBnBhpX__prdlO1iB3zRTDnZS_Bnaw@f(YCTD%NEKCop|(X;Mjdb@EJe=uWT^~`28;ijRlnxllkuVxT`tdqemxmK1U7lko=ip(y;tnplksF# zUK1__mfhd8u$M8wWI*~-PUu#P9~ZM_4cB=0Wc=tVwT0`+`0;WjuGXtM{o+-PJ(-+Y z@16{YT=$+V57}Nl8PA%^ZmjK?=0fkDtTT_EOr`+H#d0a?5D>&XPdqEi+D zBXmzD49sv;YfsjxttS&0QaQ7^C+l!M8LjC&8Zf@`z&jv)dor0ItMp{60eUh^krzf> zlA&qeqin&sCC%Rmao{S$NNKYms8K5WQ$*R02TnY@{lgSvEPX;WrVxIwh*U4mD z8xo+`IvGD+pq`8;Dmr2jHbSq1@#7L^OzU#qJsA$U?mbx^vb}mTo^_SG7owlrI@#(x 
zdNP>?3SQ4cPbP3Jxu&5%BRIik8~fK^v)5oOdok;q2kc~D@afQr{hfyW6@0SkV-iqcJ4hb!5{Oolg^l&-%S8{tgYj|Smy0B^ z#6I`D&+mml-n3L*Jo8r(CJfor%`R1vhH_lrDvRX#w0DjQW$f z5ig{3oMzX*HrBEqwr1zhU9gQK%9|n5q=257^tZaB-ZuJZypRTVuE)fIofZ47)9K*M zC|<%}6FtX#RxcCdbePU(7agjCwDZ(pbS683iW;S_2CsmF$WR98pvitW7ZW<+N_BbH zFGsMVRb_k9M<-|&R~G5Tpc@#>P z%0%ejcuF!5ce8pFFQZx-*VBvWa`lDQ%`_9W;c$GNArrCV@<~Vyd*b~0jR;=D@`Gb9 z`x^s?ocCQ$ato{#3^A9)>-NmiZbHa4KB5qlAAG$5x3kPuX16Uks-N&=SH!Z@GswPd zT@tS=l81*)$%}; z%W_m%uc-TBYh2MyTNhXyfp0h94o`-~z~L)E+10h|6dkOr<>cixg&g=Fk!PqzjwFN+ zUzyJ(fc$O*l6%Nas!iL^w!Y$cA?(3D@MRkZm!o(wL+^04aU-oZ4#RcvE3B%Ga>J`R z7!AG@!K+yw>_P_RiVGK9L7^EmwM?;?aOSiWqb?=2&eB>YO88>KCEHOJ#Q@(Ch%p8LBG1#XAPW zp=_PlS7?HnB$5N>eTTpcGZd`63}7(Y>Oa%XmnWKjMyGDu(-Yp;;ae^(Ew2wQHF1Vz z7+jJ={Fp7JH=&=#EdQBHQ_Hg~CaUTU7EjNispSbsl9g_?$SaP?>sN!W0bSc!9f1^> z6w;Qj)QiOBB@Ywl2o01RZCojAAUTu(^$R+M{Xrz#+Ud_Tm&C3ljs1ZQK#l!E3pe#% z%H-mgA~C{`H{m$Ti&az4+p;as9QI}$?Pv0aF;jJVmk8|V^W8IXyF(y#ki%k??evhcqt4BNzGsOn!Q7r z%@hvkBQto;1*$>Z(>}DE(nxorfodGq^71ixztS}A@D&%ZhUJV4B3wq%3@fXKJ-98! 
zni6jNH5a&sWm60r)nNpdngd+JK3=vaZMg7|tUOp~Gz;H!0c<$tgdpK3X$| zKy9K^gUqrggb81H7nOp0Fwg}XaKm=#X>am%UsYqwc$yY}ZE;)F!hpbJ%8uABd0c^i z;>L>v$bfAz8JDk}RBf>vwoArP@Rm2l4od(A!lIeDV7sOvYQc8Npr$(C5!)r?GktKC zY^kJ_!K=Kfn6Q9A9X6RWxM92GacqLZPDh6z%F%O4S{wmM(xR&cE*%^pu2{~rP?@l# z>4@!;d0?sgr!J0Ok-EG@WDo|pVY}Q+Wwovtn@DJ{F*C7#t4)vKhV7bpt_#~GBeNW; zUu|*2cF7o-Ez30Oj_n#ri^k%D?V1NUFW4@bmLs-H9?!MHC9}YprxiXqf*ZCgFfI0A z4)~%#(FNNTNJvsz=;cZ>qFa^&H*A*-O2UAU)cj>Xr?lpV?RL%swu{H)L%R-&bHjES z&bS~VVyf2j2nF`$?2Ew%+^}7SO)+dZAdU<*7i^b#ToNv51?+-M?SIXDJ^@|hV7DZ`9KmD+a+Ttcyq^g4TMWAvBSPy(-7ySAtsY@#CFN!nLfBwRmwLP z47N+=?1=4>$FT|eW4jzZm!!oJkR+|pjW8L{u@QFLw@U`aQuj|?9K9lSxnR45lyUjQ z4cjH;GtOnTt{9tY#CFMC*brX_woArQ^IR9UOGain42|uQF_hf7W4lJuqOrJOyXHa8 zZz^-!w<~bPB{Q$EFqcf|i0ukYi#?bFz9>+1!FB}_7k)}}!*)eBg{0;$`#GgGH*EK) z4Yq3ww6p@_Ja}@B`*s=5xFBN9jASLr1^B`9D6b=lz>g!g%djbi4F|-Lq2_|^GLKgb z7ao$W4EH)>yDW1;knodq5g^e zF2|@CARMqpj-FeatjBgMd$VNmCWJ$!TYwz)?Q-%F4`}g~cL=_;3ngyY?y(EzOa?L& z)~oc5vD=vpWE>2G%n#coNxfpDoMcEEN`L*Ry(t}%ysQI!m8 z1fJZ^WFX`-eQ>FYt7Hv38f6<3T-s?en{quX1C|}JT`~wZL4RzQqvw*eI0BN&R-Uvh zR3hxSEdsocZXEaR@=OX9u0!hL=$V`&5a({#E;mzItt-YRQv24hZ3rB31WljhZ zev&Q%ggZ}K$i{8oF2k>^_9OLeU~CtU%GZ4w69G4Dm*b5K529OI&!pyf;ka*?V^jeOqB3aPzmx0YT zmI&U_gdYf^Yyo_$v?8LCXi}3MrUF-%6#kNzM;?8M}MifENvu5n2%B9J05VK!u{DX47cS(h7J1mw znHx%XcLs1NA+`jNemJ4%*#ELme?^T?SKV9)F$L{maIZWWF1e_}byXS}%Q`AwJ2!?O z_#vNOieXu-VuHxgqnBc*R@%aJfyI`$1&QqC$|Z;n<`Z8AvK#8Nx2cEy-eiPsj_*#; zE$^fNDW4Vz&;7!y`CO`cDYyqOE(h{#s8jt}Z&f|S>C>lQ8)Ar#dWh_zb}nG3xY&f3 zD!AzHOb5g9NHfT%uj|69%z(Hf`rubsqT z63MCZ49-k?TZ8^+Hf^(ild1Ba-t3;;VK;rCJ=@(G_S>7|(QGmvcDMRFeRTWNX!Bfm zwH#U(iU{I>jOV&&#c&`+zefG(3|-8AHf~(IpjlYP*QLL254@h`3hOSDxuyByEFv>r zPMI=srH>{)msYyiJ(KlRCb|0EXSR~NW$ev%1L@(`tu3S7$_z|ht*vYA-1(BkPws*H zy(|_L0c15pX0_L{#3fcsA9skF^x14-A#vD#Qp_D?isx0}-D2gA!;rNFBe`0JUYQB& zp(4E*yKK`XI)q>KtsM~G>hNo-9rA}AVkjLJ*`S*jJU@eQ-_L|?!?4#XI<^>TtxzqL zgMT0{PStb@#N=@o6C}?%2V#!E#X#I|XU$BvV;hL65xYcCB7CVCR}bd_0x>z#O=if( zK#U)D3B+Vr$3Vs+XiA;F;Ojudi8zC+0r~3AP^TvV94y(SP_%QT>>$A 
z);SP!1fEw`>{SsTol77l!oP3=F?s)5X2`}sj30Ih#AH~>yftY#xd1b<0f%uqpAU@v}F;*XZbq_pP#$xPNHMmVs zI-vM4I|2_BOK2pwvmdXt+l)*iHR(G-P@)N&I?s2Az_nHXP zyw8(dX?~sr>B16a_ziwDLD@TGS1e+rPdIHXol-j}ONE)7E zjxEL3umX1V4N~8T;3bLxBJ3iUe2m#5m1sg;u-Ph(-cSjt+>pXQ-lyCdh+=YgRU4JL zonE@A+(|A*R3Lf#qT?Wa;sO+Mc99AsCleZi%M&{S-iFMvH~d&Av+ ze96ZC<-PI3#-8o*R(}Y;w+L5fB#$GyCp+y~e>5FWx})B9e@b4xc*i1n_^+B9f7wKj zFQOAe4s29Goi?f=u^TUA<0Ox06q_}U`uXGV{gux%%d?<7j2PqY(Hs@Qx|&F1+RYg9IG{koCKH*%X# ztZm*na=zEeOp(~UnIg7%Geu(aW{TM6%@nE4C$O|NJGX<3_A)rwIS?`b!QX0FjpEd7FgwSY-GL_B|fj|ApkMVYg&2SxT?RoJ1+UO1AB6+%+0D?Ptb z@&;f8q6TlEOqxuTl(Hg=Hw4!Q?h_R}>-_Mh((_M`Z72$v_3iE*d05Ico4f zlTwYO`y-S~qYCq0?U0!0k7${6Q7axv!JCyPO+#>81ixx&Dk`^;5G02oME-E90 z75o^SP9j|RTm;zCCb-k=_J+fJ%_47UG5^Uy+~5*vB$x6(9}$1CDm>8LN?v9A5Lxz}P6ua3$tyLF z6(}be^{l+Yv}OvmXJ+2J(3` z55)V(6W`0p6>k+_c9obuMs{T64)*;LYHF-pVzFrJB^mPd0catk$YLkx>z*xjDJWh{SJCJ7UQA$*9N= z%#2*@h$B?Y$wfva12HplQ9nbbn2#HwKz3B5O4g268p|(Q`9H6K%d=eBJ2mX5yX*$( zH`Dl=m&gCl_dBP2@A#6XM9+o_KY&0CloyAS8Q++^YNZ z5q-c`Mf3rdAx9PAEN-JBlJ6~5lz-e-Mft~Bj;g7MBsLork-M92F}1dIXwS#5Kh=cISrwnkx%>WVW3fz1)P|Nh zG--sJe4eB_mg8r+B=1}*J(Kgvomp(c(pWC3Dpy_(%UK!N9q+^mT&>zbz4GmQVP8=t ze)-<`OZQ%^66s!OUiKHO#Qrcl?;4fw7puhK0C`)RdR%&sv9XXRb(u|mBcCN9Qxlmp z!$kE|-jLbmH~Ih@LL`CGrjWl_C8C81ql#$JXR9LmKmZk8tP*WhM2kLK710L*sOVyq z*q@A0lF4hQi&diR6+n`L{i{UyIqA!U%qkm7RNQ_$&%o0IeqvdP4+T zqE~L#j%cQrukNX*CoKOdrbj-dpC0*?IKBV70h|snL+T!Z!!^^xKG5`UXFpz<0C5yg zDmnXa@S6)8|MId3ez5`7Zvr57Oe&)8DiYEiUK2xDU>;Ar>EwcvoJ+*cP}VgNtQ-UO zGEx0h175>Yu`>qTYGPZebV3o^%%I1Kh`eaz;S#(RdO>Ow}DtpyTXl0v(yC-roRj@yW{3c)FQI-BsDlMIuC8 z0Vw4HlN=4m&QcPYN^_El%*g=}WoorfnT{nuL{;4ZIV#Q$$Wf7r$khRl8XqT=bei;AZsvnsb(#nE+*RRR$rE{9kpQgV+~ zA|;u~++r13yTp~0LzH0%5K(zjagSB(Tx240bwI|k-HHWfVqUR|r{m**3`+rH6@~5G zV--h&Sd>G|Kdy! 
z5=2Z6v5KeTrxColJ}$YCxr6!ikZZIO>J&cAg}qLM$)@sa$j?1I$V$HZSL; z3}?3I?r#hoBDyfdyq%YwnM8amYp^sNPp9auj3aP8yt1sM?S606oxWwaH|cM6`;*Cd P($1Ge@BWF0Kl=Xx18;<( diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587848505.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 68d601bc958fce1ea8687d1a8c5132d66b66c719..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267629 zcmeIb3zTF>eIGoxdvv3< z+o-2|=vgN@_OA4;ILo~H{;U4)|NmEYtFE2H|NYz#{=m|nxw*Atula@N zjwY9#8h87{8{c%UH=M4vpZxF5%bxgFlzs5n?r+_>XY^;=+2@u%|Ha+#4!CA}H0W=f z@0{z7C#}(`NpF0vJME8#@!tCf-HqPa(O|PTUVb_3nw;%!_u?y855#*H|BCz<#f`=0 z@?O}rIX#d5+Bg&~e;@4cjCQ8mJJU|~$RxfRWKY2k#O&DJjcUV2PQymxu!HWY-eAIW z>%vc@l~Dx09l>2JyTjh}>CyP9&SYb>-D_?3hLc|Fsor?l8?^2j_NV>sp#P5ExHaw$ zH%D8Yonims)uR(~GZK zj~AQqJW^w0X|ai{WDWgeCEHw1N(xPQ zrv2%8TZx%$F$C}Mi3n~oadE2Oop41JDi=o+Qh#tptFGYImT#s--V(nG2(e9-m+dJG8a^LLfV8m>Z^~gr$u;+0qEz=KnSH{i8gP36f5r`d z7&AgGei%Z*?9n8|=Oeh@R)}10!G&?dC~0XWw(wr2QR+_wlmQ#a#~RSG zlhV}m^n5nf^;Q4D+*jqpsa;er3@(2xan^WYlpYe0 zd$U0Uf+xejF~e>|NP-7i3MmMy8m8dOR=Gt01dKRF)|gg{PLkHXVBP~VfZs8LHAqJ# zguo(1<`{tq1z|0qV966MfWKoEZy21w1Z5OLC?;tucPpfzJqx6O3^G%sfDCGh6mVsk zq8V|@w`3j&i^}&o&^-YuoRF3b#?qJ2(DG6s1ui8F8A3iy%O!ssq<{>hzaV1y(a9TL6qmfekgzl*5S9 z8z~_3HX&J7NC6?H@n=hAptiLhC~*aYLp%t%2_zJv{>O{LJY7k26)y~^N$ z6;hCMA$Xvrkb>}~LkfQ10x4*>1f;+?`IVL(R=~xlf>+xHl}8tpdkJi?g0=%zK;&ZF zG0|acAl!}u8?2z6DI(ufV8jZj5UQ)f3M)8V6)RwRYVFFuQ(sMHV*nMb`fEaqOHW~U z6_6q1{dK@y1!N#CF9lY>=t%iWf}eh*xQE@Y0vleq2CMR}0-F&qR=^3$P!WX#7Rdb4 z%wN1DV6&@$JgKt-`&|V#)V6GP6<87S#R|wEsuvEdumVCvCqVmM1vb<;Q#QA`HYB~V z0y2oHS+>Fo2r-RFEwKU%N_JR*6`{JZ0x}6Jtblyj)QGr%Y=pL5`_H!{hL{L8y9&to zm9YZypcyM5pVZi3ur9wS#C()tg%wb?gighX6_5wR%+(ksY<3kKt^q5+G-rkt;NdtI zb!lz$S%(7#BwH4&b`|6*sfrcg$IMtkPQHGuAQxyoSV7K(;DN?=JD-!!%W0jQhA&od zxH49NThzq3uYiP^?DdNx)?5Ir;IIQ$K;&Y^3K$!hEmm;25>`NkVEy6&)tMDmaHJ|$ 
z;F2K8AO;A4EmO2OgcVjm2G!ILEzdU)$Pl$-1!N#CF9lZMr6V~E!0JYcKUlqiV8aX7 zU{%HnY(}_{SOJ+|b*z9qsj~xntiXnv6;@zH$QLUhgLq*DgosXn_E>=pHO`dHSb+^m zZ>)d}Vr&9z-asJ4G$OUc3M?quVFgx%g4v^~pSIaoKxWJ}B9cW*ugG5NPGnmJ_&{&s z%?2wVyj8{u$b)9AfP7M8gWAz+31$0W>$zZzTdN2`A2yFIM>gPzl|G;1J+$Mzo)Cg$y6kQ1DNoYGlKx(M3Q@nXl zghg)&!`DIzl_Cx`nS4O;a@d2n7!CbN1i#yWyF-Nmj6wvoH1vu?Kx)Wt^*tkVM4q?c z;j7vg;3dgb-S(JfErBt-=f7O)fFxK(1fOldk!t26G((!@GKq|mV7977EBvJ=@4yU- zgfB+Wt|m>PYpN#AfZ^KYS@0VXyup$+{Xq(Eq($rzXvc@D6tSw)6e4^=B41djo5Cnv z_tU&q&Dm`_rrNVLgUA34U*1bTEub1vdg!vP1!igzyt#^05jUxrjDU++UKvTZ&l!?JFzviyd6#@RRT62 zYqSlz`GE|T&7@%3?^K2eepNWZ773<5Y;E!k=#lix)XG5#u*##IAB9`On^(Opzyh_eW z23i=`epno#n@{rFP_!UfdcG-lRe*9lseR?sX9Zc7QG+!wrEuX1c+g#?$lj;@4Rn!7 zchDVoHqQ1o(5)sr8yh`zG0FU~(S6(SSs~;p89gz&b1>>oTjUyz=G}~=uMe~`$sLNIo&TgE^9kw>D$Zw@VaPM4- z`SPZ$E+`u5A@Ft@I=-`&UfHpg*nJ!+G^rr2A}S~Xd)oOv)$46{Hv3zX>{w@X&B=-W z<}>N-CV(zA;7$fp8o+R8tFt}sP0&I1tv8``I%u7*TT@K>Q0^d;IvIlQQGaIV5GB6i zT^V~-{eoZKtRhBbHX{3m(?0t3Ru056lf8uB8T6m()vYI4)xmp`Qho4y5pdT@SqW}5 z>`gMxT3JG^o4;~>!bb^ZWdr}!kGq+~Lz(t$YsNp1V70PgQtid_clHN^#9RMf6fzkb zU2ypdfc|i^_Y5t%;Kv$pb7kI-_XfQUbX-0DYFP=gUX5duPc%q8G-gaT@XiMGDocg- zG@ZPkX-SJw^2cl|?#1i$|0jaYx<$Yg-4+P+frc6@scmWA@ zahpu-8fiA4`D?JqNmbnK4;z6MGmBd70twisLj0t#?ZMHlsRQISV+~G~gr$k=N}38v z^`kY80)Mjs>;5qWUw-LYvGV&Ce3fX;JN51K@7*`5l z9S)s{zH!olPK8$gT?DVSGVN@z&dSAG^dnB}Ksy8kB0ehMfhlrWNk~+@%C1(Y9D5x7 zF0_I#FZ7nS4Cf=QYlW^;rHqK~?7*eqr^_0G~Y^V7b03iDjTL;N_As}pZ zB~%iaSp~lt@f*UFhCnM6Qle)etFBU8vSwC|rV6Dc_1ZYZ-5 zt+LD^4hNg?*UDAqnQ?csk5)4$T)?)BqLTOvZ-ejEjgpSnIX~$f%8OWbB<1>;ba@j6 zy!S_Noo=vnVI>c7X4VtCc^7_BZe~>h?589CtsJb*LiA-GB)hjVv+JY}wMq^p6_D-X z3slSzsKyTawdGJ z38%vwa9LZ(`3rs35?qp+HKqubg(lBXO+EpS#gEjaN#tRFKZ5(bb%rW0oMPE6HEfO2 zTvqOLkyS(gAmZ;9cd}c4QWXF5nAYsKoA+eGvR&VB5lbh!B>LeUpS$QgBx<8c23O z5uPS$yWZb~Q=!FCZj_leIcU{~t@N>gk`WbJn9(Nfgdc^6NqquVIbjhC_>>X^C1S*xsh2FUxoi2u4Y)s~5uWG|dg!u8^#6%} zXkgeSM&q9u7^2uk&>G(fRLM>IXaNGfHj0`}V^8z*5j+$`YALMKk~%K4NkseFyJA;n z`i5_IvLm`@FO1CCsTNKg+<20@1Y2m8b4<&qauLWnb)H zlP>07W#>OqJ$Hhm6L3mr&Ukisg9?Bn(TQfVOo 
zxEVr_+{5!xtWdb;o**j}bXwBxxxN(&Ixco)f0y71c2>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by23Z>4r zTnorc$IW@MeseSo>Pv@LDGmjLVjG)3v_r?Eu_grSOR1`rR|~pJ$EDn;f>HX?;S>JQ zk$B3dq{HN16B_oWPmjjvAi<5%cCW>qbDW=wT9|hgh~Yy~fK6tl>V)A2nOzQ+Xb&N+ zGUvb5gsr+XAkFWpCbV8V2+&jL6+twDWH1N>Ly6cfZ}XOdT_H~hz;{;i6R@|QE# z*zKF@9baE;z+-h8Hoan)iv|d;Q?_qK{%4Khf(^Y3ETCGOuLh^HfPHDOZv4*P&5eZG*lS&m~Q1e3#a4FFkAT)(S<1dzcpbz zYlU;*Tx|tN>M%1@H!VhnREwK%Dt>FNT0}fr+Zp^sg;L&(z+u!YhvV)-8^hwg%n!-T zqe$UKirb6ScSc0;@dmur)(dP!BL;-Z8NzlTR_t%DIlh&nH~ewaKFiv&qynxOP=DqI zmUOLUTaC@)6p6xyFF-1=4(#!P?)jd4%)Ibg)059kt(@_YW?;?CB8c=6_5%`%A(g6f#z*goo|IFvljNN2062${qc+#(WtuvczmTAfz+t>q0G2_J!n;s-qrVGO3y^X$oF()8}hgxRVrG*nL}qE?Hn{Q7h6J(X1tR0{yvlMw@#CkY-Ry`RnI??5F zY|^hMG7S{G zR$nj!-*qy9YtaRQtdj|2FqNKXPbQP5cTXnFR%M}~5|WGu&z?-m!7MV8j2HWiSo=hMtI%N?sLic3Czzj#V_GInadNP3_l{1@rvi1w2CzA=XN>8>D zpeM5wdAT>8E9|;@n+Cq?WW4A^mkZL9@k8LM&$B0!kqpq2@nltA6D|do8yU}@OvV6{ z0d0%Q!XiU2sPPlT{ClsH@kCW>3%5?jk4M^=dR0gFh=lIRC{5@7I-OvV6{|E!&sx_S6SS?1Zh zBa?A$NPk|dWc+x6Ix?Q9=!nL!K{Xb-W8>#Vn1vs&a4n8x*I*WKmHn7qxc2JCc(zoo zPUEb7G55z2oM5w!{p+vU+bx#8nDxyAcCs({bm+wXcEkP(KG}5gf(l!MX!v;sdeG-p zFQrVA`@G0XUPJn&ru`vuma-^EGA029wu9tRFM((^S=d;YzIgNyJQyGMeep;VOYGhC z1AZ_3@usB;BODl5OLUlx;?7XTJpqr!k9aGNvlphX()D@pNzcqvE}N-H6j4XL+|@)^6v}ob!Vfp#VV0Y!xtujMMPl>IKaZ}b&?$`_6@PD=(wguKe(&#bHlR`$m(hG)-{nTjxn4+m;0#_+SJ#lq1Zk-n6@d4jp~EH*WQZ{b|2D=$z{gcF>)tw~vOCsnv}L z%WtIF`Ute+L#1rZhgt4AlT-cfL~C>lggI|r{71}AsaI!n@cTWv^C);*2MMqdo z&UVMW&Cam9)r%YPocH7}hdpWG-0cs03K)%nu8nQ1&ZU8!>oIX)XT^SWGU=Zg z#tZmsqUV^;>Sbb_4wtWj{YaU8bZSShlTQhOe5!l5kZg9)iCIW{PxXgqxVH;@g#^D8 z!ID;$t#J>Xpjlj5q!&XG4t-5eAq#!Ih>zsf3*&n>k%Cs?Jh*@cJ7jYTIDW;81F7MIzTm7)VK?oxt+V@pRV6szAU|Zne})+2jFro??8@j zcDGvh;Ox_X2g*d~-)JJuK&I0AR+@=ga4^2Xkcrq?@E5a3@ggeKak)N^?q;8ZZ$s(Q-V)B5;3*8O9Q^yfIC>`Dzn=X9Mw;FvMXZQ z=^12SwysoM%53K@H-(=Y%oe^A!8*%Mb!iLnz)#vnTcdD+yQJXvBaqy}%}T+b)Xqv- ztt4o;&&v=8_*87t>mNmM8_Q8;y`t_%S>qZ84;v8D>!$fKR~E70I}NzglVLG%_zF;V zbuBwZ2P<>)@|r>p{8{7~s*xiJ;lo$vMZw|Whz9$H-;O|X54lOTX~WslR~$FfBH|$2 
z5WmK%h{*cbmy+uJiuL`ddY{kGzN~!U!-nR_MEFVsuV;C%3mKFvb-2(BDhn0PLMR1_ z%Smaou$oB(zT9xhc9caiK=_C&32Q-$z+e!)vJP?80emBZH4h^a!-0>4i%{UojdqRZ z3r>v(wIy8*!ByWn6*@Dfju-F;PVEN67Z(U5y@B(jjs9&P>;BNG$zYfiLxDrt%TRM^ zGMLB9)u=Y(J1anr>S+uVzUkCnu&fF7fuEdf4hd*qjAVJ04TqAF4+R1(8qvz|(@uBN znWB3IMq|=wFhQcJ2~>^c*VCe61zPbzo1%iwiSJ38Sz*ywT3kc8MEVb&k7PG3_;Q>r z01!UCre8)i(X8kAIH3Y!f3)e)CQ`|%D!utT`-6dOo!D1sf|(?e1Ll2)zzZ|91^3d- z6#)I=X78EK^nAKtaj2%wLR29sZ&1zr4bpTT1(GUCzAx3g7fEpVx?{sk!=A7PEWvU;v?4k=TIY3)KJabFHkn~m3=mEY z8$-{nO=bv`ttyBw$jb*!ijh<@X`ATOAhYZVVZvA5L4>0b1Gr(k^t3nmx^KwFm<$!H zF?Pdt$ut!BCvLn*fOM&Z$pDhQ+jV2RWM~C%c~k7L1YjU6nu!axYZ@XzLrjKPo)xxB z2F&!qRkD&)=Z`EuEWGP+^A9LoWc7Dvw|X>kN3NlUle(Mu)rOlFe08@9_c z>7TkddPVB;l81?NH*A-isjSu&V^fgO{tB|4>8kY@?S)P^?=Y*5te@3^?V5S!uw8B) zNwJvdvW8n9|Cz@Db;}+wkvSOB{R;z zOl)0{%;$*h3QUXX%mH5%D7s*~0*Q-1rMY3dBAY@|^OwB|l?~&7j@WLyE^HSQzJS;+ z!x~+qX-GR`BMI?HUM|T4INNyQU!mV7p|9nPIzRz}m1~GJx^@QoI4U zN)rxl*e;n7HbLPnc84I!(Q`>!905sX8yeds<0-PC0r~(pY?lm*r7l#szEUqzmkYK_ zrehrws*+YD=!w(10-29p=5xe$$sokhgmmVBFA5Z0uw8+~ z#h=pLuw9W&A*uPxenx4{ZQt%;8*G>JyU_3ro`D7g2Dg2?3}@;fqGLv~IOGCP<@^~j zBS&nPVN--|91ur_nhUneJYF$ecu2M~-0Qe+mt{@}5`K~{0)&SYG?wg$?K1qz>RnRb zddyi3+4A6pjyE@Km*ee)5gy$3?Q)EY0m7+aW1v~LHkl!CF$W2^eY?E8g)reO@7e=} z8@7AoB0Q6U%*oI}r=igSLnPF*dcADK{@5-V-~wQ~WM~C%?%1w@aH%D(OD}~Nxd0Y~ zi*mzj1fJZ^WFP}(`ruNPq(JrJs8pg}sC~N}%K?%WN6#f`aRkc#F4;d2$jIciZ9Uj7 z&!m6q;^>*2BaFNpv0ZMavRYS+O{Dhqa<>b+Z`aJTH?~WL@JeEC*e)5G0BWT@1n$_b zk+f(mF4(SlkO0e+WDti`=C*H_447+$OJ?DdPbdVK%-Rv#C65zR^v8AuiZ0l$K;q(0 zX>QoA$fl6g9I#!1VwkjLN{){Ec8}O#yDsnzCai&M?cp0YY?t9oA4EcKzIED|hiB@D z?J{hNRj>o%$WU{^cA3W|;lgX*E+(zm$e;oLBz3nVw#zan1PMP$>&{asf`_{kI%2yF zzp{Fl)VJ>Q3KAULuw9NfAv`!9njkABJY*x*I*$8xIYz|*;ea)A^xWEHJ+>?FW~(`P z5T|j&b~*V3*tg5kt36Pl7aC4hIZ=$e|4n=7E{PUv^z7W5;7HsyoSiG7&x5u2a-VcV zVYDnconH@vMDQ0C&)tyBrwqA?p1Ul8$RfV_G0amITD%1b?{2~m1W~rotyDV6L?zLr zCOcFV5Ecqh9_{b1iFY;No*w^>8k~muI*5>FV;4d?k-Z3O=`2pkbWgEnyLpicyA+2 zPCyPQ#G*j@2q`s#e9~*pPO!1Zbkr|rMYPH-H^QVA^1WeHvNc9p1*wbY2+*}=m9DFk 
z^T@>>%bkFctg7)9NvZY{z?}%b>yv8>r3y7q02wB=*qQ|XC|t-_+VZfYGdGm(?h4>i zLTm{j{cu9jvHw+{{)!r(uDY>yK40CdgHL*|gnQ-5aQOm?DnI1YOED~qRZI{$dh}B4 z)Jp547g=n1XOPHVu3UoXU^elkFT0^Wdz*UD>yC%$F8IzE-5^g2kn;Of@Z8V6o`)tB z=Wjt5US1C5*-)o?)9#9Th|{M}Ti$}MAEHf$;6k2ax{~b$>22(=0k2eW(c7N%2csbw zq%5kZKX*ghPE?=R1;-Sl>}+o$eRSBr@*OUFuXXF__V(a>D&u$${Wf{E-NQ-sH41ji zW>Ot1W*_gYBunf(aVecy3S$0gbWwPk(p8^fZ%R*IRi;z6@I-5z+P}6FgGnT($}>1K z?r!#b!|9~O{!OOJd%M$ncLtsGf!1_qd(dlbjE2+kXwcd0ZTHXxPQ#7!ot1KEohu@U z!>U5Ai{%*hr8w5GH<_Y~+0Vs|>*q9}Om=BHd|(&6ndJ)WE|a;X`Qj`hGhR-aGI6Dk zCO(%|y4XFF^;9Og^1?G)$=x#cX1k8`aCCFi%Wj>oNt9Wwy3AgK&+LNxy(|_L0c15p zX0_L|#3fcsA9skF^x14-A#vD#Qp_D?iWgPj-DKsC!;rNFBe@0$y)K(Mi9RX@Uwn1i zulUvuh;McHHPsIJ!wxZ&4vTEi%?nKsSu@jVU#LJ#^dYru{bIlx_y=Ngq?^o;je!_H>=KB{u#SP4 zdHh9X!d`*6-I_@t#&ydjaRi3UevK6|dE6xslV_a+F-PDN#NWcF(e#N=_8Kun%>4#XUR7nK!z1>z&Nfmmn6EIt53(ch zK(T~IatHhIO1sU-BvO;UGX#|vm1XzK60yOJITZx}kNtNZS1-gPczzd1E@XEwbTy28 z11n#RnWq$z0GqWu90fCh+~h@N;BR9UV81ZJ8F*zx!*>Un%$U^?JKHYGzk@LcKf4E# zGV7UbZ|w}?CR%B?TENxs7C7;wVhrQ*Hz#1;f0C17sFKJ}YN+I1XS#T=i7?IkJjs>j z=Sh$*EK!Ev;x|J?F!&q^Xc}ye1iCJ^042V1BzQuwITC~>;2cS*9c+%I;VI_0tlBX~ zm(V=p9DFN+S11CAu!~&sF=mTYq6u}1*VH0`tcK^v9<<3A9lRGQgsLbv3(naNV zaw#hJ(N3{*-SMPVeMeUCh$rqcNG?TX7sceTViBIa<#;J7kG6ZQQX!VUr0_l3sEi`j z0*=p#A};5_O1Yaf3wIY$WPWxLULt2XoO)fOq03m&$NIz@&Sw#~)qHyneA;Sxmo#+&_QW`rgIe@!a~Z ztm#>G3*s+uX{4%AGMc#D}L?ibAzNx@;Jh`T#8 zMPiBU+cvv3XW{IFzYZ^Ft!i?%yWQ(_hnti6`N10**yPZjmqY&UpF%OMsD+owaptxMjH#$RAQz`Y~D-} z+q{_~v3WB^Z1ZM{)aDad*qol~HUjnE&8!z%`knmERzW7w9cGc~@++pxD|ic7mAtdbFzUbdD?e;Qzey(oWU3syZ3=2w zmAHw>Wm~IG=QsT1%&GOb244~wJ4_ifRUUa&USjRAmNk!;Zsp9WHHTlj{m%k}N30nn zrIo0=BZc2NaFbK5>~d=ovBDn+%)O2>XI~GB?7dDhP{Faw_BfJu6+Ejoh~M24HFy(c z(qy8floeULNih?RqiBWVVg|U%E@|*)%A8SywEh;^yIDI~7Y&l36mqMo!CNSkMh)hz zgUI47g+;muA5&FuQ_TuW8oZSl}K95q{N`UlyUJxGM0jCgL%LgBfyq6#+_!T zI~e3^7I{mH`A_!a2A4=9xt#y`kob!g;fc2J^I_rVBf`(G6Mnu)`1xkx=UarIZ-w_= z3G@A7f10y%m|J4uQJB&qR(@Jk@;cjx$g=Nb(myjyUaNVmKsm`EHyS_9Q#XD-FDMB8 zLniv@^HoP6`*1M7<`GD`AagSFfCDo5ymOy_{U}Rsm*L 
ziRoixM@H^o-!Gx2#>yoYi?&{pA>SN;7BY%_W**dtSut}v<~}!uS~0uZE74=UkD1h) ze25ftaUsJnMFf7A!)zEbUh_c@<7GoeIu7$;$rST(BgC*qBy>-8387gqgo-&i=qzs{ z@M>)CBrdFxAc{m?%*Txo! znURb788XFu+z17-qaszZcC^%3eAz3$@auPf=K985`rpt0U(fH`^DCY4vDf^X-wmV>h24{_!7xtJlY)?&d~!GEHZY9#eR9d}flG%j6ImKl^4Soe28P zIQ>obP5PUqtEv2x#-Zlw0?fA?%{_1n?8=q~lgojKZdbNSxrEN!&J-Fi;H?}>mjV6@ zb_H06U2l5(PIpjpn_b9lu7#!Vp#!d71#$a|!fW>Jy=*tc??W1W66WvhqXsaB`OW^; zWC{A{*u-Z#sjIOnMpjKGhd5y@>6O4fk+~?x-5X%{UFbMUbV8zPmZEQC7umD_0P+sF z5$NJ;I-ULyE!d<&RZIuAVFABfmX6j%pG?1=N}oXfifm+aXCr++HF8wF4=39rbb@1- zTK^pXhf~ZSUQ!5iFYIaWPKDWAjNuKCz8O#>9PmV zPL9#mgTsF6fvJeIaF7a2Vvh5ZUT-sQ_J`A?aqZmF73kCG7wzIV*Kh{9x(EyH`78HF zjimzxKe^q%2QmLf*!@7SJGi5NjtC(AY7zOd{NST;ecX9xB14eS2K~wO`qa%{bA5CuN~JM?e0Ph>Xebs|8|^kUT5r(Xn8ugE9<=FUvp?;RhSx%> zfz(CPuURTlx!V1(>v+mE=a%g92o_?`diN zM1S)c-ms*D*&07AvpLk7aHO4V zBn~_1p6U%I{IEK1g{#*rxsBkrg{v!gY!)Lnd0)bwl#entN>-g6oUQP^-(+Vicac;x z+t%7xT5O`8V-5Xtxl1+&4_JjZH9I96@aYKN#46E^tF6A~p!9!s^G`k-h?hvU!8jBj z-Diky$_Kt9KHgY6A0KYOeg693Dwc5yZEcr43 zF742R@XI`kVAw-C@8bVYi zdxT$Z0C!lolV`;#q!fZ{q>>L^JxT7@TFb9EcHU*lRpXkSCh+eg{vsin zU8U1EIivGTP51b%@ouQ&<)<2EgUVDPIH2ABSVt`vM(8XexqE3OA@Otgcm%JpvQxp= ztb`MQNew&jA1!hQ0ao$XMAa+n-m5BwoPsTsepo~{y-dfloy<1H7SEnaFXBlV`aY8~ z@}-2BtZ5kEILeQfOzhQM@OQPa(t+1Cta*fYTZEAeDa!Do#FqJDg0|9z=OX@wZAm~w zYm!0Q_q27-+IN?Ia%2u{c+)rVC{&voIxEhY7M@EJikH767pgt?;U}J5_F+0rJ7IXy zwee`0Z}gfbs~a^S@$m+{#o)|VdeOG>+^z9mHqA4oF4wc2?QHK&oU!RGQ;Gw?vyCuz zKpXl{R%OIatW=6zjYuu>lbXUg;q!q6NE`d`rW{6;TyyUedo=AmewpJ_4Y)mkKjQ{J zj6I?jKMbK@_GpM3@O%WuZRsQha=itYk_@Ayb*_jCc$r428M%@yroieVT0g-C^05Y( zE`KbdgV1YO3nd9*gOWxdP&|_NBnEw@Y2O5r6p|Af5o^f>;?Ts)CnLDU947RwKWFTk z4rp0}5jfyBNXF^B;6IrAs{EsB7uC&L;-2oaErWJsBSpNhnw}H020v@Y;BtQQJ*n0h zT>e<%tntDqJtQD^I)et-{dfpqZ2q0*ert!y_i(m9>C^OJ9`W?^wkf1}88<8HEsv zNd`p<=>BsSy{*2`?}ZeQIk1hJKT<%bwlthENI`j#+!+KQBeVRdyK5VyfKb%(QVUW* zMz-U{1}PvEb-ef?1vVq#Wq)Sjo%okq^G}9M#YCHbUS_6p%LXYR!{}sZj}+KY+d_-~ z#?H)&5bDenJw6e=8-hRvw?YcYhjjw9M+$7Hai$zbl-@`InRgJRfDqG&)DkJMpwvn? 
z0|c{2gG&aoOBFWQ#FwlfZ>S3i2~xY>)!{QO-prUgy&OV#;No z69E9eqR_RFHb_CP5&^(7Ho^EYGg6R~FKJxii5hOSUg$1wE2JRjLhwLKAr=1TK1 z=Qa3!3#5QtQXc2;W*OJnQPXLM6>#yX;PnDv1@so{VmEj zJq1RrfC|BSTLb0BYF7b$51Vyq9ag}^bZtM6PHoL!wY+LQUZigic!3IJvQ{hcxyP{DF$4f0(K{Z}%umVC+rx&iliiH;hjJpbKM!;ABW6Zi8{egECkoj@I zg4M19@}y37_E>=pH7l&ZiV(sI=vHl)E`|)ELL98H0zyP5Kzppfh8kze<~G-cq&HST z1~E1PHoFQ4F^xzqu>uQ9c36QGp9VDexfrVZ?Y4OWmJLBy7s999r` zvj85BcMeTAYPG8X)8t$<5LS@Sw&d{jVglP_sJc;}x>PwtvJrQ7T( z$hi1@t^J)}?h=K@AW9nFAZy zwK#-Te@&=r>W7-J0x~kokKeumLQ%_0Em#2=*^U<*tbkC|@xnD&m9YYw5iTTFK<1aq zlx{+>!V1WfI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot* zrV*(nR$xKN4lA%C6wDq?os`YK0y1N+5vhb^O1c&oM2<{kxzx8f?<*iAD`N%ZK{HlB zKB=)mZHM9|DS3}#vI-llfU+fYDn_h;JXjQsUL2ZuvB3)Rqj=devreo4564$6x~9u! zUje4cxv0eJT)M)1wk4md7c0nBQWYz}kD0N8oP0^+T2J(lp4<<0O1HrZaxMf9G-3t# zG2Nbk!Z+P{w%S*4q%u~3TU0YvK*9{h3Mek(0Srh02P>@LhyzwYfA-TuA_H<&fC%nfy)gh+=#;pV!F0v- zU?Q9l*zEa|Lti;X@>30hxI_Ug{HI1h1EJ_bP@78vQe*o%e@Uu#_9?4rSik;mC6{S}qwyQ}~=$fiYbCz-D zHzIh0C2RVF6eL?7qWla+-;2k&f=u1kM(G}5&1==1-KNZNp6xryQe+Sy3?V9L*pNho z^w4En3(V9ccykr0B!2dwa4`eF!iJ)vkMmSZdd}aWD)ucnZWX}=o3sWziQt>|`Aax| z!2-I;-m0)Qw7|zOL^22jW$PO))mJ);!8YjTFIiMJlS1Eq&`1QoDx6@8cAoTyt*u^n z*qPu9PddHvcr}zkSi)T_fp2&yShL)75y876{zz^m58Vm6HUnMCMefN; zuli^`-Rqw@JEaHmmd`qvn|tSNe21xOP~IyXZ0Ix*{_cnaZPhT&{2Cr3>AfJE--Bmi7 zq6=d-I+Km=pgZntob7Eq)tT&UpbLZ0jX>GWU7r;~o|4fMqdNzq?zBa&(MV28u0WLp z5vhPQ^J1k^1?Ed#Hr=qqE!ru$xtH8);AmBR*i#Neg=UKoN7P(Xz;W41^VxlUxx>yv zD_YM{yt{@V=(!Y!26PC#orcm|uhOfp61$Hhg(elml_G-ajahLT2v9LyT$GjWucXL| z$|=#M2HeSDN&^_~Y<198Z4-1eV=KEIv30&~P06wuS^!{Dh|;Y9^=EbtQQ|Acmp@kZ z3x0XCiWtpGLVfh>tsIDDCR;4;4Ej&?E@nObUIc5ESsM*|lZ>-gmQd^F&%KQKuYTOk zBp%AV%(iCyb95)5yY}Mjiqyng|6UX_85b^i1$Xai8dFt~^T!%+b7kK1t0D6%A}uRH z)~mD7oA^Y7#6x3+oK#YIX9Iebr9yj}PTtS7q_r0LW40Cd;&uA}6TxQPB47&g+6pD- zj_F@E>Jvb~eyTRX(W0wLpZ1p#Y}92nN24v^Q3H8@ogmZq_;v_^>kW&_s!V+g+d(zRmc_bvD;v2a;yA)u49tM~J> z@JSYq@WPY6BdoaD2pQu_0j$HJ6Tt?q=mYFfDfo90yw=LJv%xwmbr<~%w4_;`Knrg9 zsaAAdBjv;nv_n83;-dl{m?DRjgrw?NEp=jzqu+(re5Yu8??sIR{<5uw<=n)(^KTMv 
zUDL)HEfGU1Hl?0O|Bx;M*lGR+0LXsC)$T836JxYc{Jmhw81+w5n43hOqh`V>x;#I2X@F~+Y68?%%% z6mUhmRD`j_ih`>i7tL|*U)V0CZT8Q_yC3Lv2Y2+(SuT=g`IdiU#~?*>Q21QLU!$cY z_`+C&`I2pD&TyodfY)}BmZVd=#ZRQ*gu9{4LbPW4+JwJWt}@SzyPJKqnmOSDwxqV> zD}42ymAAq7>PAV&>zto-4&_CxB9^hEDdQl#KZ5IYgWcI54Dt|XW<9Z+ci|WI0qmzE z{;eFW&O-EM9wZwtGPCQX54B1TWhE}DsL>Ms`=ua|`|N}$T+Km@`bTU{H9m#_=5bdnMFE(YXLGH7~BH0)Xr{mEe6-sZ2E;!pbA1~g6zE1no^D-pQeY{*c zPXv{YsMo>=d$7>i7@+mf_%V1SerPb-Sd}c0`?905WcW}MPKP((vbK=(7y7CtxFoaE z*cqy^C*ZO8k(xBtSyAfj??-T-x6V-Ig;OlMrN*Ng`Ueqzx42X0=O;z+KaXiW-+)dy zS*?@J*mvc{PB5Pyx9_rbym%oEzP2LCt;X?DkYXW}OMzbc4JF*8+D@!bfHhjWcQTA&m` z@u7e^O_}FjX;Z=yxLP!G|3i&BxNjHS*CE@$GWcN7;7HFzm`$FjLB4IHn*+)xAkH)AFfA-$g6%Pf=9f)DqHl!a7G>j z$Q!tLDYm?Uhh0a|Z2f`Hg~%J_>7{iW_Q)B8l%zfZ7xuz6Bud{5f)X*}%+yO3*xa@J z;Rf6v(g;uV2R(GzBl`cuKQu7x5~K0Y3=C21B4~~81ghkweY5~E$zCp{`xg|k0)9S% zhk{5gg>_m|$CY0uk-m?coejzD-%}r*(n!zF4ns)Z8=H=d*}!KxfqeY8^)Hx?uK zSP0BHHM&fI$J28W;$7Jn``4t4xmVfwk5qpwoRu=39p0b<;7GiP)2&A;ueRQb{pk>@ zM{l`HRByM%JJ87(L2?hz$3?S3L8m3{p6gqopyOg!_IC-MAX)@-SePJjq6zN`fjH4A zp>Ob86@lZ4>g5f}3It1x~1Ax3DO$0}MGz7>?>8ilPpdc?DjYuW{a7by2 z3N6g57Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6YeJyDl&V^JwV=CnT*{3S2ShI& zKH=Y#<1;_+=$}5_8>20uJxPbjz3npWO`jf((LsV6qwQXcJLfn*6}2$$DiFhmq5zxB zO4SL&4KlkNERh{TssX-~{J`I8!d6`xkmh$)6I!nw1n4RBit%eu-_rp96*zjx|7wm|$h5$-ZZiILjiROZ zy;O{}vg4GKnHXtx`F*}oU$#P~nYL_5?Ma>HUmx-G!WUahr0EF$q(+mZ@KkN7B~w~5 z+x%&{0ZQ3J!`DM>g;#{Xyy7c5<=SjH1-5F_YZ#d8f7Tc-7=))bDPZd!)s>8;)uHtU zKec^-s{#Jo5cT%WVnMZuA0tEl`|}m75S*xT%og^X|F5XNdX4TtuV0}wC0QpiK>L`_ z*5w>x%bo*hkJnbXrd!?LS{wB|FTfu)VY^)~aqK4ETu(rMww|!>Csd z$K8cChQ)iCACj3zk;07>w->3=w+KGofVbLufvsr7fKWL@*bc;s{p~f!w{rA`KW^G* zSzDG=z!d}P&)mS0uC;8dv00oVQP}VWNCnmj3qIR~x7Fh*!b2toE3zN`mBp^4f$MCn7%4JByG#d2EOWV4IO;2 z7rlBqoNSLKz0Rq)aeV}zYS{0()-Ya{D{_g2{Ek1{D-dU<0bUT@R1n77UrVwpu1A8O ztbm1C3Q>`lWtWWwaDwFF5LrWJ1n`+AoMG)9*_5L&lXoR|#f1=)eQR5Z6$evw@!eUefK*sg7QMD8mEK0x`Br!` ze!M<)qCH#cw=dSRw=e!-h4+(q#?cX3D03WXr8m~%B$s}d^;L`HyTmr!&nhewO0Wbe 
z*Wgbp96TtO5LXE>1svahTEFQdAEc~?g4fFZo4~vLLH8b%(PmA(@593q$3)G@Hgw(iG3Zdn+7OViuhu z5BjF7vUVyQ>}sBMPJRVGQeh9fnQZ+jP$C!mE$p@oFddt1QCS#i_zYO)IX#Ni`Y0<~ zuFEYqmjESKU{%SuWhPLIR;%jh-aVgVbIrmdu4HJ;*5Ic+a~}@5?u}UvzB%{|%S7Tc3?ZA7+ER_KKLTiRmGS%VDQC-LtvBl?8#&#(R;nINn5WGexBGE0${*)KLR@_(~hI`dv9<3%UBT#%lO9|Bi(o;{h2 zWPqNGC#&+Ba4E3dpLq6UG6rUo0n4n|XF%U|G8xx~1nAY1@#B#;rC!zPHxx4aL_+su za%R1IG8}T0YrmdMAS^m%5immc zWWvA1-66aQ7?gL zHAxL-xl3OzdI%njkNdt{B#9;Vx#t6ZFZ}VQr3xe5rukZ;!)z3HhAQp}cr1RzTXCGd z*!EnX^vpcvN`>xBv_3A-*>(9V(Pd=VU5No!z?$WzA%ge&y>=jJTA5z!8aDflPEC5_ zbKPlwG_=0~MkW@{R8)RegqTXrRTI4=FWrLp!wq!R@`zVXV+o2nEbJ|GN@GXG-`l3NCj5fm`+J-XsMN(} zG@sXZxzTd2SCUz%EbzV@DI=OTDhoRB!3b_BN0?K+X?F!3I{I{P-0Bbe(|&i*IoBQR zpgT`*9}Oo{$sGu%PoKVR`HeI`AAxp!sFa`iz{*`zZ$_Sv{R1ZWOv-} zo*MK{w!NoxIqXRb;BJ4|8^?`!E}i2fyZ*JYn*FdjJ&*2!Z5&cx7=dyDdScw$>JQIkCs)z3z*FDwOA#z-RoNQ%&~`*we_G`(gc$EGH99~wSJb!*nYo?26 zdYScjJ|p0AE$={%Zg#g?_u%Z)e+SA$=-+5UG7xvOdK53B5*nA$^XPK*x#e4FCThXK z_y$8JV#no^ka{Bu&Y$0k;0-K4IQFu?F>t8%KEu#yU>3d5gX>bByOQKNSf#>+Z#Li# zmbuF8wggA@6Q1mfSax~_*_W+L;gA-pQ07Q?oxpb@SZCR(E^Q$m_({9yHr#nK7Nox) zf#eo$Rtg5Cc2>%2y~u*bA4PB*%TZ;$qV9*Sak->zVXHF_8{cWbot_Mffx}mTva4&^ zDLPnL5dV-D=N3v80ucOJ_S!aeX6 z>-!g@cs>K|aJ6wWtu_wA4e@KNs*Q5Pt2r1Az7oOfSsv^{2IYzi7tXUS)d-i98ew4- zi-U_q8h#|j3w*iZlIVMJm$@R4u;k!YBQ zFE}+G)RuHLgtdv+5HH{loZ1bBFD?*Rvm$9VoGsZHMBopdnhb_XF%&qIy$m&%CWCpr zT#YK442?Z&K{!`Uz&D-R3zjvZKJb%s%^?Bpi;*m^vf)rt@}WSWMI%}ne%k3yI#YB| z^=K?@G@4nh&{$erL&!w>Z~67KURZ%ve9)#|pfh3diKb?QOaq>eWH&ANa-1yy5I(Oa zHPOCxKsC|aGOmY0@u!3!;YXVeZ6cMSs?wXkvp*Qf)`@+ECYVVgIbhy*2)r;uk?t!1 z`oqoMGo5^SqUmRJ>Z&_E;e8GM#HFR>^`TBphQTGpq#v`T6eVC`nB_lpX=-_v#Y9!T z!Q$yTG_^bdNwU(x-(G$$FKG?B26Sy_bp%piQb=3AQZEvhmpn|IBQ#KQv~i`df#gsE z)Gz83_Ir_RYo|ZY7e^BNu_hd6d9iBhd0UogG{$Lc7RHi1Qp5aP7dDmsj|h%>bHyby zN&Dm#DXj7nTPGUNzk@$-z}?28@H3ztceY|W25`f6>1l8Bb>A5_#$+aJZBYldOU6;)pSbZN0Wu)AOU7mS z3ytlPF%-PzO|iogfPt`RCN9{nX^2`HVlt?y&UeIi3HeMPrb^bO>mmazmy(qXuH{Y7 zgtfV0yJSY#1cjZB4ndTo=aRHI0+OT^8rvn~DN1IO)ZMUMGANe1f9m4s6{*Y1L`|H# 
zVY}Q+Wwovtn@DKqHcy9Ho+NW&{Y;<2+^}6TPNr=s&vjwDWMq~@brN&KcF7n@?%c6m zBWclCT(Dj9AQv{3xna8kS1+_==7#MGOp8631HLFwbisB75*L3;bHjE;Hie|-FMCtX z7bfjFV!Q1NfbHTj`OvQGINh*ahBGdRID@aUtq(4+PtLwrNjqY@44Y!ua6lXxYA)C= z^LWK@;UUP%NRA`6%Q7bf2|q~}0l-5FItOyYb{T$UwI8W(126pGQSF0+8@9{w#)XGK z*lx~-=;Ww>&fjRSx{Dv&uw9N(F+e!A$qaVs)+RFqF6JQNhVAn57Q%$DylW2>ZrCop zUGairyJRM8_|P}TZu@r0I2Z;gEqn3UF1Pbvb@D?%0GLe9ao;X^T)~?=wre0F2ETX;Nrn{$&9cG`eVBsJ(r}#5vZ87yi_8uZA}QL z8@9_c>7TkddPVB;l81?NH*A-isjSu&V-u-;YruBNTgyLeRl;NXVsa=g7T!h;*O%P}ej2nVc@qvzHp>#<#0Q@H!LVz*PZK;yV? zmy?fpK#Q-uYY!A|*zOUxGZ~mfti@lKYv`cKlpW7xAdh44$$;$!gl);Ve0Wsg#g5o6 z8AHLFJGN^eTxy9OuwBy-wKT+J(3-GaLO#<6m#VzC)yhg-msXBUv4jqT+rC{w51XJr zw#(6TNm?8MNo5;)-!2(Xk;zO5ryI6Q2E|erDqM%u#nCHLmzRN=ICsN#xtYppT`@M1 z+PB8c#D;indIUFY*UWQW*e)5FOC+Q+Uco?G2f!y}(GW<%bqx@Q_z}RlCX9oC*w79h!w#)Ixg$LV#Wqq-S z`iybJao;Y-s2Cue8a4)+b!(Fu0vB_TaND=b%UcK&zVg;NLWm%e2F8LZ-Qu|2rT~MF$n^# zElKXqWktiE_pP!XUA@BLaTh!S{qKtB2&y^esu&4U;;%h1f-Fnml4_AIU4E)`ZHJnD zv9_sncafTGQky+ql#%GYjW9U@IiL`W0_kIx(Fk2@c7lyPrlWp2E233yxyB{6kfd`W zb&%&Jt$ewmZVe+q*P2zju1?M)7khl?UJ5e;q#}eE4R?}43cl-;YYU|cHBSH;CbigF z;V-^wk(V|G=?kS6pmcXv0GASCO91JIlaodw@KvAwiW;A;y0L^WqUHL6Px^kEd*#V+ z$wd`w#w4YWujv~j68w-)FU7DdRxv^3=+R5DQ!8y@(pE6OGOrwN;GIDtd%1E6qJ!DQ zm%i+V`s{7$L9aU=qPyTbV|0T&DL~4*&fvM9dp%z~s5Xvu;pOE(o(*-XH|?&dhd6!u z^lc%AXp|gl~ zm%Zh>b#!}sa6XlByoY|9yxQ*Jr1}~KyJa(}j$N{kcUF=mcAmJD&MXBn|1`QNJWc7U z&#*V8C$B2gDO-4=HBRkc+lj#>l2hdwoEdjF`@P|G(qjK6Q{}zg>AgFHPWnJ=y0bm# zwKhh>>3B5gZ1%Q$==P`K#`(@lIke6d5yWAcA=kxn4ExfOY}lJj(Z%fN;>PuJnk|X= zy7Uk1f;Y2VVclgiw=`dzMP$ayDN`n{^wGrU(n=S*XR@BkBv)Q|W-GZ{#@=k#ksgk2 zZW`^DuJO9wa40oVbxpk{@f7&XF1X*zVo?!5Rx@N)dp%2BVzu;fhp0)P%@!6ChwUfD z+)<`@Q3c*jR_-_qSz9oYYk<(}vfYg6Lu%Q2M0^c?#kY1qe5=E+sdmU8c8H;LSY(54 zUhv`!!aY9|whhCwJYOw{dX*cw_5%ds;#5tiKujKYF+uXIb0FpjTnfa!R@TgP+7~Ji z6MbBuKunHwlNqux5aWkk0x=oZF%UD4zo<;uD-gF^wt={`-N3JpL(Y@tMX3dA;(LHV zTpWQRvtMIHOdfX$#N=7$K+F+%QCYE9MSR#c5Ywj4rTitrm)fkM{FOC#{uMEK|5|3q z#z2f8b_v8}SjRxjJpQ6GVXr`Z*g6nj=!zJtFF+tJ-dB;?uQ3pl$6W$3dDb})a|B*g 
zR_qmskIW>fzUtY@~pwKIsDXruP{D(1MX+A&5mxo1#=Z$`Sd zp{`i1^PH{XG%1yk%EdVvzU}0ds!A%C!*%+#t(tB#&qnUGlnV zjUsO>ji1YV+I8dN9cfj~6mbV?rbxWSOc7TmW{RW@G*iUgotYxBME0$r-I}v-_Q79= zm$OziIosXtb-KgN&ScQvC@kv+VpvP-*HN>YTbKUNe(-P1!(L|ecmdmX!uU?2H2Oq4 z@uZ5)C)I48f3Zeo^VY8$nS3L+`NZ1hjU(rKqs$bE&6_D=n>SM=HgBegZQe|g+I#{F zo73~#$Y`&C{q21b^B?>TxF$2S@*70);yox%P5aaHoZ0Prk}*tDhVDj(=Pz_N2HnXd zz7h_^muIIgWgo-`$g1keug}BFvX4E0UMCuj74duNGPLjW9CEwW+wXsH_0aOAaK24; z!d?pJnQ6z3L;7(3qo`LC3{4TzMPp;$4b={@TqPC2??$knpHIAp`T*x}I{!#aJc-QQ z0sA|no$2<@w3D$viQ8~!wQ+4!VWc2%;%Bz!8Z&(Q-X#rEbXx zp|O``y2C6oU4F%Mc?E9)t1AUp{%gPT#=i3EZYXpy^ke$Xf(xdTl zxkcD37A=fQBrRrAVo-AY1XHuQ1XnVaf<=RQz!xLHmNv$nW~VzC|;pZd5HzDfA`X5r^sgr9GP_gx9|{b7HavvZhRV&PF3 z$!?;_qt` zs$}hGsj>L7rBzrMj3yJb>~aWhgx9hu4yVwqUU%4;ynUxT?rnB@y3OKmBGBfBVY#|Gg4^GXhg-o`-L9}SSq<~+=e-G_5tZZYwa za=CU zRZ&iz<*1q$k*s8+BJx>l710N5RYV_P8FEw+t^jOQL}~>~73CkdRZ;$NmZNGaB1zFk zMdY*ADxweAs)#hjKt!nf2(sz!*o}x

aqAM$1YWgbRRV@`%6_~Z;+jpjmq~+RpOwJysbq& z4r~f}QkU7}H}Y8$GBuGoGfY%Z#QEqOfO#I#f)Ld`dq(@+on8|EK|6rH&a=_Xr%W znI05^rhfL(iT29}DQG2m7c+ft>|jo4-eJx)aAMI#r_eI9Jc+F5XO zG%A|Rh8$dkAwb0ANyXjK*ty88$H~!*WNxvFtX<+t$|1@y1c<0Sskp}~b}ljzxjG=@*lxuFGcm7N z#nbU|K!&A&v5LZW?y-s^K`hE4=J9mgMa9#RS(RI?;^?}@DuDYy zgomhjI_{$4>BvOo7OObAuCYoWLd4||t3*ofu}Y*Q6Pa7ABD-0fV--Vyh{_>Wu~gh+ z6-z}XBDYw@P<4$}90?*Ohgika@o_*?tolR)D$S2u1>r%fA7Bv<6PTl-LxdKFST4DT zgUn23X}{KhN&!Q~c4lz*hL~ni&xazH3-QrFvy+MOBMqn&OgJ%86Gy$!JmWPn-8#OCF^l;Oc^^Kvdv~_@1nv3< z+o-pD=sHn6UFpB#Ec5F7RekU8`&HelgA4e-pZ~!hIJ9SBVSRJueb3yM zTybjB8;ovv-TD4#w$@o+{@^$bHmt?4KPExB8RTUf8uYyRhAl8!J)k`(S@}yffS0nRTaU zd)xhKd@WqPb|Bun{8!|^C~hn_TQ7uN*#~0TUttGgcI@6}c~~8R$Z1lMIP9=@sz03a z+}1Jd??iAH%kHQ@dvZK^qC4FjZ};0<{n51Fexg4a^@r`dM}yg*HypgJKWR^Tqpk6| z?#^g%dOSJT#^T&{`*<{+wQhl>?&h#JoyKiA9A7^iZ?3J|4bgfL>^k2Y?)2kpHsa-G zyol7*IJDeER+FWqadmDmiv4-ImhiN11D2zBp|$wW zZ~CdHSze;n^GdudLStBpp@BL;X@6$*Iys^XGZq`wz>EU?#CpQ z74wRDaY-@z0M?e%FoOQAw*VHW{r*@$#r;!R^kp-$g~s6NAy2?2$aK`$Q`EcM)7~DX~evw-qnnjY_ZC zV0OV)Vsi-wpNQZ_6Bnljy(w2zp>lCNCH2Qmv^G2p*T=80str?$t>=>}>Z*g1Zf(?XFgB z-(O8b^eVLD!!{wB4x(t_)8;+>$A(IM1CK(rsiC8Ta@j@zCYTnUD|!^2L(7Z#a`XGl zhaP)6O*>(DDcbn()4Y|{jT(^nSOeY|!s58zU2NsU*)-3Ty7pUFH}hVi=GytE8)58# zHuRya%7~rVx|#~dDngr&G~siB1V|hE@TMF_lw5P~BQj09lbJmHY6ET$;Lo_h4`Yw0 z#ScRWHTfowr>&ijkl~9FyxdlZTyG&X=Qs*+dCJ5{FmW7afQkp@U z|Nm(2tMcJE7nL}e2P2|;wVc5O_&GBMm-8dKr3r(}A4{AyUKpi^sO0dSrgN6~Km1!W z>_&tnc%Y?_g0QM#3chTWTLeJBh+|}pX|?EN-5*OZ0RC4zwgA6t25XRxN(g~Ph|Dnp z6AHpwK+6*xp!NWN&nn(9IDrYuD1<-)rHRiF=6NC6p0%S(Y2aMh$@T&I$e%tOxbtzd%`5E?pO{Ez~h5wguIyn+n% zkbK$S%WNtm1y-DqrGH~XWkc;8TKo?~2z6#EQ}pV{2p3jJ0r{}b`0SAa8)}>>hY_VW zQb6WyYL%^!0zyn9QcI-3f>Jx(3=qs7O&%Y{E^Uwk!aCQ6aADjuS4q#}6`M+3iww80 zLJA0PVoPEy=E#F)q=0-_Y@eh0xAS+h=6iqg%uo`2Ub7^nJZR62IbPz zMc7q9<{|H|?RFIq8d_fFfEAE=$a%5D3J47yFIy7{@P|&0cA_*RE$^wd63qEPAdbHiisB+tl&ruSOKOvH>>~;XL3=O&M#-A zFp1-VM9Vg>jyGggq3FJlFAhtV|kTHke-x7DtKoD0DNjqNsmtUjy&)3?G3j#S19 zaEqE4_Z5%;k-dIV#F~qM6&!KE3W!|HSOH@L1()-{3XW953aAi@N!qHy3M)7|53GO; 
zGFPmC463Og+RPgWF69ClLf$$D+*d#b((*C~tboiz&afR;KxpWA;To*USb@z57ZWQW z^Q(>(kSBGrv&RZ-s99kJR)p%>S3m~0!V1WTbpo`<3T&uxrfkLvY)E=z1!RyQSOFoX z5ve6sU_r?aE3hKui(QdHj6$^8S3roF8WFE7Rcb}%gB1|oDq{uYK{HlBKB=+6qJHv{ zp}fyAG}Dq^$7){zWlQK(j93AA&}5rogaS5L!O2E5V@GK0>%dDv``D=cT$1PI|xTB zVFgqO#UyQYk{Z%6^-E;;;*%TS(7S++7p!>V;5t3B;6^wacML}sNJi422I&!%?Ba(0 z+yZo*;v0hT$Up~N5>V}(_7_9><%M_s{hxUn!g5uB2sKUE@+<>%O5Dj{y5f2;5l#qf z_N+PTPP9tjCywOrD)>vD+l26+8Uf9oq6+~%3B9@ykeUt7;4NYJT1cT%#GxjW4=7#^ zd+-*cp+AY>_Zsl}P+z2Xp%8jDBNFW9eYUx1fnVXp1*pI@nikzf(RY0dg3 zJ4(w+p~ZsFG~j48^AVaM&2pJUMoBPREh;#FQM*7EnKZ?d+rAJ%r-5nQlIYc^U$ z@J;*tRrRe3+;k_lhL*R(sl*D_E&J{)%oYbbs<041+4=@c^=*i!HQEN<{6L1vX5tBi zL{w<-E5Zr3NHBv@`&_>_>Q3Lflbv$hpG?M+_Hq2v9fQ-S`;-1?vmYNQ{VJ&_u!Or< z0^jgJr)K-2Mg;GO_#?TMJoKjM+KgHE)MVV-+U!kd>G9I-C;NjlXJ_<4-g3vku<-WR z@*SoEZ-93T2OBy~gugrD069Hjd1EO(VjrDg%|IW`*1^(w{7bO7mYzxdt_bdA?bmHG zliuhFa#nFn=CWsFH=4`BS|m(HB^Pu|vd2`i3oxccEXUKEO!)J7m7J9fR1@ABh2iao zg-s<#SYMg{9f1tQ2r;SxGp%aVB@zd1yt8{wh#p1RyV@C~|rT zyq$)Q@0?4o>^PU$eHq0(et)~WH8?lT4uVG4oSYbJJ(b>W0_ai$ z?qo2f0gQIeb+;$|DLTl${W_FRhwTe<)KoS9LE1t6nVmzF_={ay(etENs7>ybP z)JMPC%7IvBvX}6?!@(2%y7eThI{0N4rTXCaBjB!+vJ%{Q)SqUYwX=j;H-GMB%zyRc zZZ7dqrajx5@y{bzt8AE5d-39(gW)jo*1s2pOvZ%^UIj21ZS|j`1(fnl!yj+JO_g~+ z-XHci(JA%#t7Rp~dNqzsKG7iY&{!cSMY4gnH=tixDzvBR=KV}tT8xrEBG49H5{U0| ztF26Mcl!Sm!B*WOU<&fuB*3+53VpXEZ58~hMtuSZ*iY3aIC{F2(2MCBIQYv5HtVvQ zau-({?`3<&?rfG#U3ZV9z21S|uGv3aaZ9PkrOxIX|_G-Z;4#|ucPi`!&o*GRMZ z%wK~=PO9Q=f7A%9n0eG{7fHZA72+p_Z4ZuaO&uVw8EbH=BrHv1U1<%U{BQ#{{9_2d z{L-~z<@YW4DzR``YayVMv#a;>^YBRnOvHkKTDS?mBdoaD2pQu_0j$HJ6Tt?q1D6Ul zE2>BM_Yu6(%Cxh=x~p{;{anebT%AA*ulcFabX_Cm#16DWKp^6y0v?zmhn0k66uTY~ zw8qizLMxavw7s`>98i{jZ1JU!+ge!8O}sn*CgIjKZJf~(F{ENM>M30Wu+#jr0FeE# zt%GE|5D>Py5-JJItb*T)_zhu7L!cE3Dbcf#RadFarP>bI+NG?zRo^Kx4p60Krz ztM_It<#i;t+0XnG)@@StDV!=PEMSi@eofezrJSLFE83-^D(f87ysO7Wvy=Tx+oiOv z!TEUi{r%qXj=_1$MY1g4@^9=Iq-dV+XCwX^EhWKc#~RF+Y(sN~BT?|$F4B^8YPa}_ z6r6B3lv#+@tgkKjYvn5Q%%rz9K&zQkE?`S)JIh!LZ-MXCjgpSny)f;rtNL+Rg)YNqXx{9dQSr$ z4JoPF&NkZRcV;r)LHjrQ+xcFn 
zyqJ@^y+R8*Q-LXC+%E>UExd;aVF}qcNmwNXcXgFm3c2lrr4yK^bXV^`i9(v)vHqF3 ziTU(N@x{=H(T|`c&NiP@@j+I|cW3 z$TqMHJ`glG(lZg}lP7ABZ^!I3D4&EZBZ{@ifX;#xk@UiOwzQa9TQ$l`n{eQRf&DLw zy*T>dj1pQ+byRzb;8ci~&niFKfQQ5BRcwLrYh20w{(Uy>8c23O5uPRjHs0HWQ=!FC zZj_leIcU{~t@M$Ak`WbZE?Tx|=ln3dDLxsda~8E927sbeRgD7&Xar3Td1y&$)fFvR zPE(yc3J=F`sz)apFym(SDcC|?N`uT?%a9>CxJTVyd(PfY5{}cbvz_3e<#y>MK zM6rvYHNF$5lA8_C0t9+(bfsCSR#7&a$TAid~uMi@Vv&yXc<1 zFfwDOS~ziV<4Nigtme$5C_8YFCI-G1{AdWwIW@XWfXDG0UD+4=*Q5)Jtg`bTsh&H* z(Fr)EGiN+Iyg>!Pk?2G-*%lJB-FV$9#R2$K2-Tyv+$E~F+u~h1!=PZRiXjBaJv<+m z%nAjamb81WZ-s)6i(T2@C3u3UZp`D6+X}@yLLg3bO6VK>Rz=`=V!4sn=SYrU3n~Z{ zWbnF#KZ?W(H^7<%B`5U*Ekr~tEm9p*bTEQXgeW8E9P^5&C?f)j6R9XbHlb6+*~B`D zD2fQQVCWB%(K)5xpf@Yf7t6O!=NB99gRpP0B}fYiV7{vs1`7nj+^shf39MXZHIJtmEuq^ zD7LZrLpyXl8f!wJzLaW7c?u+_P--n`Fr5$OGCjQSF@NYtJmpi;VREktjry}E#}jmr z;O2O{-{#Ia&QC=xBvGZa{9qJdlUb=cVYoqNw+nYz+CxaI%=vFO;apuBkmh$)6I!nw z1n4RBiXj%`)}X$t0sbp;iV5PgGs!E|oBrcaKUPIY7H+Thip#<}e4zo4)Mb$8NayY^ z1g)?W|J59`kZE&e-DLcm8bwR*d#M;{WydKeb1~BD^7~w)zHEg|Gi}+B+LJoXzdqvJ zg)i17`KL9SB!#DHOD&nwlKJLOZR<<2kPf{1dWfy?itv|Lc15RLn=PlnR&9F4Fc%FF zT&HZ`iu{in!v%xz)FuUN-Q&8Fv9vlw=#SmeGD>I&zSRK#ZHRjNX0f2!#E+37|NZ$2 zRtQeyYvunds;^$7JJ9P_Xc0NBbJqtNaJDYz5L@;fKzp>d!ZqFM{^r`K=XnACxCz_! 
zicGtcvNmVR`AY`1Nl-M1!d@qI=iXU39dCu%xu*ykIiL19@V_--GH-=*;9PA5Na`?i zRJUzZD(qrJ?P3c~#c!_FE|4(bCo7clW&{qSUO60h7upyW?`3{S<{m{25qcIm2 zeXId*w)FyA(TD+|a)z)Sh!y)=YmRT_=na3;w9m4(EUADi2GpOsfh9d_*;Zq-I7Om} z4VQGz_)HVtQjfQk&C4xU*eZZ^oq4L)C-gh;ra9m|g3mW(_dliEKk+F{tfn<%<6Gx& zRuC@gvo69l#>;X=F0qu~ z@%J*da?qJ8Xmvtc{Nn; zFV_pM|ABMsP3(f>T6BiIwS3Sq)kw!+B%8d^lWHa;*wK_JLL^@klMaU$s;1-eC zgS~Pi)9SRkZ?!gQBzy=Sj34lbgha*?#eK9XyGSg!;U@sPH4Bfp zk|{dDPkZJ*9CG=NAg<7ujKv;CjB4C8>$%KI!j%w}6jGgBeNKbx*yM&3OcFFMiXa%|GCC*y~}Chysk$w;F2ihX-Bo~+7i z!ll5n`+FAlG6t9oNI|X8JsCePX3M&k_3p{|(N$^-*OT$%&R;MtQ&IhaRAlJR1nk$ihHDcg1SWWs=~UHkQ90%6f9 zsrLt8Cldx{II6WL>(thh2@I*6+1!(Lo)tZrOpsN2vef`RnWe}JBi3&bn%6oRFFMiX zg7jqk5V-2|?8#N3A(S(Vp>OM!U>oUYL1p}?~zlQA%#3|MBxJ_Gu$lgYR?BtWlq zGJZVL#>`1|p?fkpGqv|*IOMwbWO>N;>dAQ4Rqk$W$27Ei_hhT{=*eUnpe#5vXWl)T zz_lV5`K^-)V=$GTXHO=Rrgu*!%vNQgp%Rje2hW~N%E3G`l8hH?Pxf9Xld^3{NnYz@ z!hoz@`}Jf3VbLjd{ZSE2La(6-lVZ5_-n+P3TTdoTTII~_^V{R;6#TD(vk5)1nJ0RBm;D0JXw{`giCCdYpKUvMfMBbvGMaF%)(DpxE4pUYcLDA%6`l)Tzhq6 zJX6kO;FC=!FQ~9Jh=!kM zpa*;g2^hRT$wmXw(uN=A*bXRB?~NBk@DtisS5s>8o^o9(>X>^OP$Ux--%G zxIkyu<*!7Sk@4A;*yOtl%V<8Y?{cH%T(2hcP+8!e zqEbdo0SP`3!S&?`bE-e|BqD13N4B+osdOnNhri zzb1N)`K(?h#_2HSb_bn-g_QQhV00$k>t4hWU$(D-{YbHct=_EP&Dlld75DbQFGX-j ztIGDIk514mt}N1vK{ZY=_B8I&nXkJ%`ScHZyGuUhXSKT!vD>*z{%MuF5Ms|hcOf&k zbC>+n!Q4XSckpHD)yu5M^N9kNYk3E9+g9&f`)-_l25&=|2>lyRNd~%wW}r44j;}Xl zAeJzH*LoB$qskmt=ZomN^#wj-sJE?UkHEJgcn!-Bj=k(}3>7H(Dw2Bmga%WA#Eg2o?5@LHCm%6diJkFv%!2v}C@ zdZ^PX#=v(PaHl83V&L!3SXCTZv-^C~>|V98A2qv+84@U|HPlD)ufta&cs0v|UC5wZ zsl$b4P+6#O7D6ddTuF+NrL{~V@a2X}wxcYH0m4UINmvUi?Alp;@&dx3JfX{nZ$z-} zVMJm$@R4u;k!YBQ&pS09)RuHLgtH1mgn>VFYBw0Zo(+NEkDQtehDk9LIF!8%HJ2uX zdAwYWwn8|wzXQQHo!SeQHK9K6le0fZ+GSg`FGjMw%7#Np$%g`g7L90U7-zRP?at7> z0^`w-FGjMP7JNCreLR}Z z2%lclFE0Y1Squ1!3OW9QxXli)^l4M0sw(${`BIT*E3aT=(hDFzQR!1NOCWW-+EA>E5XAlM;G*EK1aiy?< z4S0@u?9kI77|-)hq%{C*_c+Ud{prIEybya~ryUaXpW-j-z= zjd2>Ahp{9N)iD3|#Z6`ZJ%Zc3x#E&p;LMZWG`-2~E$~i`@aGM9y*JZhI`bD}Qip#8 
zir4Zk&Je{V7%_Y|azMvRVL(V~{<04wCWAoW85gJqaZmfua!MoJiNm@^PPi2UtofP? zSi^G01(A}|ivTK{RU=K)ne9SF4GFj1$z4?N4HvkEWm60r)nNpdngd+JK3=vaMeVAA zA0YI6DNu_tX4of>48Jt0i^%DeVJ z;fC$f)86Fkz9Ac9GL!+bWmCAmF?Pdt$ut!BCvLn*fOG)>$N-WJ-gRTUWM~C%c~k7L z1YjU6nu!axYZ@XzLrjKf%?sNl17`Z*Dp~JtRkks~%UZ?S+7a93SPqc1IC?Hgiz6UO zTA^ClBEb8o0o<@%o=N}I#nB7HnQ{5V4cp~rDywzH*c2qRUtDb0%rl4Wa#g-!G0|xe zmdrzu>A(%!B~-JZDbuJswreCU8jB0IYaZm{rZP8dSKx|EW@74SU2@sY5!)4*7Sovn zz9>+1!FB}_l9Z-gzDLOF|y6J}Pifjr=&0qF&N^5S|Zl^A67ZcW?GHm~)1%<#3 z+hsW8f{0{+$OU!E`74DaXKct3+hy1k!-fOm$WU{^cA3W|;ezgN@Q}B>imrwagB!NX zGA9HHKS>t>!b30`5jbMI48PLqXcsOoJmwr?;l~Zz<#^-5gR}q28f6dJh=Iis+vOM) z1B3(C$kB6alUeYyv|{w`G}kNedS$ypm4)>>FtX3W4mOii;V4( zX(;ede{7cwpy+t{+DXxBgWJAcGPHs>cWl={xYQCm?AtXB5uhO^L(C1^B?FeRT@G(C zG)J=R zwcVRf2yxuEE3zphHGkR9DXqC-yGQE6b}`|Li0v|*aX};ywwv=OLNKW2&5sq#k5S;; zFddlSwr`hVQw$pph$BPI1>0pFw+{a2J*%w#zXp1_-Byje%y}+GK{nr5q&Ouw7o>LYVNC zckO|~4ck5Hb|!;Mtw)9m))>3(+a=S$Fi2^E!XMiu16%}bmkh1o%^lk{5H7XE4%n_~ zhyV>S8DehOE*UV>2bZcOm8_Rfl~SMg)+tA9mt#3V(&Fg3BrT4Bb<&!5BTQ!Li0zWc zvE%!vE{M(gwI@(=FJh?6_^&&nFGEk zP;|j|1rm~!7J9jojOe&r>9%i|3`)X)kktHTZ+ej=Oxklili{chw#%e2?GD?P3v_{J zpaFp$_w6#AaY4jmj-?Rd4CKl}ha0xbuqlQO2gH$~=7Q}qk4wUZ*S=j$S`itl;lqyD zF3X${B>W^@1ON}))QG@w-!8+ito9@It;d`%9=6Nz#)St-!|cIfxQOA1?Q)EY0m1=m zLcR>Ga~d~nmy=I`eY+gJ+5-i8q2Xke6UDeo;a9s*|`$>e5Z?V0!j~_j9QY@`Snmh1b0BdLq!2$p#b4f)4~`1HSvxH+#N&{LRk_-69E}DbV3oJ zwI#{jxvXgT^S)KqqpMfE)fF@X{qKtB2&y^eVvwYY@#i~yU`t;usB~?IntidhsdRUd znru>=J%;ovd394gsKL7%VR8a;Kp_?d(uXO81e7YEYt2ruvBz}OFK0!x$}Km-q!#kM zw^hc)5WAz});SHivWWVqy_ ziln0P<<%H0_$E!K*dOxgr5KjQDkg{=J$fm2YNahqmso6hdyvRpu3UoXK(qHY+)eH< zklj$9y-hvr_a-BBb9{G#Zh0pKNcnv#c;@F{&0D?drQjaCupG#=(2z1 zJ6!e>?DlQj+rtZ~jN?7@+vH_-4=2^vDA+BVNp-B4eZ0Gx9Af8*OXi^9{C zuKEmnQ+o2!GM%!8CtBmw{f|p23+(Z)?yW&8BVkZ!%Th)0^G1Gwh}hv}Zfp z!+v`cT?;W854&6a?LNA|X|#EvyV{CjUs|?}`qLS@LH&H(c=>{+^}$!6mrA{bqN;I# z&U5|2`**<`Sgx?{GMQVNFU}$|ho6Ic3Z;&W-Gi`_F>Pi2y;&pxx2+%02owi`$f zw{2}1?N(=l&bM&NoIzcUt;45x!F^s9i;4iUnjy2=m$SqrR!bjuh??}-Y+)gB*nU#X 
z9c7B=RN&oW<&MLUwFM)&T83Vo3A^k-Bl@Tq)3e*u;Fo=C2gJ8J{F-Wq{9%U}N{2-@ z=;j5_%^=+OGhy2>TrpynJ2}77A=iF@KwO-v=@f{`<1Qvho^=ky9D&P$xZlp2nQq56 z5Yzo)E>V{V-zDlw+#x_9CP%u-4A~fn@xv~Gm<;O}h?&QqQzq;ch&yfDKum4ep#ejl zk|sf^1#8M*nf3Zt#KjR9GW#`F#N=_8Kun%>4#XUR=adzDRm4Z;5{QZLrDm*OqRwu; z<5#a10t906{=KB{u#SP4dHgwL!d`*+h;<;=b#*SW8LLBT)SB2Fpdv2b zSCQGTF%Xl-T>>$A);SP!1fEk?>=lTQ+6H2s5xW%0L?2Q!HpJ!tftYM=mKm}!5aWkk z0x=oZF%UD4Kc`ICD-a*OXn`23Lu$dgK>YPx@IV=hv0K&PHbLot;=}9+JWwp5k=((4 zywYwnGKtir?+iiZIc3>{vP5ifV@^eu;IRyN(+ARQADmF+7VCE2yLRD=yFhXwyMv)) zKvov3uV>|}G4qr{5@55IhofL7kefWG4E!yu0_+z?I0N6XT;ZdLhVS+(nX9NHw#-f7 z=k`ESW<9g*b34PhiB{UJ7I5{u1x`G!7{j>y%?X(IpX6j1swDCgzfA6Nri=HQ2-CdR zlU!+jo&@Q_5@q;}eltV_gU^wGrorY&(ClIhP~s~`f+qx&XJVb!RAOBo??zg zs`ZU$id|vwtq5ME2q3~Pa>>V-EmDal)P>_hzNcHkn+$6Hr+bt;15r%wu4}XrMeuHF9+pO zwmi@_Hb-PPh|qBOCXUOcauz|o6L7gyhWh}fz4Lwk=;G(WuJgU&PCve4WAF0rcwuAL zx$#zi$i79gyQPZ4zq<$rGm^&<-IJa6tUsELC*4u+Tz^Vly?Ey$8R0LR8-LM6V=SWc zKK5-?L7g_LA+Z}TW#c4|Xcb-ZhG~r=Z!C?U%X``lwQf7+8 z=FJqb&6_C_n>SO$HgBd#Z9ajet=WZbWVBbo{?5LL`49dkT$dSI`3<6Y`EC@aW`o%U z&g}L*&KM>sLwBPCO_sWw!`^fnUkwN1E3-qHvJc_|WL3fY>x=M$>|^(%x0J>cMf^T= z1={y{9=YAx?e{&fwqor^t;=zLn{1i=|99MnA49#GU}%bvE*e`s5&rig*w4==-a~zW zb2y!UBqp9jW;CIobqDP4j(29;JF{*knrYmDm9@sfsLDV1N|dKA0hp|mb_CIs8{lv+ zJS$G~QkMo%ipE}%=?=5Vbomw2eTXdk;>TX8d8|M=^CPK}8;qaksT)6^mnnq) ziitk@eAN-iJ{-)ic?1&p$(+nQ;OI*}Zytd}IP!#f1nlYL^X3tVtC1(Zmy;{rD!}Y2 zF@22e$jBY+`z6%WSh>Vv(bh{cwnD`t0lC3>v)F_U_o z50PRnE@T*{h`{f1m<>b5Yd+{q$PdhnT3_fQH@>)U&oz^s&6W2(bKCd- z@^3H0k6asn=^9vrCOQ!-UV`YUL(z%!>jm^RL0{j7z8-o3pkFSgOE+uT%FX_(qsHRW zuI8TQcsHP9my09(;=RyB`}oj9Z;zsQ4@BpRZ_nr7UIlxZq>X-wmV>h202>urA-oURmu%o5irx^ruzk~-y zKUi>o81|w`^tz|VlXJaUyfmFn(7~F?5~|=||KROw`(Ypc4*mb_uYVAYzc(NME+p;v z0PI79bx-v+pV*#^x9cDOez!V1yQBQlToQ1KY5KUoJ~W>!DAkUr(h^Ab&+RvbD3BKA##ns@_M_?J+tSvPZ3d zf&arP<`2&+gt-^?bato0Y%a&}8c5%aDKV#yTnDnymCFAHXddgIA&;j4FAaQX8pK!E z_5pO3v(Zqr17tPn_CmNF_TD$_ZT8QOhg-qSIdl!|!hVjwB#Y@%7ILR6?ngU0 z#^)Xw4N?zGMVx&s6_~^v7pMLHR@@wnW=Z4V!lA3sr_nDu#c!_T40LT7mO6`9?~fXX 
z4ix<4cKK>F1p@?-hIBkA*)FJut8lOVy7gX#}0oyh*GG8YsF z^G{%p;w*Oz#X2Ms%+fZBq=Tmp?ncRCAG@^*{pK2ks0iC)4(?74z;67xX(!)yfMPn? zfB?h6boTPp&0g{HXeCOeF@$_~i_2&z7FHYWwP>{du)jHruYf&h)5F$aHW-f%LaKq( zMbfYR)DBdxb|36Io-)n(B{I-F{4b}GIpvRy7B2+)Yl+0~f&<4_Xs2Fi@T+D6iN2B!N%oRwqiJuP#(4BAKiKEO(Yt9#D8#gXQ$& zS$51_DNRXRoKGp9-ApGj)8OEt&4KQ3Hm+HH=hrRPf7Th0-Wqk2E9{5 zw8aI*n5=%mSuL%X_VaN{$ap9$rlvaU4Mu4`$%}`GUSm`|fJpShJWV3Vl$l)`+8@FygeY)3Qxh4MG7k0zj;5rt58K0W=C+B;q(qlH* zvb%1g*7ub*3!p-8?LfSD`LD=-5!zzeY`u`MCi^SwK+KNa%WTbm*ho8UBn~_5o$3## z^$+_y!qpW#Hj5FPyf0x-%10R+C9CcZ&Q|!|Z?d!XMWk4|W+Ps1X4_gDhnAbD=U7Mo zd{&{Ea6N{KGd@VE*M#tv2>@K$p$Fk3jSyBmMfo>P z!bu2^=sgvL%wR;s?6oUJwT1K3K?H|VEPksqwCRQrA=0Yx?1g7QqT+ZC3Kuns8j7mm_XR-AbD#3gLSLyN~;g;7d zNWuFXpxIW)6%81nx89wQi-SKP-pS^)i~a5nQF*B!OYGl8pxJH8Nldr}%+i2@#8ZuC*lG#-{ea;yr{^dN= zl6Mh!Bwkue-~L+XYXu!IKix1JRHh2S0qyq3I%>HvLT3?G-m)k0bNE;UuduRH!Pl&W z6M#vbVt>;jXAoc&e@#@qvhKaAQphRTLKe>=vgsW?maTPmvTgC~sq}syKn#7KNg4T4 zLQK{)jBgy}M@uI5YA*P@R#@r4>l)TP!aFU(NQM+;cu``@d@(^=X~Q!Sf5WyUprJL% zAnkkFx@YaX%RV_W2R6Lv8+a6|O%0tDXG{yvr3ppnAi3-8nGZeobjye7H0^}pMc2lo zY2NBJO;$H*K;mN!c%#9Yt@NU8<+)qqy=%B^0tt{d_Tf!Aj40FQo;L43u}9NA;)|GmwE?#W@MqlM zhp|W0;)fv=%pMJK1HKr6aa%e`fn0CFr6j{BX`L%J`d+3{YDR=s9~;O=8(_Npv4{>r zuVF2eB!mqH@)&soVxb)Vu-hn2`zDa2ketwnSW7Mt(+2k7lM&o(4inC;zXKeDM3)Z7 zCwN_7^&icBRZf$0QHj@i%(mon?f}5gnK8JWpL|cMH3pYImN;u%(vlZ;>LES3YwDE# zZ_Th95t876CfF^1EC^pZ4!&%aTLeJBh-0MfsqP?^3>`o$=B;BYn}XB8_`vU)!5TD^ z5<*}RB6E1e1iZ2qP;lvs68t@@c*EcXCMcs2LNUppNCDk{&Z4)~7y7-B0x}0SuKObe zglb#E8G{s*7s;JL05USmkGi|IK?(>(EiVO9z?GPa#!8NQxWNfzdSHVTkPqv4@k0u1 zM!?JdNKmv@=e1=hj>5qPWU^LB0r{{_cJ@et4YhM<@!!~)SrPK>%*Y_heKR&l0U@Fj zpgmGxLya@#FrxHE3dp>JAO(b&Mx>TVfd!>@x)~stJ(`@{3yvY&SRn=E!(1D}g>lnd zB|VD^Qa~osDs?T+NC6=!wj{=4jyz~a3dkolHdxe8USg5ABNobWK?*2aLZ4zSxj;-C z*t0?k@>eg}V$61vfJ^t79|ps@kgnyjK?*QU&P63&=P}!o&((_*khfv8BL(b{0{oa6 zDagr}G+r2`hp6O!sL`+lp8y-AAm>8xKqFFsA8Y9*M6-*#7~l^qkOFc^d7QtSWn5=R zO{X1Jz{RIRtrr0+pto2Ty9q3Uq%Bs!*Z^vq_*W(?ZX}>m0voKLlPMy`dkTzL0Tlur 
z#D0H+&8`Ca9yaUJI;?<+zS@2so!XkeYRSDl-~}p>$y%+%lMid^hju+!0U4R^J{3KC zI9@IuRzMiG!V1WTb$a0%tXOzKz__cxW(0~AWbXyXjTPMn-N3sF$ox|KpiqSsRzRNA z$<7`tu%Tv!6<87S#R|wEURVJkq7$G!R$xPoGi7s|YeO={3Jm3Yku6+0BQgoF*;PP3 ztP!atR$xKN4lA%C6wDqCs<6Te$c(v0q|`dd+I2q380Hdc?*W-E*z77GAFhlQkO$3J z0r{lH1|_{h7$>RxA_3lyVY91%vL$pX#xj^ZIHxdSv#TIKf`~0M>%|<<$@ndGJASYkac;R*)49^WK$hi2^v^o z1xG4j1yl%yDuV7Spy!dXF0I1~YJdR99N5UN#UZTvYeH31Kh%U3kdaw_{Pqvx`RHoIi0`jCzcJ^3-4K*vQz>1J> zZ$<|3!U_lxodE5z0vl?aDVwnZ8BG^AjC8xwZsZ6DA{2JR)m7tqoF^y z!V1WYxkkhV<{^YyV)xLh18A_q3docyV+G_vGgd%8sj!H3h;1z)uL;lP_sp>xnM#K`y<@@PXC7f}D%0SV2A~ue}2~4V~HAUs|7V^K%b6NK2+LIgA~2gml!HrNPC%^R>d!i$ zQ{qkr(-qf)iEu(-vnRgS^cMn$53pu#+Sd@VuRXwjY6LWUiY^57B=qV+Kx%AX=O0{D zJAA$tQm7PhsLA95ikHJ4{Kc#9EF$>52E0C07{Dk*KubffI0U4I?A9O+5?Uhv&;no8 zz5p-D_olYT-xf;Uj|8!ZDuqG#OaqQqGasQD(i{>Xkx>%NR<%8nzo_M{lEM7aD-k=@ zq$zYw)ub8F+)bVZzY)P}ELqbZq`*~8>dq}{Ne)|#^1~Kyg{AK1us5B?N1+q149A;m z>lG%Ko;;>`t(vpjlo`&mOHL?25P}2hIFw)`zMP>$mu)RDQfn* zrKd02?)s0?Y<+{J`buXp*aqGFC5y^tQgG;ph(z!!!U?wc;?Q&b-l#jp7oK$clgW6} zE?<9Az7Zv+Rt3~gRVT&yo}aJ&o||+3Q&$GwXb~otRTxWYOn^@R4%ta1`oKabUH&9#%y+{ zo4sLg(%n4U-+ZDw-PuGJ2B8~)vYWd;BZNF9qbJ694#&M&n_Q!joRnOFDhVP|0cqyN zN~H=Qmbz@ZVToI`Q*v_;x!1tas`#*{9EJ+5utH0yI06JXwyZRt-Pe~pY;9VR??*$( z@Z`==<4B=N1#uPnPz5z~5Ndj*7J9v@nJ)4dH)*Bk zIipJrxRb$@1~A$=*F{&gP0`Ja?d*2M_Jz7N#i*j1|12z|{h6IZl=zDA<&RbUf?wXO zB1WSI(?0suRu056lP#8ahl3~jb?Zr1b)bbB8}-5ON5HkJR)QOk`qPZFc9u}<=C53z zu)Jw18~Cq&+|4B(%Cu)&GyXZc6R@&jQtidr6{(50{=Fz;3D(>FZG-N8mD*Si@ zZmP_Cel=u%MWkgV$a-}adJ~^$ka%dUkdsO(Z*M@qvQ%hK)6M&twzSqFf6TVxUb;^I zeMZ0sE=i1gEJQ3kMQ15RUOlVcUbFTT=(fYsMO!DhW%|SXWxZ zCqLYP4gVN|FTZrHSowVmzDg|I(pm`U);RiI zXtTx)ZSSoe2b3Wcw)oP=Z7nS4Cf=QYlW^;rHqK~?7*esBRe=@(>@+_oK-NbQAGURn zj28mJR#!qLfvLb9sv7vMh~E&VGz40qkPQiN+RSa(R z-mIm(j^sA`nV-VCO{zYHQzd=1#~x$+ny@iTIYR+gv`a-8OROlk>T!`elgiuRU)nCE zZ4J)HyYKJ!hIb6kTP~7i`IdiU#~?-Xd_NoU*JvpTK0DT6zGNGkGaM-<;I&<(CF#^| z@e?UH;ch6i5Up8XTkzM)RpyyVZ)<>7GpAg@mPAa;SPO4~@70Zxj@P{~?XKiStmIV~ z`^`GA^!R{Okhk1xIv!2+^yO6Ve0Q;GUe=7&8vk-lm2g$~Z%NQ@dVOX@uh 
zcr>J>W;@$xm*1JmcxSu2)!)wdIwi*?K&!hcV09!9)IhYm3cuKttp>Tz7K>zaJep0$ z!&E5!5xU@P^8$0+fdt8OA1{~AV?m`O>b0<}KHbaz2s{)&SlY|)8$mHU%2MouO*kFi zfXmuK&cA#&(%Wn>yO7ipT#|Wd>s@!A#f-sQg{|3$)&fRbOpDdlZm(eFt%Rk%lWRTin%DJlvkBE|@Qy#e=x zGdI-BV&piyn3KA_LKVI2&}#U_z_x|=5Fsof`z8siq~NZu6duKt_QBE#%oEzP2LB`q zX?Dl@XW}OMzbc4}amMi72#$wyjXYYQ6hZN!fI3Z?=U!>U(N@x{>dUJlF#J%X4(>Yz z_jSm&nhdCIDxxHOAZT!;XCllePt+ja4mA&z_oVqGWEoMcMFw;hq==*!&a8c23O5uPRj zHs0HWQ=!FCZj_leIcU{~t@M$Ak`WbJc<2`GkROIO#V5mb$fA~&W*Y>6qNAxA2M*8( znjG@blFUa}r>U+Ug@@xe)uSs7nDMg_Jml?F*`gnYGx8Wf-oV96vF>m5d^SYhC{Hh~ z+ptH@A*3Yr3ApfiyOa_HC1S*xsh2FUxoi1D4Y)6)5uO+f`{=Sq^#6%}XkgeSM&q9u z7^2ukOx1e}EdriErQU3SRw$mdnF)M z{jqRX%6N8og9?Bn@ggoHX1noP;^vRz=`=V!4sn=SYrU3n~Z{WbnF# zKZ?W(H^7<%B`5U*Ekr~tEmECabTEQXgeW8E9P^5&C?f)j6R9XbHlb6^+`EtvU+mlF zmUO|;A10%7O20vGR-iAIZ?ly0ND|nQ3jtY+04g@GvOIOGhJ;2>={YnxaChpLp1+1r4SX%$W|e?T}JcDGmjL zVjG)3v_r?Eu_grSOR1KWM?s!iP+vMOS?cE8P?bDW=wT13ez5W@$f0GrH8)d|B5GP@isiCL-ThmcaOR+;nP zZo)a1meYPR#hj)2UDbrvYX<>(3I&rS-#YA^BUaXgcQwF&MNTn6e0C;zg?iI}9O~by z=qP_VW1Zc;sXR~uzR-Y2>N3c4q;vNdf>zjx|7wm|$hN%&v9bvMO^u?Z_q|k%w6f!r zleriv4VSipgwHkV%T~xV)0PdXJ*m_D>m#0C_+l%`!X*E+Mw6uQRBfpxQ(7|L{7Gf! 
z*e1i*Lu`dtgulGvD>~)cY&iwCYSU{NnCpMk7%muur#2~I>mJvYjHT6K0QBcu4e;NF zsJCwx3#v{07#Z^4pRZtr)U7lPa(Iw+D2M+Q)mN|49q9Edvd!ZqFM{^r`K6LjW}o3LH4$h12tYjdWYzhqFG1Vw|ecF@AGCv=P7SvVbUh1ue# zh%Q9o|E&p=c`KX)=V~iJQiqwNx@njmQ7vx4srb#cY7y~hZD;V46-s$C0*6tr9FDsS zZ48U|GCw49k0J=u*t^x6^}46WlXJaUyo4h1>mR&bv*jqFo(CUmz?*Hoz*aP3K&YG{ zYzJb+{??k~TRD2epET{WtSw6_;EDnD=Wbw0&sw(C*ep(wpa$8x2&uq6kbI^IZ>h&y z%I4*kD{K|Oy3QQbt3H6dh(#@*{Ncbie7+&O|0&)6iBDl-HLanKZ=J(gLAa>Tx(L^h z&ozbVtHOi-RezPP^IJy;8SF*tF{A1Bc-rr#*8~=NgkNpg@4D77UY09zi6#8o#P$lr zxoMz?V4P@Dv6}BX=N`!}U4);ifQ4BKQIVHrmyHE*g5*shvWCnE;L}Yw!#byKWw4TL z%2AlfyOO)&LWs$}wXMX8EVrJe1^VOgX#8-c0^M?OtS{S)x(=Ue!eCCSQ;l>CMzYBpJqeekneN|{T&rV~Nu;w?RfJ693T_dJJ=iNZGObPv zd)uS|?jd+Ee!wHZ5z&?PVeQHA(WdMovEYj1(voK-R0re~CR`aro1rJcw;oH;qn)<|+)YcFvxx--6WtJd|wzpijRt$dbcK~`z% zj)66)z%ncL84w-CYJHTIE!X9in@fP!2j`l{Bi2`81G|~1=!k|!iOGLH z$LgBrMVN)3_RM`av{Q!{HScc-z|-k^PZvLkI(%@MFClWD-A6{F(u z?#TqMMHdLtlL=!m)tqNfCX=RjPbSP(Wuc*ZG8qq^J(-k)d1NFRFZK@X+mlJzHl!r4 zo=g~!wQIkgOdu>erDit(laoUCWWuBvZoPZ5Bkgh}6Q(BNC*wsYx?G+I{dzKf2yF76J(-Lodau~GC*#Sg zye3=tq7gid^KkP9}`O zRCAs^nM|7AJ()0Dm4${%NHQKgdon2pOh%#`uR|vq887w@?Awz`*{-uE69#1M+OH=Q z2#Zcx1dPyqmoPBHk**K)?#Vi}^<)A=DrYwLWF4+2qaMU=u zY;`U@*+)G0F3JSS^kgqJ?9dF*ky#47FyZR$m!zBX?#Ot-i4GT}Bjbm_HJ|4ynT%wB zj*KU(@|kc6u-wLYF15%Qm{0yIvtpnBd^<83*M{`x)sgYzktSwNstetb$(b>BWIs{i zS{%u)!7Sh^`!Tz4?bVI(Y^z+oh}O0X%oM5w!{p+vUYcQ6*nDxyAcCs({bXXnx zyAAs*_+-<`i!E#oiVXCC&#PWanI`vokuATk1i#d@KSa(_7Uf9BB%r`{kUZ=q5UnN) z8|%@Riynjr;^V$A7fE7?eeQX`-wS`dX{o{pw?U(p=uoTTtlk(qLzVg%JQ6?TtyIpI zn0^dC>6v-Tl?vUNXnkCuv+MF#qRYszyAlJefHliaLj>>jd+k8dv@*Tcb!_$C>-m-AZ%tQRu`gr5wx$R_+3mQ-j{r?xKZX ziQq-rDaUZKHyQL!4f`iMRwUD~Jw4l-^tZaB-no9|BqD13N4B+osdOnNhrizb1N)`K(?h#_2HSb_X4*g0%C*V04B%oy(i; zYhXVzi~%}vvfs_wMJHT65B7J*JG1Sb8UE!oUW8N(zZAhCtt#7-J~~0OxUxtuh9VsL zYELy`wYzk5?!sIHtrUjDr=EQJ2fcj?)b5f`nRAzUdxDjJYTSj0-OgR|PpjO85PSBy z3z@l{yX2p)-(9{ey?UATcs?WGaxL#bZrkdeYu}Bt&){t+6QO_ODak-DA{pqKjTmL1 zWmGR`8LhR5?n_^2-9j@_8xF_U8!{0)E^m?QmjiJA{8j|7Vfn$am;H@_L(cm=W?jlR 
zSCf1LYlU#QKxE)ek`>{b4Y-44t}?rA!BPE$C%Yn+ot{DVW$P+j!b27E9B3hJAuGdo zBG_QrsV;3H9{5Q+h;R&IfIo;patk*r1%pz%t7Wy4pi#;EYl|@O#}T}i<*2e=QTM~v zxRRvpVZPxb34EskcX~1`1`b~V%C4?ur|4j1+QQmHAx`lG{%7PFs*xiJ;lo$va|s~7 z6M^I&a+7M)_Oq?8IG#@`j;l8Iql#lOMd5-f!uYdW-vREDZbZ}HB-a41_R_7$37CW+*LdEX)M!i>7TPETTI z4zsulU@+S1Kh>RGNY^A*nqEc;f9lfG^7>GxCKFSV{@2UHO0}whrDdXoKXYknd6vaQ zRlULD={YpDJON3vqI*ERL@G~t7K#;XSafY?bp%piQb=3AQZEvhmxv6)NYOya(Z-d+ z29iSwP`{*8*zZTOt)2cnUm8j5$D44R<;AM0=WSW0(HN((c^FIbP!02M<5WfiPuNtY z*%E={`hSn$HgB%DWF|`=Y#C=f_dtfkc*Pnq!@-|7;Pu{2i|Nc?j7c5-5hz{-?XrLMgI6fbfcU&C^^)(l;hUJV4BBIN- zL;%%#VzOxBE)V#I3tYppDTa;eFak@>0j^;mFI!XSWu1Jq>L?I=+Xb-Um=l78pQMWb z;ch(|0AoD`FsiQwv;3m(OZDz05*)tn*f7(uSIZhOvCVM72EOgsCNqqR0m7+aW9YfH z$qa$ARR!?{c}1Z~F_O@fwuw#+GRvM2CVb@`L^v8Tr0+aBV7v6RH~G5n92;XY6E=m{ zf$frU6!<4@yhwlyi0zVbS^h#}yJQRnZ+TN(znQpTyN0>v4Y8q;F1##E!%QaSi0zWc zGktKCEVuu_#8e<`m&^uL&BDHXS#$1)?UF&T2?{$M9fBxF&n0Pb1SCmIo^UKwBA~K` z>A9d1WR~ED?UKi_)csQzM=uO#M$E|#+vR2|t938w6y}EQnt84Z+a)90B{4T_myD6w zvP`4y*shVZXe=(+u6dA)vP?;)1#Z|bc|6z3vn`pqVY>p;Vh`qkFA5Z0uw8+KB&DHx zp_b&NfNKo765Oy|kxe0~`OAJzY0VAW?OX(G7mvw@c3sEmhV3$(aX~~<W^@1PBi)==gQRb{T%9 z)lvR>df+1!FB}_ zmwrlf!*)eBg{0;$`#GgGH*ELFMZk9Pm~8TjwA&8duw8~TE{Fuec5@~~2*!OEup_q1 zuqi?}4u~T|%>~#^Ms4j#gx9QW;V@(~Yc z@s+nOO9tWD4ck3>$(+eRX2RAM`o`GpOa?MeRe4KW$i*buv&$w0_w`ruL(mm}xY4wKnfw!>~`GLS*A2@1`yLlEWYxg;%) zK-u3VHD(cxU9e0tp5rDbw|%>0P%L$!!u6GUAbA+9+!5R5IG5GBVr(L{Zw=TknF|}@ z^(o8^+a=><+LrQsQL$YzJx6SpJg(%<9osdM7LCOP+cghzaZ{NawkvSOB{K;u;}tne zz>Ms@64uF#hdwxBy8_c<59WX`3KU(iU4g`2RCe&3^XGD-R=Z@GeFK;1C_{zJe6g-5ZNqK2tJXz&LG43`v?V-CQ zT5#Pkz5*yc!7a)U5&T8Pb2lUdX5FJYf*58XD>D3L zP^vW~eN6=KY{Cx&QMLfSRXWK;CDEiNJCt~O;Z^>Um)9Z&BY<}_;O-!r5aLY`O&Hpj zD4_V#Ghrlm=dz;V&-+$ckFH+1v?9>|u6T~1nqw{oSw(Dmr8`(kZV z>Fy#m*`zjmyd)#hyBlG00&+kh76sBrz-VI%Z0WUTC)n6yI_j6RB3k8^Yg|$bNjfJ| z2l?KGY6)^g>9uB+uB(&t$i*Jpv1Jj(m+w;C6Ht~2zUz}~3#AG*PXHMvwb+_A%J@oK zUhSwGj0jM=yDNZ839%)B^ozLSD-<33U-jv)sPXBl8*AsQFZiVIr@2?243}I~5gnN? 
zUNl0VN0@;h^68}*mc=S2h#Wn7DRye5*HbUC*z)!uk-c2G1ku5K;>$pGLw)u(^|0TY zjL^;T-3hwoofIHku&{p_3!eG;S3?h8SPtabP^bE{-m2#ir%#`Ltzn3c=Md>d?XU?i zR&deZo(_iN5gDX>u;!DWy}rW-A%z6t6T9G;Vw9cjEu@bw`&YihWpBA|-?qIyypYN` z-b24lUS{`jQhkkr-Ljcf$1d5&yQ|3|cAmJD&MXBn|0KF7JWc7U&#*V8Coe72DO-4= zHBRkcJBh(0l2hdwoSF2t2K~`&+GhVIQ{_Fq**!bMZu&rbwzEC#w>QV5*k=7sKRIkYYm5yS!f&2`a=VPE=^@2EeWp^Mqi$BmaSXxborUHbcX!5dhvuFX{C$ZGg(h%lB>@?vz6Q}V{f(_NDsGdZ5i!WW?<^#Y+Y;T zt{1Pur+2}9UKWdr0J54Pv)Y%l#3fcsA9skF^x14-A#vD#Qp_D?isw||-D2gA!;rNF zBe@0$y)N6$h(0O?UwpCAFZ12IS7av<)vvu38-u?@s@D~LI7>Jq2pHn966^J`++dv$$9(L&akh7(EG(aFOj=+%F zudyN~kGlk7@~m?p<_J8etk|m}K4Ke)Wky^*gcbmXOFSn!3=oLP``0oM!1E`gW~>llcc$DdOs>=lTQS_fjC4ZBphSRGOe);Z4k$j%j=%%O5*o=J?8htZHY1ZrP5RCdRGw3oJt#}W1~=wZWC_VCWc-mBs4oS@~+rJf)BX*sSH@D3}T4CeJAYe+#Ps`-KtC zz&9*c_;m5xcki6cd(=TZf}h(1NtyM`w$JSh<0e{Zw_3o}?-n@mxMB?B@;4`7-hYyl zVW^VGPy8~u$C)nPYa&eZUQcqR`FRqg3rm#YH~P&G5ez;@0-6S!BSEu^EkKE{90{Hf zY>ot>2{=bmY6qJmX?Th`7O0eO@KSC6`c?!lQUnlT7rEqP%oeFc6Y9coA>Y%j;7wAx zz(3ui+!=^sa(7i5mARc>x~SYqE=T17+9`IvH<`Ap@5l-s@x)yQ$>pf*qL}XGsGMzZ z4HwT8kt=;k;d``E8AYlk9G??KT+V})ayMxf?!s~69hskAhUdvyju(%O@7+#btg57P zIlM&wchpi{4$7B<@+ey#Xd9a&vKvHbID8YwqaKu z$ZbBcwt3^o`Ccb8MPl=2irD7O6p787DPo&9Q=~SZz|z+2!ZtG6t6+a;U&Q;h+Y`yOWulT@a=TQ8!gE?o@=;w!ULm$DDy17uaf`|FGFg6w1W zqt}VX6Gi+!bOqY?c^*2cbG+{%deO&ui`CWP4doW$g{DW zj`@!OnJUL_hk_bbC2k^e*(ucN`g)#%6SW>C`|%_AlEByz%9yG0$gA=aYey6lQ}1-m z-j6L~;EdWGHnil^ga0frc+{FfQd)_+J6h<@VO}{^g^((0?+*p$UPYO+uLni;UZvH9 z%l0^upA|f-4FkWsCu;CU%B0CeNhvF`c%z>NaZ{+^##4hgQRa*qr1iJR-c1F2F6tt5 z6$~2GC9$5858h0fG-@z!9YhvymJCyH?c$brAO+7FrGn^#w@~Jc8l?S>$lfiAc~qBu zAru!=AQOcDLP|A~?pG+6Miu70+5s`quPB(*2s^me@!S9%jgQMM!d|gxVN@b%F_RL5 zs|p6^twl0J_(BBO(k8gm?DmGke9a>?WE{2WLjft85=4%f9%rS85(B zP|o~Fs^kXar+Mne&*udNp}%6Hk3L^@1hNkY^J^Y~WDqhZGY>fWlFyq*Ac2WIVIBc{ zI{CbL1mZO0iSOm)inj_dyGl$SBRev32m5{rH8oZ)u~@YAk_`EV0JM-%wV0mUgtxkn2QS;hAATOyBuc2knx%idKfPoGSYFF4@;()j~gL| zH6o$At4j#Yf+1AQ$w6m%6MS8`_gc#O*WK@J{ZcZ*TBJtbPju)X$J9=Ho^vkR27NlC`5ljpY{{`a9Ra)mg6Wof`Jj zU3P=?n`!**tKKtb+fUd*`}hgFt+@Yge#M=k&ov* 
zt(kf9R1pd7HYy^YCCnIAL?5tK5q*HQIY$-YG-{(Fl5p+f0ZH?=D$1#|998ool38q2 zL_TY+BKm->is%C@LyjuKS=>fNB;Q-ADF3*vit>-M992^hNo+PMBA>NZ5q-c`Mf3rd zAx9PABEm*Rq};GnQT}mT73CjiIVx22%?LEHh-->zLOQArt&#_bvMN56bNBt##$uV2 zs0}T1XwnEb`8-K=EXU7sN#40sdM4+SJG0n?rLkO6RUUd7EN5k4cf1`Za8R{@`p|cd z!JeW@{OYmzE5|NZiF7YCFZ;_?Vt<&Oca6&T%T?lVfV{0uJubb+*jUJuy38iOk`z80$>g=u}UcVnW#R{0B-Tg%F%ebnMJ+6 zvYCrSh`0h!$^|Al8jzi(Br=ueBompF10u@QYMnA2OMr-~x&v}loE?y(A`_9T15yz= zW;%9a=zHf(C(!Y6K!zo0ICm2jMJw{k$sxvYBs@gL({UFSPe*1|Zn27^>l&*BB1Bvc zu}Y-m9;-x3GLgB(DzbKoD=CL4!w?{%@}%M(tJt~7MC9s#jAOeM3(Um4Viixv#{n6Z z0>&x|+quUojs&qNhnUCHaTgU&M`l%Sv5KSX8mj~%L|hKBN~Gi-t3*mNk-5bxvWV{- zs~7@AR1UFgmiPshgrO|j||4X89fZWV+Ft$u(-I80!UiVhK47-G5P-V|hJGE4il z22=_dDz-C&yC=jni+Vm3xm<`32b!Hsj2~`5rC`E|k(xN_h3_l6FvPr_mz|kJe23OxX*ixv(OVfSa09%utfX`O-l#i$>rQXd V-|F@!lkudTFNwbIdEVac+1c5DS1aw6gqF+0WP&lYSoKct>NOms(MU+hMh~>B6?!(kGrg;p zdb@}2o|Tq7j1vb4u@g?5R^2@A_}OF8kc_XFk6N-U?T3jfaDc^WAg3 z$+SH_HSJH%^=53pAIoC1m6X)I1dFTLBk+j`US~I?j;phSQS8stm4v7L>#!8XyIKqX z{Q94KhUFz{y`aR)0yI{4h2o?4bX|PhWXHz`8*q=mKJLqm?tW}@@g>}kNhmAk8G30+ zG5Y~lm(nnT{;jnD7N-6FX52WmtF;HB6DSZheH;JaB?upGgs|f2_NlmeeEU=q z!XtW52O%>UsZ5?oG4hZid@_jO&TbFm=3U#vB(&*<5FyfP2vM~R1`fa6fLpvhH*Vgt zxfz0Mq>>+V7(B z76hf!1E8qYsQ3jj6Ss#~~8D)bt!M}^(YDNaJV~%<`>-K7HT?zY8(C&59hP5|~ z7vqJyU;n_Zwx~g&T6l*=7&*}!JsvNl|DV`0U)YW%kg*2OMsTM=wB41e?fYwJh+ctq zeAp&L(?JvseA>LH|Jd-RZ{Sg=HZ^o~P%hgDzy#C6d+}9n;-CHCqn1s4FkXr_K0Gg$ zjr>O&@Wv1p$Mx=FD<96LiBC0KUJ_%wa@K|;O&75y;sfwZBaBVchCYmkpq4%iA=Fgm z5J==FZWS8#nLq-hjeU4i4kJpgx%aYMbw!`qC;VCiZVlkixWNx&MySOPLnxR%nuPd; z2wrI`M6S0Gnrj>~xHS#K&}qFTcK=@XB6cY7kp{GFc@vv`YoR0|Y*5>LcnL+`fEZMh zcAF-Uq$pv8M#Nfjfg-MnmybtqgV_qQev@4#;#z+PR0_!_r5U98|1EQ0l@G_cDCdou zo>S(D9vq8)*b*y!asnHX~%4SDf2=;*4$(1DRhse6jFvgA|Y_b+WTZ3T&vI zwLuE32%*kQ(c=?ox*-T;aIirN$cJ?Tv_}eTsBxxjZgOo%rbvOIcn>5}8>GNu#+FEd z1*LYn8Ni4XSP??@I3FR}AO&Q`TpJRo&nL7|JE33auMS06Aq8Z7u_ZAUbL2rYQb0bb zacgaV;(`>AF)MElz#yCrQb2|fmn^L%7bXmt`~+-}f=<@ea|-fC3NX#NAq99i&PC^q z6yzM2JlP2;$W6&w))Poe{TvSC0@;TA95f8p}SjQh& 
zAO)SafD{-tywbMA3b^=G#2VWMl?MjAu!4>QRzT!p+%eH*Y@pzB9#}yqQ$)U}z=#!4 zAvo-Bu-R2`bRJj%8Dy?l0U4A_PwHI-T+~-JH7+#;nTNayeQyUH@K6X50TtP=wX40xQnQYXYIH%G{gTObcz`DdMb` zh!xIhg%w!Ml)Nrr+*M#hjWgviqO2V&AbeTvDj*-$__HNeU_r?aE3hI|H&#F<0XA3x z`LMbXG2t44fCVxUJ7B>ID6Tt(k zT?M&Hs$vEBF*8<>ldm5u$Ymh+LyZosb`|7Y2p(u`xAQr9VFfu29S1g8!O_ZC0d7$f z_OS#0tnjI$pR2t1?z#Gr~^pDD_KWDwQv1S_n75YY+HeqVtNHO`dHSb+`66f2-bh)bJ81~Ih~ zR#*WcrV*(nR$xKN4lA%C6wDq?U7-zDKxWJ}B9eB(OWR^Y(ei%9!3rxNyj8{u$b)9A zfP7M8gTb)^GG>kY3dj(Z_Z5%_=M*Mv_7xm6U@$!2K@BI5eZHXU*<*EP?7`-XaKn$QWDeE_-r^KBMrYo)o6XArwW>0*t89+8j z1@yfGzLe4T91(l;A^fLCK(nXlLO@SKuPy|n#`bmoo~hcH@YRq)rHDgKCLd6|9QNQZ zUV|OO?=|4{p~3)0Ap%+&dc`3iHDtF2X^_wo`G*$xiuMI~Npj`d9)HD`Iv5ESAsm}* zl1{nBf=@NzST*wznjy_{nM6iOFk4kLGZrFH9;@iBGx&T2oodn)x~6K<3~25q&w}5K z;I)>l=?_x4i59WPpc5adQpBoGQ;4b^r5L5_ewx>+IlE2APk zmtul|YDDRw%eEGnsY&qWDpE=O?4dATU#V2x0+Fdkhyv+3e@Cj=x8S%{1Q(8TjrqYh z?DJREw<>Vco!A;$-VUb{v)#|1si?q8RBu#JNv7HQ8cX%9i>Ec(2HpHXhRS9_XNgvh zeGYMq;8%qcY>|WpqxRW;Z`7T>Wji}Yxj&hVC+*|-r`ranPxmMN(MCT$So&2`QD6yo zu>`*1!Cs#P=YzLL{E^&B9(q%BZN{v7YBKI^ZuF+J^mys^Q~kl2$7l3F-g0leYuDRu z<~u=EgYqun6$Gb=@OMWrxuMxQR62Zr5f)a{6Rr=ELl&0S7t@pY<6ZF12ySQX*KIPB z-snkkR&h)Q*}J|64YF%B5)QCb$psyg>@k(>0*omU%klIk6aGA2C1)iA)tpY#D17Pw zERE5fDfvAr&GdXz?y3OgcvAbyr>_@egEg?Ga=HCcxZho+$lhmz4Rn!7Z`hl3Hy-bA zpj%D0H#Yj{Vv>cs$MR(4>O6TBrzqvftn8ZVt{)vvZ!&`%EVWn@?w#myk50bV_uo0e3Q( z(f~%=XS-XI{uCW#-+moRr^EL7Icln!{~+id^=EbtQQ|A!ov~NdFZkunDq>XWhQ{=? 
zkA97n1F_6xFX4BGgD3lS>q!>E_~W6K>Vw~pfV)o0N^s**f0}XD&Jt?f{FUnyHneLh z8~Cq&+|4B(%Cu)&Gkzz6)yjrRwHGhkJ{S%YZ~c2w$Ye}it#ui|V6@qPnifzh?}Q&~ z!1a}RKi(hqH_&nQ_^V|l$a*!7O+L{e@z7WyCsnolwg&VoONI6{-MpV^ON&wR$80NZ z-A?b`yZqk>HtQAvQ;^T6P*Ui-C26bRUp49zK)`;gHo>W3C{3UC7ZGgKWi{n4t~TDw z_F&l^q9y@dR#My&YE=kW1`cEMSeH29$D43p0Iq1tBn6KbkWd%5$;_^iX7ic92Hjwh zKWYS4%sgtf9TKolh4^t{+k>N9QwPXv#u}U|2}{#hS6c40i{NiHVBJ54;L9&vD^`Br zg0B*bGSXTI=;Z9`{ro(9Qn81Sz(jnfsHqI#+ro;QjgT>}6u>$hIuUH(s=Ck$m4bgC z!K_9sN1R_2v;DIS}SV>5#j@1q+w8zo!Kr5Itw7s`>98i{j zZ1JUEwzaUFn|M$DO~S2f+Bl;nVo1eSL_viUrHcS|nx7LO>!XMd**ZwZ3jtxPE1{CW zl&(T>bNKCu-w>uW1X`hx5mITtHQ=CNFYI8oM=ubfaDMApHoNvb9}Bt{Js9-yk&-`#*mLP~13y@hu9 zotccc(FTwHR=(FMIW7TOebtfNUjxzZ3jAVIwi@IN=MXdVOf3pKET89VEjPoeE?s-$Ly$l9zM{7)8P%ctS#jH zg}!PDF3G$!c7|%~QFu6huqKUlSCl&YhY{T4tus`4;S|emsqv_W{!zr=E$(Eu{G=%U z=P|7>G@u(!R_kOl_8oaKdCTO?G`>>$>m;;}67Fc~Ao6W>N}IG_1}|N`B;L35m*l@l z_>qLJCZOaOa7uZbSM+<5O%?7@U6nLGhu0I!&49UTMS8R?@5H!aRS!ojsd?2v@Wic5?ADmG_ ztErA^k47oGtnwobcqp7+#TFR9#+BUX-)GaVfn@g+;b|gZg8%vSGq2VJTFlQ>j zOtxjGE%a->75ft*RFB?r=aw6=>g~38m(DOKm=I3C&{+rs$vr$D7t9I;otCtFu5X2c zj*Io_?-D#gR5#{%sp+y*0dEh1IMFGgZ}3|cf#ZqgMq-~MIetB;AW)FO>k|Gb5-Z#Q zYZ8>4)DN@}5wWyLb#l?62tF2~jG%MOE1sf^2q;dZq5#>1P8DYp+mum=lIsf61w((B zjLs?j2EAEAr&**|~TB($IdK)h6QQs-nLUd3+qLy^{l z!+^YWmbrT(fzoC@pinba1ps;JbTbz%5@uw%bQlEWrIQdb!Ehii9fI`00YF}nCIWds z90KH}bXDMCP>`37MkEseIHWX1g%%;M7Lb>YoAY9Q`dFB4hje(A;!rRswz2s`J9Io6 zYeJyDl&V^JwV=LqT*{3SqeqALJ?ak~iKl!@I!x|0p;3SK)Odmp65JSX_1oMz$N8zK zg?U%$EI$wh*ko3!P8e>G+2vq~>=054-Bp?M-)_R$x-=lo@2V!WUONcTQ|J}r(w7n| zYr;Dl;J+fLm>@nole|K`;Xe-bZ&h@Zznrng!nEp>;XdDhhwC!Pv!rwP7lKyUiT`Sj zS;*Agt(%O0Q=@3Rwt?W4EWG+TpU4EZw)R(Q0X{Ie3QhQRT`PWB0z3|1>5=r2T z;7@BbNeWNZmRd5UCG*XnRCa27O5YuSEyPxMMfl4rzM@mE&6ZPOt2Vucfw}%?jp2ep zcxsaZw(fCV$yiz)20(wl*#Q4-h8ZjVL&JeZ(v0{Hq z&GD@qz2Q%q_F2}JB^7YRfckScu%u@#+iGkUr$`hwd_m`oPc`9*dc37Tx(L^h&ozbVtHOiRRex1s z#x-=x!9KJ(bu`@?Py1cPoqrMhTEl+VwTAJsT#-vG=6C$PLaiKh<_el!eg)#(G*Cn^ ziyLlvAqsaXo=Wdbf}gB_g;@$wk(XtcjRkOmR-2Ex{_?lQJBfQ 
zlDpzUh{?XSt;C8fw?0k_^vB?l_@PP#y5-#LT?cZqGdpI2BY zlwgTQuEC#GICxMlA+8c&3OK(1w0_e^K1eEZUJa^Can)_*8Wj)x1LxM8*agS6=nQ#l z`G8}pk&eMgHhH5b;j%Q-{do555fcS z{T>01h_0*;`&9LjrtBiI;ELnYl=Z=q7IJ3odQJ))Pz843(fHv23gj$@;Yicyt)*v_ zc=|xO45gjOJe)Z>%+^S9U9*?C*VWnm>k7xy%4b;?WR<4wXghV_WF8_nR6LSiSk}hT zSO)K^a8!v|bc#Iao36^*sc^8XdDc1k75GqvJ?v((^`}6ITB=C|hElL=faa*htW$WF!OhWIS1w*Mv)f zK%zt`iq1QSYPgJG0aO-6Jc%+S~S9PXnLEG)nJ)4|a z@16{YT=$+V57}Nl8PA%^ZVG3xbsz7ZY-JujnM?x(uRG9_30y03k>5I*Fa}fUdG=&7 zX?pi$!faI*8Y&^lc<}7Wq#Q6AiH5DviAKiD{CYAe+jaJ2!hoz@`}Jf3VbLjzfDyVU z69#5DsB(Mh*r6Gq zBeN7Z&wTbaSz*Xcf5u9MNjs5Gd*=sPCy_ogQ19q}6_;l#R{%*tm3O?C%@?r~H zgJ}492D;zpRWGGXlY6|#mfu%`UuxPPB4;U!awKCCP+&Vq9`X{1R+EK|_2`R755WEL zao-n@B(cOk_q@;Vg+Jc3RAGbz18a#6^HJOxs<=nt;rKys#c}px+jD)~GxL-y6}mIg z`nW)6*X6H7myz+=mDuFF3*{Lg@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR z{HzEu>Q1VOt|_#9KMa1j0dHctshZ1KQ&S{1zYN3?~EP z3zgH@v2uw#=#|r0f}##>(@e84%+R@yogV%ke!=hkJf$n*&+EI~XgM=0NsTJ2 z3j>xiqG_YDI0WyH;OcUOIn|%_R?wlNPxUA5!DuiW^oHGYz2P>x^Yqs7XgX`%L=*8b z=)^}#iI|U|+)X8?2ED1|qJ`6^Pv2~J(Za7r@DlBmV>sEH40@-A{gWNNClt>!bxgHY-c2?}SOs9i0qj(X2 zP4pb|S-nh*(_!lhIDnKn*zC>v-JBH!qpI#rMzh~WCuSiPKRFnk;SpC)Gk{-;U|Flm z_N0$a&@8Sj(u+aK1NAxI39H?uqjQ&iuzPcM9$gvSIHDG`=iDSd_0%&zcr&QoC7&|q zE@-CS)*amEH10ygZs#ufr&aDki1F@HqXSfPMUA_VncKNb{^|PNBy0nFO;3w^(iE!u1Sdjif1d?00St%Hl+FdED zl>`lsdF$flJlBY>j&XMTnLxgu%BPaJwhNV&L!4+Z-=i(u;yVzVmR=TZ~&2Ln1|0fH6GNK zbTx#t3PXf}KXht07{0hbVC$L^1gqsN24lp4KXPg^7$(I~;86B5)Lfbj=J9eh8o>60 zgoAH5wHGXFLVe&T=Kx~Sg#@%OMzXxhhC@lohXR2Xjc8|JS+_Us&d|LA;|XarUPEhz z6==tYZE6KN6c0HxvwESiytIbkhV&m>Gc+}ugB$RLNOseLFUQ#eKoCB!Bz4jLbwG8| zu9l=GQlQfCqfLi4k&0MV=`Gwo7!GCY#J)lk%p{Q^Xo@FsnRd29(dJau3 zPe7Nfyi_7jSZbgIt4LDAqH8;=Bai}$V9Si}6=JDbY>M+CQc zbHyby3E<*oPT}(#e5V1g_hwp5XZ~VL>hO=sIISuf6&;I@I^nyK13F#`142^smwg!S zOvw?Rb%AOS_p}c!r!>-?aDiRZ8@eo!<}+xiQ62cI3s}Q)#sv{;W+c(ZVcm=HSBf(;`V+;supasg~O=7b>OCut4%z?@|7 z!7lpJ{G#tm_3i}{9KPn*Fw?LntN|PSl<#*9){F28cE6%WfP`;3w#f{mVt{aJ z*cf_lZ8AfkY*muwmJlvqJ@%w+qEmy+vL}QIUwH?SgvMgicOD(EU3%J^eBC!>V@!tH 
zVQiO7LxF$d#)||pD(1Y?tB8R1tgdSBeTE zWNXi+8@9`^DTWOP#F3%qg6%SoR}2>(Dv;N|I)ie;W20RdXBxvMq9eA;$w%D8?67J+h}-LPGHyJG#=E*WZvv0X9^1^(%e?UDg@0NW))D|mCq zb`69}EwRJCUDFV_AvSf6URwys6v_>=?xE*ID}Ad=IzG&}HB9e;O1%k<^U5YBs z*pTDCU4~6DY&al}3^f;Qmw8+gE-Hht!C@ObKgWH$EOSDT@RM{AAUq6FcWl={ zxYQCmV7sOv0yM;A2#1E)?MwzTV5Sc)RdHpxp*@gf6elQ#%*GA~cEoncAlL-`v0aXy zOVZ*9NRpQB{W6!PkBm20Y?lm*rS6})IC@2m>m?6^l{;d)+)QP)t{9t0?d!$M&SJY} zo;hsSi_`h<+ck3Mj_n#ri^k%D?V1PK*;M9+?Fw9R$;`{ZN}ZQ%-z{G>86t@!UpnsF z6_^%#Fb8~5py-0_3M3>cO%B#rNKH88GECriJd;6WQ%Gw5vNtV)1mfNe+da19*e;$y zcx;#9j0+-MMXW)@B;M_WUFZCj!jA|7J7T*Gn_}2-KpYurF4!*fxOKQNs6ZJiTtGNH zKSykrWljhZev&Q%fICk**vApuW%!lVyQIGLnDfqIyBu%Njqu=hCIiQ)7$6+5Mvk6a zo283dkyOP%>)dG#mJ!KLqScq85&U_@b2lXODMN0O#bbZ2OXY*4dx@%aFA=<>2|o}- z*#h`h=_C`CM3b8AFsC@^Nc|Mr{Xz+YL2-oCVaS%wL{*bOqZW3UE85%U#x8^-Cd+6o785H zPPlzkZ~TIHHNxZsI40qN5%ny+hq@ zhz)qDf{XsvbTAx`G=qHl>W}t`KQoD;b}@&eTKa$J$ZSVPT9f}t#NAq z+DQy1k(?^e;LN1AIp~jO(>D7znJVw<&F6zr8UY%_ievceB6MM;ADa zHqLif%7JoM5kVY~@mv?J81|>=*Qh_8p^Mqi#f?|)(kxGBm!`w}cEcN3uCVShnOmAK z<47<&-HCSNdq;b7`fE-7{HFWs)n;J+qbEEn{!C>qrl`Y;GFumM-5o-!&;SQFTeN zCh-*b12IS7Vj%9fvu38-*{MKG^dU84{nF%GfIv)+ zbdwpfF%aX2T>>!~)-ez>k3X+W*eeis+B+49u{xv{TyG#Qj=+%FudyN~kGlk7@~m?p z<_J8mtk|m}K583?Wi6Q@UP{`s0>E&oqKOUz1Y+|3wak!>ffzsR5{SvLj)9nY{CQ=< zUV-@Njs;?@jvWZZ#rrBU`!xn)^0-SNCeJztVvfM`%8I=L@v-MpASOBt5Qxdp)BqVuxjnE|z`Os4FQ(;A^|#{xTM0x2nNyg3dW&*j%^UA=VU=?7$Fv1zQEutZ72G!7mWWzE8MSJd6 zgZJ!(q|AC|+h@0jaTBeyTP@(~cMF_&OfiOW`I{3k??1`OFjPt8CpA=Zmor_w*F>1+ zJ)Y!B^YbJ~7nUf)Z}giXA{cy*1T+mcM*>|JTYwT@ITAb}*c=H$6L5~C)DAXB(!3ON zEKn)WVAN9Wl>24`FHr;#VHdgNW6Tz*L=);H6>d1jNeJ-F61+gpa=dW&_@1rgrK(CQm&42Se@89V#h`pK zD37w`fwr+ZBD+C^hQl{;Tr8Ed2+?q!zW{ch>kYU2@g?i~miENE z)_0#BZ}x}ms}XyYwee>c;7~^LIHG&9)1LK5)A6J`>YeRR$*UL4n_qi0#~kf%y<>q) z|d_}PpwyjY}a4T22>vUD%$CF(_%*6ff_%TH^1w~RX)WxD$_maNTtQ@#;`Y?#+Spv_|oi9rtE|G09jQD{j~*n zQTDO>(Cb9wi6VY4UxN01o)jC+z>(F)p{U9~Z9w^Ql&3BM zn5>j`1ksQ%ipe>@;a63Ul6U+f_@cnrQOcO9^2m$r5^G1bs&Lq?k`Gkb zF7Ru&|5;%0m^Fi>v=Vi9OfeI+2gxK~7=VLF@~Oce3d~(gnX|74MfR@M>d$4*G0D#g 
zx#>N74MYuIN0~I4C@Ez{7O(RYe*W@65dRZ3cs*s#s6kqPi|kz=T!Un&KpMP(GHKLc z-a3dZ-k@dC#iQ^*3I;W$DDN0-vOU6=gZBQ(H zJ_2lM6WnQbd&6PAW|6nFSiQ*s+~5*rCztX+9}$1CB0SL%em*Mvd`$THTH)vGgrBb$ ze!fBY`9^s6<*+aq4Q4qzN4X^y9)*$YCYnwMXGY1jwhxhIU;NmsHIEhGWqu@8a*grR zJayye^ICw=KVqVfK3{bNvJVIIYaW553o<7&4>XXZhTm=!a(yWQu;P%CD4 zYbAQD_c4=toez;>E-qvkrij4raF`84#%n(4VZ3a}NXKD5ESX|HZiE=th=lH}E+I4v zhEOpl2c6|j1YV8Joy3JT5=4=xi}|<_Vp#K$Q4yxOIl0J)#BWbKV#xT(sK^h@j9l!9 zBUH@EMMfk8F*9;eKSQRNj~k&tc2uNF){d4NOE0?l@BG|*KKaVVuJpg3|Lb4azjyuV zJD#}q7oNT4`+wzkmf(l4jK6pVtU?o=h!rnF^z?FcBK>+EeNE8Ux1z7hF9P(-g>>m= zHCwqkaCy{NSlr#*yA{-W$m$9Rz z+@~1?bH9iOMn70^e;D?mN%Xp>$CIjq)LBE-#zsbHyf3tigm4DJW(p+7Hg-)Zn7jA&v+0tNgDG<@^&Q>Xx(ZSq!H^?I1 z%CUS2;J;v3fOXjYy0>iih9$SzjojuCEPoFjaP@yliDXg&1F^y{hg3FNQHMmD!M(&tknN7egi zx-~{8IQFRZ@8bV(iuuC}3SsVpy`4R&Fq=y;ycW_u*d^xlk?UXEUH1I@epXUOAe zz)J%kng;RJ)%^gSrED}5?EqO#y1fu?g?;x9dmH`7$HUG3q-;C9sVrXsyRo0+FUdl> z%!S zg2m3lExa-#k6 z;)(39Dsw?`F#iPhD9&=nP^?2D!7Of}NIH1>&>oa5_On~7&~L6lh>EZ+=FpzxAnd`P zn|AVT2PmeK4G1tCOlPl5-RxDbjE+R9G=`AxZgCk6#lmW%-Hb--5BnRl_!8KQHa%<( zW`ptQ5TqJNT_pY5PwhbEYWKqK<0;deUm^p|!~aqmnN$AQXz^m8zm`b+4mfxm#j{C& z^R~h1)BOoLzO}D343&qZPKBz80AkWpX~Ryx@jb3LWj8sI0P9riA)Zozw}0r6JBV5D3Zx4z*1*1 z_d7 zDZt6zWY9Y`L|a@?jLGU3oYm5L=>Q+6gp7y6Vrr_h-e8pGle}1H#JiSXh}PO4Pc0|g z?#TWLc~)z2<(iewHAgxxJcqwIJlW_RK6>~}@4C$!PM_}ekK7o4=JR{tt#B0!zl={! z`;&9MROvArY}vgQQS19k8wpUMw|X$%xAd3fzX@z)1mA@K!M!d<}PrthqB;A4t<2`Agm&;r1f z7%IZZ@>$bEQ0d^fkV6FF!;KJDJVp68O~OeCkLW!egv?+>q!sx{OSOgblR*TBQY?O} zGqmZ35FyfP5D^*-9Dcb0++p2Lo)xE%QV6b*N`jC<_Ge%`gmB?=5tx?a^VG&&3RJom zOt=*lhaN7Jo4Ao&ALOvepHCLv*8t77Lau1Q2)*_0gnR`4fOsdH3!%Wu>-tzTrDLii z_;(R#cAIh%6K(;sG@!ZI^T#5%&XmTp1uV8yd@H(=gTD17xrl2mzv9?=<0a<^nw=)_ zA0qxDA(>sJ)90MgmU_C6g>8ILFVu2osT2VSjuJ1frf+|(!A~{J29>Eoa6r5Lv5s0U ze9oEkAiMZJ8o{fq>{ReIE8zrSQfH+9(IRIMU=@E&RK2q9y{b~kDcC}W%p$VsWjdDa zWVW%kc=l9!5f31SzR#qLd?_I&YZ}Hkj`E`=6MHom{9P%mbl`OjYaZbp7GWeqiZZ+? 
zv1PuPpslpw*@(YkTN2RFnqYUr#u)3)%> ze(=#}T0Ya{dqq_@5{#FkjW2iTh?kq@z{^J)@J53(Tj@pH%5%5Id)YMcsiwDRXjjhb zTwQl(7u~S<06fzOW7D*u591-Ir4K_0HB|~1AZm!b)Bv9eBtY8Ohd1RgqD-56lV#BZ zXMF+~{8|HU4dBnX!4G3bsKpOMNZaIN_Y5P%FGOJ6mQFGo*ISgze4)|DlVlQdWHbv& z7vg0gA8CN;^2Z|J3%!Q5P?8WfSh-op(k(aZfua>k`zDa2ketwnSW7O+Z15HR8xX?vP{3EJ9=skC=d0)&dGHeNlqHXBBT4oWKNS6hbH_85Aj? z`_EbQw)#T97XU!!z=lRG3Sos55UOnrXZ0WjWMq~f<-&V7UTQ%Ks_|lj6cCC!z4##o zHX~G8_Aj;d#2MWn1~R|YJ}AUsg%pq{b+WTZ3T&vIwLuE32(iomF5L+kM1?n4Aq9kp zPJs4Efekgzl+8`94M}brfR`u@c!L#EU@>D$q`-nwJKYRmL<+13)r}O8Nq`MfKt9a1 zAv=KqkcqTPjf@PY02`!$kQ7@IV=+e_G$RG%lNz_yb{k$!BwxKWxt0x5K-m)d6l=+m zJmrSLlHr3O1^KI&Y%#`d65xzo*EMQ(6Sy>ucwS(G6yT4lT~s$~2}I{U+p=?tD1#MJ zK;DMUjufy*3h-lQq#!3>(zw9oTNNPuW(SV35bfQk-mumXCEb+MbkB1qa|1&j@#ebW3ZlNC1-6l~dG z1)WS0G2T;P#0sbothY53v$5G#K;Oe=U3$(~0ht5aTl-@LgsP@~XxD=kkdaw_{ICK- zQOip$SOFQ?ju#uOfKb%&!Zld2@PdGGSAoq4JBbyL`BldX$dfwR*<%GZ)U2=qD?+|l z0U5*#D=B?9BF0?L-qsTj*(@?apQ4eVL%D#(u@V#`bpD+mk$;NkeHMb~uMUOYRNt-wJT0)n3XbM`?Cr4v+@hNI6_7B4u>!gWFDz(ag%uoi*jGU0Vtl)T+umXA>8SBz>#tO(B*g)%#6%eYL`k^MQfQ-!Yb4MkSvHxS5> zI@#G{1vb>IumUSWzP%Y4#0x7RM05hQ#|mtyai(m>3T#MvV+CZ8AXot*rV*(nR$xKN z4lA%C6wDqCPHwfYfXtX{L|j(82_d^)zgn~^uZ>#mD$;<87s)imo#1&r3+S&Gc5N*jfQ1DX!8a_&V}HCMyw#8ChaUeSk*9P zg%upDj1}M()r=L8FthXf3XVBo1w<}ptbnnB!g%L_6&$OC6;L5KU;+{QE!s z%mv3)0V0&~0W{&8{3p!kWCc0_=#;pV!F0v-U?Q9l*z9@s^p#V@0sthkG{cCPA`IO86+xrT;TzpqD{~FJ5s^cvW7mUcgpBGQMSIuQhlYf7;J-X{*pyyGbxPJ4-tvrSA`R7 zkrW1__St@K)ScoBPrCidWISn?uRkf@h>}zkSi)T_fp2&yShGCKA%eF@{E^&B9#9_W zPtc`Y@Ffp-aqG&)U$zdM4d`kJjnr3*dK zjYX^J!MX>@eOycHi%aOx<#-poGlJV$`*oWPLYbZ*{3qg||AnyMZU z&@ss#Q^_tGn-Z}cPcQaKxbt|GoRw_0Fs=j4bwB9JGfe<(r01J*R|P1?liF84eZ3$X zc7l8q?sr$|bcQaB+2~F;dc)qNyYYB`9wA#ll+9|oYi`;AAXjOdJQw~FgR#>5Rh(j=H%S!Xv zeSNvZ)}|FMD5=Ys5O7RbuyXq|l^-xC(tJ0(%M>rdMjk zX{10Eb8(YadY&`7)POq~Olbh4?Xz8URofKZ%-GIuM{J+3TT`-ZhH!mGflBl|?a%BS zqQqB>FMq7+7yR;O6)_q$2&j*Kjg?&cB?WnN}mGkyo%30T=MsrKURiqyng|6UX_85b^i z8F%k$8Zs4O6@IJ%*H`8}zZx>XBGR%FWW72Iy@^jWNIW!F$VnxYw>6+&St_)r>E``R 
zTUu+8KW1BT>vno3sA)W0*1P=Q2sY~$0aK9IRwy}lO#iA;p8x{(Q?&_>?%}D@r~O3) z8+BPtxr?ifcg}4TL84W*tVViuCAu|wIM_JfMfc24+g62uW#G^>X}Ec;OC0dyO}H-r zS2Sgkg2xL;sEgZVX4gow`OIH~I`pH+SAr|TA2k9iW*)WL4hfj?X=TGDt?%DmDcdd-)g|Re++NgsrZGN&=&zR_OwMJK{HlDGh;ED5ONsLRMX+wgh+J zGmUb!6@I5WM{~)eJ~$*=#o$)&&05OqNN%&A`6;a1r0P>RRZ>{M9%KBfurW(HLjhN` zOGOwf0HBJq2rAdvg)geY9iL5%uLPL`$RKG#B$gv!veLfJZ_~YPP+FcKMx|jJLPCoBgePuTyed0<^lD0#-+I ze+@*tEAWd=*=mscY_Ui-#-rI}JWPesAE67*HqJARyit)qmy@H&WmNf>IjI;Gv(FN2q^UJ~zH`b+X(B>V^{`30O(-sTnko@7&ndsJ6t zk!qbHZRu5&({D83u5jjtdRdGdhZl2Fw^tC;QuZ`j*cyH@ux;TzLVO?w^U9=>Mu9F2#&huJPRnj)!xNJX)X>LGhu0I!&49-UYyT|=8N5GeaHMA<%qLIOAm0u(4`r!`@=3@tqF9Rz=qyMP zNiUpdON*)16{D>5`Q!tE{V$8jIQrm>5?W0)R(ny)F01@V10D*eSFr`ocjDdW-)GaV zfn@g+;b|gZVE&eOk zWAqmPLflwtEH9<+<7P)ba(=DbZ23$GI3bl8JJrI8gBwp$mtZvqszpe+%mFN%L&HZx zV9r#4$J28W;@#O7``4t4U?c4A*S!(|NcDGzvr@*h!#E*pPyuixUc|QNyhdg3rox6# zgit+t%bi~;vMK@j3Bv(=i`D|p`g=}cF*;#P|$I)KK)&SCy44shlL4Zx=na{ z2*im_34Md#st6oUEH@GtM{?M{ML`4+!LJ7u1PU^EUBVwlVuc%EO@fk>`hgZAqQX+0 zTy!XckA)~B=p6Hkrzj%=iW8|QKsKRM&D^_?5LX*mCz*7?&>tqFb4tHKZ&si$mT$9^ z@<%ViWtJ>=~f`37MkEseIHWX1g%;*j3&=~y&3UoUEkRIUI=o78C>Rvm*!-a#Iv$NRAy8jR zRjs^QP+vMO51YnV+`}PM_{i(3a4?q{HOib{X|&PmL$&Ai<6CR=>?1 zOq-vIszVYA(-+_e_&^k3lUb=cVYoqNmxCoSD>d;DQej>?X5eo(;VetbX+N1_&eHs@ zYC`L^g8)5+f=MCYx^1C*X9N6Kq^Gb>X6;w=LQ48Hyhx;4N-63EEZIo_%Sl%zdv8Wicq(T zn*nV@IsEUazIu)BK(Ak+G$mOlF+ls6AFspF+a80+mzqvN*d0v1&Zo*c* zBGc}qtj(En{*pm$5)=)hu-6IQx%W7njyJ>X+*1UNoKJfn`EN~_%v<3cI9FQ%k~+*z zsCz`!y$Pq{H`l6e#G}2cf1*MuZ${uS>XpNBcc6`7@jm8l?>9f%eCTWXGP<>(E6(zMUAwk)ZDD+bh`yMZM=YuQ#~vp7Yfu;B|j zXMCy&Pt@ZrW%F{&6}AdsU1y%^^~n(qylEB~BKTZGcK=hl-4UO{#A;eIHokQZX9eM+ zKI{WlITl$#2wcOu&F}j$?Hr=kluQlv&re?e4+`1 zIjNFMsMc>URN}k4QUR&3q%3-EQ7Sz_)%j+4EPkXub)r36rrW3gafSDjc*fCDS<`;^ zSUbJ37ALv%yR5HTBHtyp;eK9W!P>y7w&2ey96TtO5LXE>1svahTEFQdAEc}Xv}V3< zvp4H?Pmd>Od$V{E^>eR(;8sm(wN`Nb51dJg%TU^h%)^6Vr0Lz0 z3A0sMXsDh{#)D^1Cgorr8A-;AeMa)_$)s!>Qj%9sCJe~hwO>yr5Eh+M*B=$ZBy_(f zOp4*wyC*x^E>|*PdNN_sDrYwLWJlZ2g`P|%$SOTqCqPeTDe}TlOMR_rv2Bg-*|Nnp 
z@1Beoo#=9T9`x(U_#v>#d-h~9lIXo+-=2&otMZy~DX{GRo`t=P0VV_57L|pOhHl>Y z31a@edorG=N^RkKGJd>Vi7WN0j>2J~doDRMrXJ9{C&MAvy(h~jv(v$I`6J0JyPsR@sq$iV+4A7JD zWK~`hE(Hef$z%*L8PK+plfB%qLo+}}W-0J;XS%aH7kR;n z4i}^&T-MPq*u2NgL zRWg1&(xl8ub)h>hIWwjX5PrPEwK$SpgIT~;_G5P8+N&Gm*;d(1QE*syV}BCC2{zl< zzy6xN-D25`S>HThC;Ng=hfeJ8Htet9lT9Ztwy-rQGSK}#uX-tEn%v_>R`MFsFE#BC zk+YOVxsYcPP+&Vq9`X{1R+EK|_2`R755WELao-n@B(cOk_q@;Vg+Jc3RAGcmJ+(xK z`6%uTRotWSaQvXR;y8P;?YTbgnR&`Z7u}g?eO#ck>+)Bk%gC_161hd2>?Nn~@AZ4_K+?1_z1CH1_8Xs?_9y3hv%z>|e*=t6ES#yR{7fSq-EvnGo%bQ#g80J? zcoWM_)m+Y+nj*3JWgvC{{JkHrDAg?di>B)_WVobZXQ@(MsGP=*l}qG7ubjpb6m?kG zTj-R=j*7pxO=(T|1;6+AI2%x@i_2&}ukUiBdp58hhO=pse5>B5!eRJz3nxBtBCq7cj&wOCz zE-*PY=uPb|TKLrnUZS0H3@3Y&LGRSCf3nlM6!xYCY;Q2?PvS|BqD13N4BTc*>&nNhrmzb1N)`K(?h z#_2HSXB!==g0%DGV00!si;5bZCu6NE-~cj|0Xh++-_6C0j=EA^-t|inENfNSp7hZP zn#GkxdNCB?(3cI>gw^iSspc+EJ@bP*?k@S1pVRI_#BS#<`KMLxLWn*0+=a~C&Rz0P z*Y7T0l3u;cdOV*IaJiOuAh&Gx&bIHw*=O)pl!?&4@swmB?q>BUUP2`_&YuhDa`j!U z8)+tL!{PX9LndO!<&%&hTf;XacrD8hj=k(}3>L)ze6|wB}46-jMkkxg9s$IaI;b{D7Cv%R_g^8H2ye(n^}%3>lJlBY>n&f*Rm4VU5BNb8@}Cu+dUZ; z1Bb5wWmng-Q*^MhBAiAVw)keADRiDJa249ZgH7pNyA%k+og^N;M za4D$^7FRJ>Tuq>vmMm_#$^yRBaLIO*MKM75h${(eL51DB3P;gb8&o57`SA4!);x?z z3W@^YA&R#)I0Du7=prFz|;??FPda7YKI3%}d1r`b7@N<ws#aT`gP>`AnO~UsOQsk2W3JL@F9prMGbVU^tYm6Z;BHFq1@b zz`XAecwvSD`Ii9Yx+;a$n3>oRuT78e`;lyGr$5hJ5@V9Ja;Z|X;*IjNXM+B*CLCvZ zv1;mhTb5}w#%XLG#*#c(!~EMjo67!21h;r|#U(RY`e1!Px9&2Z3!-wueyLWEN5I039~kwlQ*LXz*2L7YuLxj)>PQJ zyDbEPeBfIyfDOl-5G4F0tpPTelkDAvmI4^n*MeDo(f6f#mjs7eeBRz`zUJ64)37J3 z0b9qqImZ$BmSdaDFe(NJr-qH8=hh}O1j<&Wtts4HpnBW@)FwJL$Siw8nDCW%kO-e4KW$iROdTlyM%nE53Z86f>(J{v49)JE&47fBbhU}VY}pUY=Xj0 zM~5KF(Q`>!9D#~S%SV2Sv}MquBeu&k>7TkddPVB;l81?NH*A-isjSu&V-pGOHDJ4B zF09|`QfqF+ZEXqlA6El z=akmmu-(pujP2sdIbypEXX+rLX>?@)#06Zy^C*Xp(pEe-Y?onEgl-%VM~0dUw#z&& z2^VrlH>g0lv%y^#j@T~CoDd}ZBwYjucb;_kup_q1@GGnRNPQa^+r^`@m2ITu%?;b- zc;mu@hYCu)w&>#4TuEY&Beu&iDh3FrHkrXL-P&Y^fFwxJn!-a7Ap(sfw#&;~2ot{Y z);U6mAOddKF1=lG$FW^96V}o6jjfbEjGupwTb 
z!rZW3GESy#DbICbyJTdRLv<2!!*4J?TwtLL&Ookd7V=@!A zw$L}mZrCmv2g4xq!*aFtTLC7fpspBHLZ#hV2SWi#?bF zz9>+1!FB}_l9YyEGUc{9FI~wSJcBwMv0af(A*uPxeokr44ck4o1K2JelXo=@I_HS( zGMsTigv(NzVI|(}gw^ESmcow+A~|Ba44Y!ua6lXxYA)C=^SC5j&Ji z3PH*d+hv&(f`p%>ivZyv7>x)Vv0a8=S?x#a+rZdvu4h{>1vhM$?_;xNoOh^54Rz$1ZawAM?AxY;%>LA~{ zP%Yc12+*}=m9DFk^T@>>NeAW2juyv8>r3y7q02wB=*jnK) zzB-VXHX2b&0vw=pcSiu15@JgL>1$f6D3}c4D?a@dH9lQ+b0NeOw1>gH@?^Mt4n>t8 z^68}*mc=S2h#Wn7DRye5Elk1uLb0o%Vr+R^kjP%HT!QEzoUAlbfr0FX`s{7$VZS#S zp_}8o6LiZvDL~2xZNalY`x+itP+jKJgBOI40qN5%nyQrNj zfmB>`m#(%gc1i7M^I0 zQ~TFWVlav1RCxwxCcVu;e>9u6*}utDc~@_C*Y>cRKG2?RZw>qHjqzwU84tUg{jEN_ z{b{suzPnO{3x_44T=1IK@>7$9yrIjvr&tyH7Nv=Hi%vSP3v%T4_BR$-*xoNc9vi%gM?0k`` z%#hV-a1B1W8}9Y8SX2a%)eM={zLF&_v0D1LL)4_tW(y06!}gP6?kH0{uLAETD|Z}* ztSuPH)iU(TtW6JGua#TfU4_}Z?1UmZgkSNk9T4B@@N23a@`oK_C><8rpqm#wKZ9`J z&xCEmu-8@_Y%$XEK@)W)J`f-f7pH1E1!D5JiwTlvodYpP;9?-|x3gxZ+p!JA)QDXo zC=tHYjH`$90D+hs=_WH|V<5&4y98n~tYaW%9)Dh$uvZ}NwC56tvFxQ5Joi9c9DyOT zUt>i~9(M`EzX>3*i7^xwQNId4iJdR``0oLV_liyzkgO%ZRqRe2)?izBp0$f7`hrpzM7S<#>`U+Nr25-9*%;UKyLE9GVmu@ z1=uf)a0Xr((eT|+C^Ket5RYU*{5^XiDYKs0_Sx-W+(awwRtvcL-2x{bQ;cC;{^kVC z`%iK*3{?{ONez|U>^uRM(rx&Duk2Wf!NVSOLbE1gLd9YIMCe6ZKow2rB)Q4x5;01D) z=w2jRX*$pBz9KMO;VyT?HBH*1k%B3>g z3oz}S>+{#ry#RKf>kYU2@g?i~miENE)_0#BZ}x}ms}Xxzif|5}U4TOw$>WIb$xeIL zA5F)T?x=URKP9hTykmik@E6VXKX0Nj7SM?y``4?WPV3c>*!7pQags;0iY|HGv__FP zmd4NJJ?*-2@s6~rW{S82HB%(sVy1}uMKeWGaF{9L?#@h+SR!vyYN%*G{55znYgN<7 zdt3c(Z?xH+4hI{BWihV^?a{mdy1#WbvB2f^mr?uMwJ!Z1e0>4-F{8(e*tQeKcM_%1 zC)$Z8Rct<~X7l`uH7c98e%;9A8@bIV);4b(8Eq^~Q;C@(v3WB^Z1ZM{#OBQuvCW$) zQkzd;adUQl3mNTYaGWxTK+xg_&rCaR9MOhf_)*lW35KQ!>7ub!3EcMS<@X{uz|SY%OMQTIIGukaCZ0rQ zG@+n%8yx74w`W`1vu-AuY21M$tBpfZm4EQ%C{JAiFj*<>2%;fjCTe-M?22wzT?a^( zj3Pj2>?N7*FpEr=Uol-?##_MZO1aZ#PLA+j`<0)+VOsq~rpmF~p`eCUiJORAcC=hN zzu_lmPOaKC_@cnrQOcO9^2n?55^F~l2BsFV;+0uO?GAff^69~U78pEc%^)eQMBN=z zFsKo;R@)BDCQT+vN?DP` z>xy9NBK-V)g38V&5o}3=*Hh+<8l?5N$lmpCf|g9Lkduqv$plLpyn!-l)L`B^h%DZq zove#e@jwd6XpcX(X5OKOYf)u_8Rt5q>@@{CrIK`C8%U>x7@L7k<7$`1wY7 
z_vNrK7!77QJ4d-C79NF>>?WE{2WLjfwYCqDWncW*t2K`mC}(~oRdS8-(>!(K=kr>C z&_80Lk3L^@1hNkY^J^Y~1b#9nGY>fWlFyq*AmNNWVIBc{I{CbL1mb<%b4^e|pFWTfLTAC^opA2&h_YeYhKR+kW(1w*KqlY`Fk zCIYX<=1$_m8VRCE)Wv+<2r;br$fyX@+?-rwMB=xn9WiA5WK`q_W=1Y{#1SgyiBGTdo(ybo}67hazg~qL}2QrvS^L? z(E!P8&coc;eK_ak1`{t?zF;|dD>@W!nlXT`PHPz+M;41kjhHZ<@kc*PVx0X0IWw=b zoeg)mxI{I-VKz22+f+0b#+HAQaAh(!^6{LfRWnbXDk5pYMn&Yagc+lX=mWMYq7Se( z=cpo_f^AeplC@ntASHsWigM~KN7cNDWF;FFk-M992^hNs2ZqBA>NZ5q-c`Mf3rdAx9PA0>nl|q*Sp~QT}mT73CjiIVx22 zjR-Wch-;{6LOP}nt&#_bvMN56bNBt##$uV2s0}T0Y0?Nc`8-K=EXU7sN#6BTdM4+S zJMWmUjY`wvvM^QU@+)8|D+9aZtvG>0s?FES-@Y667FFU`?vB5F_r)ra?xW^qf3ZsJ z53{qfQTcwcN*oT5x3#IqflVP#>N1=BMm|eIrY16HhKcH_ydksAZ}b5}ptLFEFII_Y zEySoITJ+hfh&~WNMHj0?8x_%_&sIhBfdDGHSS9u+Ba~$F+Ua7Ih_>sNE52Y=BK(Z> zMMP$W4kapXuby%V&+w^y%3Qr&rZqro(SzO)!ItQqp4B6o>E$bf>gfqfhl=TuPwA&e zJ|#}?pEQ80)GirGi7N4vfji;Mg)axsoxk!YFD*&ZjV3K16*;z^=Q)x~zkvTaaqD-yU zDbuk8h^VSNAVev0U|0-D(Rw{A|@vX zWMhnv1G03&iHhRa?y-s^;UOxXj=QLMIx5;EEV@y#Zr-p$SqbeR9#~gM}mmSAy)Bpd>qget3K9%O7r7ZL3q&W z2Uvu|1m>vd5TS)3mP_tUL1reiv|n#PrGTMgJ2SYuLQJ!$=R=Xph4@gQ*~!HCp$1e6 zCY%_liKAZVPUlHdD#QXqkjh1OGQg~4V)Jrd%5Y|T?*7KmA)*UI%-ea{nMuU=S3mom GxBoxSeuAX{ diff --git a/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local b/flow/controllers/imitation_learning/graphs2/events.out.tfevents.1587860969.Akashs-MacBook-Pro-2.local deleted file mode 100644 index 7c59cdfdcadd549ead313ea8df07aae86ba79bb9..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 267629 zcmeIb3zTF>eIGoxdv6z)> zZPeR6boZ>Z%9qbLaexpz;lw#Uew~vqb~v%)*g)Vwz(m0h9AZd}V}rmXfDMM653o&~ z93M;^e|2x&s#{gJUbm`l?@Hf_v&^gSzv}<~|9@4t>fjvy?-zdfhnDut&8EqvuvJc+7`&)PJ`REsqXP;a8%;$H*GjQehXwct0 z*E!oAPg*eE+-|0F2cfU_6U3;g4fv%spINQe;E7obOqsQ-#RQt@!azK zzq;Y4pJjQ8mS0@rWgZ%F4-U1(Jz-;l*&GJMMQk2E7w)ACGtu?AaU*r>M~CjpGJN zHkXkKiH%xZ7BDced;s>M^gixwb%xzDsFsT7;JpnvWXD(Qw#lS_Y8WrzuZg8N5`UXZ z=e;xObx`^7WPf-nZd@G&ahS_tKPplBTit077m^uegAn02Be=#=V&h(SD_*=Cm0r{S 
z^qj54g79YI;$sn9Z{lL3-<@zp6)G1;6KVaSuHe>|Z=glwA-E=fwN(+B4!!&mQf^(k zz8{raX+5~MJfHR%@E;?1rOos?XDpk)``k+2`6A+-xNu$Liw})_;Aa|UgUVDPIG~jP z5=Sj-ea=~BA6Wn<_7HtJ?a{1QH-^?8BRK7*VFpy~(n2v?=nkZ1{}^+#bN6af2Vmj8KaohEOnj zG^hf;5W&^9LgadjQa>Pc>o|s3cI)`Vl6J+%7vaMVXxZ{6Hv85>NkZ5liS>RSu`KU4 z3??9He9~^y1duH#dPq<1hZ+sbeDL3!VK*Wq z!2>OY6ogd`Q}AW0+#&!1MjRt+OshpF>;9O>093F56&(D58LUA%Dj@_GAu`7ZOehFz z0n47?K*82Ow2C(jPGEvE3L)65f)t5XNI`oRNC6pSrbq!9v__1vVpOn^)v3`Cig)y%L}z^VNP!JC&Xmnft_?|U8-SO!8af;+q`+dvmPmmG zrB=EbAdF!LvqzHMMF-r|N*r zL*9fxn-&em;+N3S@={<0=ZP1)T?ID0a1B=FT?IBHV61=>l%eJi2P}~JrT9znl7P*w z0`jEJ4(xXo*if^=3akkE_GV-dFRXwN(FxEVE3l!)nXka-3lu>S`{l`qNDaL0jIv2%*FsJSoPP0mZpAadA@-_hETu& ze5O+158-&30al=A*bXZoG<1658m!7#fz1f#6DuI|tBw_rCv~#3#|mtySz!fMgivp$ zaDgJUxm4d|aIo1|Kt8M!pgmS#Lya?KGge?jGQ|o^Jq|5WyhLh)6A z6_6n+V+G{FFp<@0;Wk*o(HgJqqD#Yj#k17s1S-tT4M!hgUY??m&oqJXEVN`dk!5hSn8$98;&fHjHCer(jzL_n5ac003E0JhG0B0(1AaP<@XZa`Hz0q@&tphTooWfOaSe(cVcU3c{`j+tcX~aeRt+Y#Rd!2c?^KE z^|hAjTNh7jv<oM4Lt(;v3Z^t!{&crl8ORLxQiw54G-1z32%@1Be|74bSLQAjA>_MJnC+3b|=&H zco%EjclabZt2ice*|WYI&1G&i5+#^rbdY`Pbts(-%zxXUT+qi?oyAeNczCH&5y|77n%*3%aw;I5Oh65MFmn`E4| zvV>YUf9_?>fAiyRCh<__WwtfrcOzJ>Y?xGg@%)|r!65P0zZZo}#zj)Q6rexc>OEZ) zSE|VQ6AiebGVjNFgWe`Or5=B^tOQxF#<9sK8YCVXE99h-%G(;yt1K1T({%EFrX?*# z$se<=xEHR||DOo9>J|Z0kk?iyId@F|rcs{&0`^n22~Jm3(t=Fr;oz?$*sRNH%3WM- zyqAr}vO7dg0=jynxFyu85U>m!#^$juallVD;eh~L(UeIF9xouFE^d>lT_errGk*;h zIZ1FS4X)gPKWPM3%q(iP^CV!VV^%g?3a|q{E^K>nbZhDWdCgdZQzc<(;=0lnBPooy zRDil_Gz6D<;7Tns(Cp|g4dOtr4pESTkJT$7Q4B$J$ikpp)F|HKA zIvhF?p>a|)1X_3s{zC+>u`=y!u+GYbTl6DN>_9sN1R_2v;DIS}SV>5QcRA_A8b`ki ztzb^k_TCE`2mGk5h2`ADyYp`nZe7#H87&b*DmJ5@(pP@$H2*vRWIt%@AQ>+NgsrZG zN&+*h;CCZ_LzvPKXoW&b^ekl6RcfQ%BKg7em6uk3uR2FF$)i41CR)YdR`1PP%IipO zv!D4XtlOmOQ$ngNm3XQZ0Qj1)F-ti^0avt3MOD^0sCie9i)JVL72Bn>t^V0~_XEA| z;Ew)T%SEy*-|}zl7^G;P@24aF8Z9Nk*{#7idkuP?5x6cnf^LZj^Mq&bdkFa9+eJI%akP z%5EFp6Twxw!P145Jj9t`5HQnXzp zevcFca-W?Lg{wJ;QJ)F-vPU>c7J60m3-B)+aJ;N3Upb*Rh^&KElT=M|NQ@dVOX}SX 
zcr2u(raRkcm*1)JXa{ZZ=xyhFoswe`pw(9$$%8cz?X18rH)X3q?z6=r*&Ge0cvCv|%TF%xA^qhBfb<-oRu_Yfg0A^Ro?tEAwrUI=d=EF8ykZyau_ss{MyQAo49 zw|6RTqW`OcxD+#1H5K2B;8-}<$fE^H5fmQ^sMC~r?v*wiZ6&=bE~8Y6p;Cb#Y1F}e zyWqYK*#?%u`+^2XdM3hb@a?Vrq58C@XzF zd4FL4%VIK)J~*R@j7u(|J%_&@{h4{3zQ`-2|3>=FHc;vX6q zc8SsWX9k8Sb`iA3cN|r6(>_{&K<|~JW|M{pRY&)W5j+$`YALMKk~;3o$+nlv*JJb+ z|9sq7Y%DFN@8ecG*%95d7e;36R0}5#Zahg{f-SVlJFNQrqX@}7f)9tloKvIA1b8ex z7a`u2eX)N{x|n;Fo&QMn+zF0Oz$u+M=D6vfBOO6CqTO-g1|y z-foL`=?numLkN<4cs`013isR-WQBrGOWHlxw?aY3#jfn{59SM-Zx4Yu z(J7&C@LLsu9ek-UTP>{jv68E8GBU5|o_O53~>wv9w5aOwqvzJ{F>k zpmWSCo}!EhC{CoJ0NI326=xG#5%RTzZIVeB4EBif`GhqmbrT(fzoC@pinba1ps;J zbTb!K11&{(BA#S-A)R#^$XF3e(OR1_9hk`+|jm;n0q2tk569V<6R7=VuX=k|>)R&G+xly9N z=%vHQ{h=fAlut>A$-O2t>`k8(3cX^8gxwm{cQ(L(MNTn6e0C;zg?iI} z9O~by=qP_VV~yRusoV|*pKrjUbs6Lt(z*K!K`ZRUe>KM}WE#Y+`>uaiqiE@UFBK!L z>^S9QCPrFaexGU7m#vU#rY##%ds3(Q*GHVY@Wt9B|GY+%r0`U2sU=fdGTZ#AZGA}= z(t%fB53vCsd$K8cChQ)iCACj3zk;07>*BGhMw+KGcfH&KEfvsr7fKWL@*bc;s{jD{}w{rA` zKWo}&SzDG=z!d}P&)mS0uC;8dv00oVQP}W#NCozR8rwn(^Y@y{!6sg zbU4`_O?sUTbW=}pk)5qetf4m&7$Vv2fAAX(`(4)>#>;X=F0qi`@%JjVa?qJ8h@bol z#F=S;7ep$}xgw~k_b#ucdzblVQz1t1(-p8VOCc)qvh1?408Ws+DMZ$g83BB<38z^7 z>o!bRl1(`ZGkI5XS6m1&*|)ZpSdrz{)3iW;0v?MWsZ^j_4vzI@9X{X_P3X@^m0Uu# zetV%3-<_2TNQEV3(Q8c?&KFVVEmYlah9~03>Qgt`mGsXlyr0A~&W3B8q%?y7Zna3luL-K1egMj??0{I^pOveikw#iS~Hj4j;n4f*Qj{l zA3L|+#4b3lMQ6xc%ZD6OjdTn~vdJ4g374gr?%$JKt7DT%q_b63giPWJZV`z+*ef?O ztxl=&EN{{P_AopYKj;y_i0I1tuuoMVZptna3$8dWO<5l-X(4CUu4kmcK2=}`9*-Xl zpg_)Y7>+cJ-dcJ_iKh>g%TU^h%)*(ILu`#C*EM^IyKPb`Hy(5RulTMI{%wWhY2~vl z3$jX6cchg%a54*#7g0#xyh%D>p`XioL3me%qe{%8Q{+M4bXC?)g@awqv(Cw{zy~Yr zVKYCq$d`*6tRJA$}EW0r?(>uslI)>Q6pUCGhV?%k6eX{qXwfIXQ^ z1C#}a=FGb%6S!97BEOzY7=x+wJbN;kG`)K=VYVs@4b_v$c<}7Wq#RsO8A--8`1WK{ zwhbxCt0xl%WbN9oCld&ZPFVzu&^?(jFvC%;J=u{~Ik$)D$pnT}&TQ_mD4 z?8$@yS-bY@$ppfpQx$pnT}&TQ_<+RuxgOeV-GJ=sctp3G9@ zrecS0S+l*3{IB?}lHTiNyy!%i3(}MEL*S~)w;)A=|4b<5^d^ zdr@}S7LeXO*~%<>GMNSnUe7~MCUC9DMSkmK!Wc}Y=h>6Vr0Lz03A0sMXsCoF`_ABbuua2hLq&BP9_Y<+O=O#CJ+{#vIrQVdop2QhND`0vX$C; 
zGJzqLGn;#|m6`NpAM)J0C=(>plfBZgLo+}}W-0K(gfEO$GG1_^!v*Qc_#uLHWHOQg zIx?QD%4fnQz;buuS=P%Km`(mGvtpnBe0MI&xHhCeua1l#FHlFu6BQk?2pge0GJagb zjIks8$qLuvNOlco0aw|N*@bJbZj5JJ@!dI%njkNLiMB#9;Vx#zupFZ}VQr3xe528~*x!)z3HiYo4Lcr_sl%yN`>xBv_3A-*>(9V(Pd~6aX)Hlehqh^kxePUQ?qjEizlUG) zdw-9!0hPMAjOO$DE;m}v%t}(D%Id;^rHp9Ws4Nb_`y#le9AP$k)9wm7bo8m-xYZx_ zr~U4rbGAF!L3f_sJ{nG@RyQIn-$JwXQE119OWB$av)pwi8~yG?avQ_RlP7P5UytDB z+9}6yqC4()HwL{EZM&JG4dIj1-EnWLGwhz}#f^B*d-5-WJ!#?G?GJn7j3Bzh8;yak zjcu&X0SfF~kBI|2EB4zalm4k;ynw$ZdXD+5UM9xrFipri=nO2Rk0<-XQ~b$ZzB;=c z_9H{-Z*`}=PR=emI81c|^RGs*q*Y~W+(Rd57FQPO#ZaVQUp7<|R=Z1E=Ptg}xq@6H zy>A^BBV?WPPd)p?w_4e@#$EC$bM7*0Pq5=b?Jh*@cJ7jYTIDW;81F8XOuA0z0=2u4 zncKNb{%Mt426H3$vh?a@*5mmcfXlVK1G#Ohd!}_a&OZHTP$okEMiY{OZlcMh1&88m z49SHhyv$w79>t5OAjd`dJi3>Cj<-ld9S`4%;I%A2IQFu?F>q+|J`X-iDd;kif?%~^ z8s(*}>A5s79gd9U!Z#am2g_V#c3Xm@`Uy{VMJzi#gY3)JrFCBqRmfwA$+*4~!8*%M zb!iLnz)#vmT=0;DiRV9xKynK=D+Pm6J1b?iUSL7vPb0XM<*2e=QTM~vxZZx9n!Q*6 zd+1mUJn)?c-08`%7&v?dD7(6rouY%41@RB3NFfPZ@V_F@P>mc(2p_&OH>H;l0)*d> zKynYcNwsOi+0s`WT%yz+Z+HVOO%A~|@vE&$ldR!=2`Te0UEhz&{P_$i%!(5DN(48v zJlKT{$`uzbtQnC+1!vyGn1HSW%lqMl)r?X2a>FItQ5MAj;UlgjtOZHjvoLjMiOM5| z#xUU<5v+L_kr)nqBpiT$9jBjlYCNbd>1qgPeW|^hZ#i^){jpQK!SMBb2n2uP)MPMB zilM-v>}9CAG#Sj}r#OJaI5!pCkKX_n#w)BqhbyI+@+=E^`TBpYRn)TT-LpMv0tiHg$5h5{1+}wEzh!; zsH!(uJUxe|mM0)dR$(kkUaVRuR#>S5bZuvK1X5s9NL#*AFA|rR)ft2V2o01RZCojA zAUTu(byW)U;)R3QOPPtSceUvez8J~2cKY*tVI;AiXu>g;7ptb8w`G||W1PljVJykR zHO#+tepA{1h~PGFuDE38A#9;{|C%ha2tofP?Si^G01rZT1Xz*2L7YuLxj)}#m*28j!GYvC;JqK9w005%+RLXhy2 zbO@d_;K*7EU{qfVX8A?mm+D;-92kfu(#>MoU16Fi0$+D*m}%G()_|?qku2(Xa41HD z^0Lmi9ouAvQ87R`HEaw$w>Fs}P`0Wdz96qlss);~O>}CIS@wi5;VbVT!qJEU+^}7G z+M9gcH)LZ>hB835Y);oV#%|aynT7)Y#ElmTkS<*d89=gkJB4jo{*-vJBeqM1R`8ZL z#STjV2EwA5xL~`cAp$hSWQgTiVY_6&Odnh&E6G4!wpPkPGPssEJrmaEhV61J2S{2R zJ(r}#5s)M;qzPr6>ZKBSCNoLh4cq0J^iN$Jy&`pa$-~6C8@9{MR95SXv5ACsrdZUb z8K}QnL+_2rY z4Yteqou)T*f^YB)bnuNEw##tF1rbR_?1IVU(=J5?XXw}w+hy1k!-fOm$WU{^cA3X3 
zh6@iB$m?H?nL1*-EOSDT@RM{AAUvd?l+cs*OZpVGQWEu+mlgDPgYUI<+Qb7775^5!)qH6}-7)y9UCgme^t6u4#w> z4KW#FX4oznu#D}x;4Oq2T00cZOz{SCC3_#h4cjG?#3m@b#qJP9IeIQhiz6VZY~=~Z z07OcXP7&aJbnCcpmuFI_aDAm7hBMPPDaU=g9OtrHSBy=h_Vv+HuoULDZ`aH-hwYxX zB<6XqqOFR?H)Pr*e;%d4!&{2b{WpNAi`xSO`|I*Rh_WwoWD}65kX)_ zY?onE3>yxJBSXyv+hrb?gbUs71{YK_m#n<7)@ard+hv&(f`p%>wYy#E>wfMMa@@De z@GGl#NquYWPwoupf`c2j%kjpAhd@g|xosM~o<*Uw&^WkZyBwopfN*Nq7--h5O=bv4 zf<$F+*0#v*Hs^@#^70nKgs;4dO2VBlU9fT7w|ms>Oa_GA9HHKS>t>!ks5A zWaGAPm*H1d?~?k~+Mhg_Q=jbOcqRkK+w&tlxM8~-qhf$?z#2JvZf&w2+x1$@z_SZ- z_=_X9%gHCezFm%<&dEarku)%#sB)qhcmJFA&|PV?(0A(5@)|v8_j))Qw+#pFN+!cY z!1&sr^f8K2PqR~66QkPK0V+$42_KO?2`LDH&3d`V@HF1(`&KNLjS!p6!6+LFRwO)ec#1aEJ^-9a=VlqEqlQ3Mu$@t8~!y*D7aJC_v=f8MvsdUW-QKNlP* zR|+cCobZ2FJV#K?vAT*eNIi84@ggCAAz1~i^wok&*LJAc7i*hJcNeM2CbijPNWXFl zJCaZc?`nj}3CID3SQJPfA*E*U3qaSJ9cN>Y>8M}MifENvZiGoKUMmtv<@e#aV#T6q+iugt5*mbV3o?B&WOhz^3q z5M7{xzU+qj>}~2nuR9*1o8voUbjv#_K+0?G@Z2xl%oh)e7mc9{FDnP~Y^aUiw7Wuv z;M`V0OY+|F$&)8{|(MnlaYpT4GTC#sL_f_oLC>}+o# zeRSBr@*OUF33ltY?d`$2RL1ch`fc(myN8qNYZUC3&7?Y3%s$>(NtW1o;!--Z6vX^f z=%Vm6rK>)}-jtravP`FJ;fdBbwSR3V29roO$}>1M?r!ya!|9~O{!OOJd%Dwmb_SjF zf!1_qd(dlbj)v3mXwcc}ZTHXxPQ%S}ot1LgHdjOthlP|}7t1m1OC$EMH<_Y~+0Vv} ztLHTD!e%$D!+UqZ8(6Ne?lPHMnlH{GGUMfxDHB)vXyS8erHkD&Sx;q>E6+c(mE0|3 zZ?@}554UY?8SR!X-#A~nDKk-ZOR*;L6!_#WxZlfSQ4v5^Gh|kKHA`G#we)d^s7asA z78VkR?I*?DQKoo71>P-I?l=rtTQHKVW$2X|pdKoc8DSBwUG_8)9m22q)((hob@(;a z4*A0lF_aFAY|zaMUYJ3+=V!vUVYt+p`IQg3Hl^i*CN>8M#KozaPJx&_?qY)ES?55^ z5x5wLd#$XQ>9n6uftctpKp-YZy2%XL7>M!1E`gW~>llcc$6rt;>=lUHt@9O#u{zE} zATEx;klC-XA|{W!1Y+{6b0Fpjyr8Vut0F#P8;Et)giCcx^dU84L#Z7g5R><>Wrl1F z#Q0&CKum^p48+XiFDMiC3dBdwTOh{jkXmrP6>;&tip+kEftWn*5{SvO&ViUC@Pe{p zuRwg%HV{*E%_Zs*;Y)4S5OrlJn?E!`HaE)**%*lN!!CiC4C@$(na5vHChQf6k6H&} zYQql87#%EosRe7IuFQJBz6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d*pFA*ZAK=Mn)IC^ zsJx&oyI+=w4Q|Y-$Pzr(mCF8`YO6!j5qx16NG@b|FmyGHd<`pKjhUwuk^q~vJRAix zf!yQ;W#DgN6=1(G!Wp~4c^MeuS(01eg&C@9(yE#%;tteIk$8)lBCbr#6iFLsrii;cGeu&Fyh*7sr+x6Z;bp84d=GLYEvmg8$^XNQ<)J?EW#|!vt!uU?2H2Oq4@uZ5)C)I48 
zf3Zeo^VY8$nS3L+`NZ1hjU%Itg=s1=QzSNTrig9cOp(~UnIg7%Gev6i2`p?)&ut^4 zy%hGh_eIQq@OR+K%+Shj5XFmkqc}C~PtS2?x9ZwB?7T=tHwM5yqG0)t1b86>5ZsJo+H4CYi73~E#* zWA2Xy=B}g6+1G<2d)G+@DmZr8F-P*Vf@if-(=#U5N*cVLGHEhVQp$=fUaw`*!D{n? z6bx#V3Ze#Ypv)OHNb7Hry&DwsFbG;J6c@dd36?Z?BW2R4!Mt@4S-ep@Sr-lBffSOf zqQRReb4Cr)en({QCO?NGbQRoGw*&aEq*Np6{xIdzsKUHgJ0K?d!;0x?gdJS#cy54> z#>eFrVXs)UFe;I>m`RC2rj&7UA3|6mCI>BkJ_2lMW87(Wx`RQ!W|6nFnEzxyZg7b- zk{9tm9~OVHB0SL+em)}nd{p@PI^pN*g`aN_e!fxo`6hVxWia0#_NO^JN4O;x9)*$Y zCYnt8r-sROwhxhIU;Nl>G>;W1XMQAAa;@>xJayye^MZoVKWw6pK3{bNvJVIIYaW3F zeljOB4>ujy%Igv`s0h>CoLpo?;55h~{7A|sN4m>IdKpCMDs$Bj@RJ1SBo zYe!3s#g{(*-CckA$*UW4>3_fYe|=%!o?p4_p=Yl9rRQ$@!C(8mMflMx;xAnetI$Lz zV#Ny(J-rkiPrsf+Ula888T57OrGS1ppDx|3W-B-QFN+%U3%iEr#&#Az6FE6P0$1ou#CP#6Ii;cnD`ZNS^GfLXwL6iSlqLB-zEDG#FxGJ%B3s$ zb3f-dMx#N3j*?qCC>$?G{{l}#zL_dzcHBFO$4Y0KZ5~J`fqux*ZTj~mB#c~$OQ^d- zAMI!7C1&t?x*zu8@6i9>{`!Z|_ zw;Rnpa3k!>mIjj-0TJDmSchG&d+SbjP;#4H$ZZb7 z()ZB;S1*USeQDt}`}SV48{&5(jXnYMclJ>O7{mNl|IB0w`smoir#q>uu_{JZO(usp zVJzttz&??=D97DvVfS6=I7)OvqH30+Z(|qPv;P3{4!9QR;%hpc{tzwLq(W6p2ex4W zzg(7%) zHWy=fEu?S8l$g^;t^---qSS-Ko{VL!)TlKFI*3%S!J51^eKqcaZ<`>6+}BF;XR3QS^- z^OIh0D{l6O)1+~5Zs}6=Y4nSB@tZ3-16^H&h4%bq`=iFvfr6jh?%#=+zXf(b(CZHF z=$|D5NWWS{ek?zDG<_cPg$yEh5+pcsYW~u~@$9cEb3t)1{{;3Z&T>aktV1HfENr7l z+JE}sZj>zcv0JOqZ!SlOim)x_;O^uA?8cv)wDVO$6w}FiAq@JH>D8&5z543taFj}8 z0Qv3~m(frxtTx)MXtds-w>gb3fjwx`!&ZOV9}N#es)5u+(yzVL4pgpoKkPb|GR^rV zGSEEyFQ$<><&TXPF9Z5(iNx=M1IJK28~3*E=$|~<8>8b}drHGlc}VJ1sG2yov!Q%c zj5s(h<~H?XQTlM^3MAUZFM$JSQYdsF33X6_O!qx4%^&Y?JuCaVC8?S+&F6dqP#G5vU!9dlPoQ_>daQ;KId(@6|C zea2V99<=j%+DB*WqTFzte$VsG=Y!%?5`HECC%WT)cVmFIxS$x5)h{@!rS;N&K28Z4 z4~50lRHxnkFwG}RB_nYi&We!JgYBs<$_-pK%+5XqY(qa=eCu`{6vcs}bjxxX}B6uCEL^rO^^yxwA z3{R12+ppxn_kVT6Pd|GB@X>vm=%#$CJeuMy9#iC9P_?J-0}Z&(Umtvh#1~8ncL9r= zzO%|9WeC?&sIcO>Q!nx0?G6CAEK48E&jV?Ch!#H72w}w&lz-DCoP_X*?$bfY^oK-1 
zk%xd(jtHL&A~=*{@mrmtO*e!Hkye9<&|uT>(FSmbbvt=hoI*+=xJD`oLI&BNf$&49EGTZFqBwiKBt|8ek4Kg61QIS}&F@ZVfM#1ES2SRR-gHcqf~yyiBj!vKGV=elr5iZc|QT!YyEy1~e0U{#XRpo6>l;fW?-I&!8(g=vz;c z`?c2at;NgEyDYidOtV`B{$s>nBqXz|bo!h#O8m=trX}wp@JPI{n!f$D#@8DZ#wIWtH&;37JJC_h>Kw#*k3v=uu%7x6c2O9C2NldRajbN4{gt~~9?v##7_ z>l~Q_8{V|gKpLt|4V@Kd{w@4-A9(!PWgn))@lv$$T*^~4-4Zwg@p6kFFCS^Z8x78E zr59~0&)pjDWz(#|MVb;R+u6P>?caS=pAJv41bDU)#-?dQAI3vaOCN?1+thnWO<~aR znLq-hjeU4i4kOC6xmWjn*jPz^_4DW7Xu$0O{24d+Vay1%_+bbIvqys};0qBLx22O5 zXzDGz9H-P@2;Doj94>j7c>8bzOqV|v0bl4ftc8+fb7rS&2{jmj!;%n^aXK&fPv*WV|0w68C6{t;*;D%rfYcFw ze%_41@MWvqA^-wL93yQ{ zbqA?r=m26dZyi(F6zyqPpaA~B4A!8Tln?@o5ShaxCg7E|0Ms<`+aN1~15`@z53S-2 zgAE2lKWEWf-9mp2NCBAx+qn571%zr#!x@7Vlo!dJK>#u`%a3vc zffZ6fC~A2rkOC<$ZXf^|*^U<*q<~P=@#2RR*o;7z{mHQ{Geio={Hh}blt__j&4qnLpu@*=`bW>7?**eAS}CAFPl9 zOp|lbl1rU;qySG6Y>6jI@T z9(?II_#+FXfLu}@=kI12*V$3iX@?bX@hMenu^*tD?FX=3)aIWR_7d1&1@so{VmEIoWHVFm3>5i#CVV8jZj5Q<6KD!^)20eugfb?F&n1!N9vWcSAk2vtq} z(5eS3AS1K<_+bTvqL!ChumUo&9WORm0imelg=?^4;RONXt^%78Fjhb}_q){KWSCSm zbaMbNCqu!=Zd9<^RX~Q+$<7`tu%Tv!6<87S?ajy_D#XDGD?$CFLP$K(rG3oV68&&lgea`Af?Oq4u>$;<87s)imo#1& zrH2IMPN&hZ#Q(ttE6BMJJkW?0;Kzcns$mLju!1A`9(#MN0Jo^-eFY@UWLT!I-{=2U z4M(urS8&8(UjdPe@$Cw7>tSjGg+^zA6&$IA6;L4*lMIR#(DTSxm!2_JK<2=pH7l&ZijZ$_Mh5Z13J4LM0PV2?8)}>>o3R2LlHOPW86*f+K!|BXYKawC zP_n}ctOy0OM^h(dv#)^6Slx&iMoFu&5U>E52lg*gJ!IN zd{SeBZp;6Y4x=c5V^({wwb@eKs>;mZ38$b%*a55$WNR*)aX%a)mSVg-0OzG~4m zT{c(&rm1#O4Skt=x^&7pTP5do!U}SgRK*JLV`i)%CtuQdVRRl6m>E`(b5Rv5z>fuC zRby6GSi#ZCSOIQP%~$~mGv^;GIO>2E5V@GK0>%akjm`oqI9drSph74nX^j;i(CJ?F zOJw)qD;}qWzj7RM@gLnR;pM5qgXKMT) zELS=Z0daN9thhp_#GMGHE3O9<;e^0u&zBtfsy$*i3lOJmh7oZAEc};7K(nXlLO@SK zuPy|n#`bmo!A0>Z1bi)|P$}Y2lgS4ZZ;CybjGCT);4OamTmxPoDhyy0BA}(AR~!OT zLv}ku!G2ZSeoFE^*Y=e94+%IBRf~iY{KDUSKGlGu)yzj|hBPg@L`F$4TP4XL3Xlxs zt&+*skUn&7)f?@t-_eRJE@z(DBUBhd99kW+msp3vwbI7iVOlWAw&fY8xqJQ zJ#^XD0y8xU-dsg0iJv`afkR&*%RO2n%k-ST!xd~z1RSdft_WSeZ0Z>LrhWbr&R;NY z^T17aVryuDk70;X%PHQ^$lkF4l&!C|RA1>V2HT*UzhqI_ObUJbAtDj{x^RLml0tvj 
zI@9Y8I}?22NvAg+kH)R?^(W;UQId)ROSp?A@C^^PXE{Vv2JrTXKayL?1Ih!vF}jqC z+>@1F_0f8&*FSZ7N)O~MpARrM_qJR4M4)O=-X$E;=rj?{-4O>%7kZ!@i&oQxq66eU zuEq6*MRag;JO}TL;7-BI~h(mzmvX$nu`}%T+txYSo z=&#~J8w5emr8qR8L*VT+l-_!kUVW9=eHg;J^8CHxWmO1R1`bV=hMUK_!~s9qga-m}MN=jzc)Wmwy0}fIc8xTf z&-^u5v+zj+OvHDJn#usaBdoaD2pQu_0j$HJ z6VW$LR;U#GhX`I{W!l+bos|o>=trE`fp!Q8M0`}h15@O%l8}mos4CiZ$^mN}{Vudw zV~V!-UeGw;M{O-E=O*5rf0J*W~TK&E19L*$;`c#={6@y#7H)|=cBe~6f z=BKc3ld4bYQzc!aDVKg<6EWyGTpYsomlyQgFiE zP-Y=ov%a?AZ@fkcJnU$xD=>VDuADj__uPfIt$U4d5~5Tibm6J=?+&$Wd<%HTGw@^}(91^1jRAx>k2=8vd zV<9Cq-PuOF{7#KWJKLSD-gds%DLEzqTHQ?nt0Q@^2BMu6_~oW-HOPIoSR|XH;dDG2 zq(bQp(FJFl=ic4?FL zRq%?{OX9tYe?$I@gda)rYo^IB;FR(lA4WP*qO9-GF<-nH%b5 zF>)MU%t_r|p^9F1`&Z$Y1KSqfLxiw|?3*O4l7hRsQh3w=_rbz(%u~8I?Vm>>&FZtbQ*+VI_A8WuP;q)rD!1+$R2mJeN+BJ~uemp!)1Z=#g2^*osQErr( zHaTe3hpqI{fRYgvs)8V3&02$~%7(2~@uD;f$; zQk^^okHl}PM<*IEJzZp=lMn@2uj3=GgB{FU~|{<2O4mHNFzMnAN0^=kLdps|Iom&ON_=pGcZK4 zi=Z{WtKM#lcll%t8kz)U>nsF<tqFb4tHKZ&si$mT$9^@<a=F)LzuTmTe2E{fue`tq}M`KM0)R$5%DNhju3Z>S92GjXaF4LC|ANOy{@tL1@^iQ7b zjnS6So}|O%-gX)GrcaH==pez((RQ!JopYR@idsmbN@w~0D8MGOQgy;`gUl`mOJs+T z>TogIDhT|*-)q8|x-=lo@2V!WUONcTQ|J{#EXJ)teP;vwSL75E#Aj!cSEx7r$Dw|# zijMM^GuGG_YSjCKKHq>x>oUkQq;vNdf>zjx|7wm|$hJ*CvCIhmU5%op_q|k%w6f!r zlbIMP4T-jbgwHhU%T~xV)0PdXJ*m_D>m$xxHft@?Kd;dwDLhqMYRQzA%r<{|gI8Y< zu@zns{_@JM=#*=-T+N6N3dsJ64mR5%d{h@JL3Git50K&H# z;J*z~Z{I8yRGauQGUUHMU%`q2C$f&kP-fZgywxSUOLgfr$I}j`O zx7Hlr%F!GCtZAQRZCO$QR}82>a|27d*0Qa}W^szd^CV|{stIqY$6Lzg<(4aK6~MaA zEY<6iBOG|s1wr_1Lw5gDy4?|)TthzB6sE5V4^CJ8akun! 
z(fz1%wkdTDy@(*(@W9 zZ-yu0$Ldoj+OuW2Ya9Mqh4+(q#?cX3=#p-HV=Yc{$GgMRSzonCzOxSZvkD7^5-c&f zHTa7P2M@|6#8m=J0mt{B)^GaA2T4WFtD)eva-+Eh|Jb?pCU(JbEjmNqT0Z2MYNTT@ zl1<*|Nw_S{bpM{@S{<8ABAu%I#C-cHr^&(E!Tjtcu|l&>1D3K2R=0 zX(uuZXHE{WHIiJ{fiH2FwRrGvD;!TNpJiE)RhqgZt<-^&S%|zaRIW?2Y3wATn}c^% zII6@fIz=AzO;=^@R5;kxJnNkN3Vg7_9(FU?`ct4pF7{j4Z5fbtY~eFtnHBpCh>l{l zKFZ3L>vGG@B|ym)SXDA^nOoGN%|>-T$K#rXM_kF!n61Ijc;-GFa`}!RuF#m}A=`S} zshM?^yB86AWsJy&;GJ$JFF7oTigfW>@j0ex2Ov=G5GLnoJ`;6q0YrmdMAS^m%5gbGJUBbW&N1DaA zHSeD6NUNON!}MeVLn>!B_hd&}&xf8&Cdev1Svx>aW+`%hjcp@av7~!)tG8+1JsB@L z(dBY%(yu4uhrlN9*^|jgqW6k@dorG^%4@=ZCah<;{0S$h^enM?x( zujgT%OyF8{fgtN-!Wc}Y=h>6Vr0Lz03A0sMXsCoFZA`tYqqt4z zo=nb6?L8R|x$Zq#9D`kFvsGDWsDvcr!Luima=>IH8rDK58W}Ivo(#S{nUw81dop1_)~@|} zGJ&w@ltsV@-IECeGaS|0ldaU&lL-u|oY~xyt#CaV^&qY}tJZ+=O-a_+!G}EeF3JSS z^klCz?9dF*ky#47FyWE}U0O5mj*J(a=x{+gGJc349hr<|fR2nOtMZv}2{3p^CSzbW z`LE21eg5<9$Yfj_(x2BV89!d2j*KTNI${wvLU&~RxP%#FNA{ByuEmk;8q5N&vLCYx z*IwNi&$i0dt!>-WMRZ(6D_!hwOcM2A`xM~nVbR3DGSqw&Mu`rz!vw&(h|XXYtaDs*R} z^>KmDuFGGEE+Z3oS7KAvZN10uwF61h%Jf=Sve|F6G3kxZcBlQ((EbJ(nOHbeQTbV! 
z*W%0${%@u?#hT~{kTv+x2E2*orfM!{O-+&5{4x+b0RG+&Sd?lO{&my!7&2Vau(MRD zE>uoq$I2!0uvboF35q%_%BIkniX9byZ<{`w@JoL0?{PMuQWuxed|uz>M$5TgNoJw4 zz!;Yp58thPNt|z--UnP zw+@R@Jhwc5^5n@|mv5o@`6#sG!=?Pp2UhL^lZ}3NVt3KPuSf85?UZ9U(H-}@8-w17 zwiU@VG*3=<$Gxr2uzRK#HxNUw26nE;#DSd^ z`)!j+|I{#Ez+V$R$9z^V6XSGP1PxyvFNgie3i{}H$zCVt4egOpUDAVuyo1hiK}PXp ze|Re0P6NLh!ID;$t#J>Xpjlj5q!&Xt&J;X=+Fjb!+~uiffB3w+OFrf2wYw0p+qp~r zX_dPWV$VN!Av3pgm;BT9yUUlQS1+?3&u0W&uH_xbZCl+lt-Epd=|6)q5&AcpkPP&4 zl7TK?k5L9%MD=o((U#}Yo#}JSH_=SgfFC<4hX+^iG~O6{za)p~&i zjX#axR+gj6dPUt2TjP5BwX6X4Z~!szz;_yOrzgW=;P4fo?CM%}iVjvLap52?;}lQe ze?^|58aa{>K73_v>PACI3w}QW$vxyI)u!!dOJ8wtiBfln*-J=ia_RbhRGQ4EP+ne^ z+(65cLvT&}YOAs&of>>4f}2?$>_P_RiVGK#D#LYN%c85JU*v7>!YXFmR&4O)hD)}i zEQ$fbM_fr*3tE(qEqThi4KHF!7Q za%ju~{@AJAVEEz!fvs!G$^%y;G)D~h6Q?GFVNwhQ4rMPx&85j;9xqp;{`MUtpujhs z+6$I7p+4}Fa{#gETnFuoku0yW;ZRcYp+KNTBU%~k+38L?Q*==EXiORn#RlC*ARq}{#(A8)(b1piVxY;3v|lxg-CYOf-lF}0s!H&ybge7?Kf>c7^;BS zA8$IeiBvSIN^kzo{$LD$IF*l5x>98yOVM*ERQxXli)^x@6@e zEJeaHS!=lp26Sy_bp%piQb=3AQrEP#g46kk5D*$DIoh~V*g$e90qUw0R%2#jL%cRU z!WSdi)=qz(FN`Gi6HPeA@?zE0^R_J0XpGa?EQ}?2xQ6++a4Ms_{=%jzlefbEh~PGF zuDE1&!R4+}69(UH!0Wx47Sox(7?V2uqi8gI;&Hf?_Pxjf9WR9eA*uPx-sC7@#0by1 zKsAVa+J}}?8tG2B@=((oE^9Cyhh>m_a_}`5u!iM~3nHb8*coY+!8smme!~T>Vc8VJ zMs*m0rRD(Fu#cCmsjzc*At{1|&L$@CP!(77!8~Cuc|R-stRrMS_igErtFCAlE)SJCvLn*fDDN3l5yF_ zU%62j+^}6ThJv@eDRx)_Fc22a#0A?m4N*%&Oa?X8`QV1_67rcoxJvedX@|)a%k8im zwoB+?6BKqjIs{RUo=ei=2uPBaJmFMFWs3ms(ovIHI%2!zaV&NJ)Wy*=IY%JQ-LPG5 zrm|XBj7=o8Ul1wG4cj&I%wf9>r^+ZC7=doTxlQK0C8?Fu9$DUGfg zcnLOCVgxsAS7cL2YW}hhleSFB(GlBi+hDtaAzM5q-=F9@PB(0q;fxC+fxw%biGbjZ z8@9`^DTWOP#F3%qg6%SoOTtANWi4F81wYGGAdeq9gL1=mS>}Ww;V0=JQqb}1hV3%^ zN~@#%_4M-`9Ne&7jyEnma5aWTsG$~iaw9gz5l3v7V^je4KW$iSfRUNyM%nE4|S@VQ9Dd#W7!V7VY_4y zY=ZvSE=SKLX>kN3Nz1DdCd3u{7AzDizRT{mZL`+hV2sa z8RxQESBy;r+pS^WE}07(;B~iV!LDvC3o)Fu9388 zEH2otc@Uh+Ou>N$rZD?@$+R5z?UKiHtvuh7ncKcyfoZV^bHEn`iZ0l$K;ptrX>QoA z$fl6g{AE9*wC1*N_sDs`cJY{eXxE@9j{9~Q&bT0AL{Y3K77E_MGX*znmtj*38xDvg 
zL(K)-Wggds3m5bOlUCHf8taijKj4P#vdjrV!cWpgfbbBEMg)%gb{T$U^)9Jzt-;X1 zATXXDxM8~-Z(MlrP(iJc#Sz=(7!?DA1J=mVb8D0J*e$FnA-oauOMmkW>z95&>ZCo(z5eY@mw1#j-yu7Pl=C3e7e zO+(az?UF%_z>^!cOUP&X;8K+fN=B&JVY7TGYyuL>yC%R5+a;64Cg_jta`ar17Du4$ z??qhpQi(imSx7w$w#zdqRJabQi=!7vL#l-#*vyGGKYvAAHn=0VPHDs$VnD{#dnvr_XU+d9jej3F4v z;2EFz?6_}NU|Q_K9PmYfq6@Ywkht(unj5w&vMD4rf7zRAzA$Oeao_II^MLK*F?m1L zK~rw~b{WpNAi}j;nqg(3!3A%~xh=&m@hAvKY?onE3>yxJBSXyv+hrb?gp1Joc5}(f zOJa>?9kE@OIUz{+NxBFC9`>c<*KOY}!>_FNBlT@y$TnB1t(Q7tyBu#^c<@kx(Z#Jp zo+S1-V!IrpVt{aJ*cfQmtxaYKT*yJf?Mwz<-a?r0m3MvncJZ{+z<8p{iDKMsaN0w6 zNwnbnT3?(;{9z3EzM%AQ#wb6Se~lirdp#VD+lGU7C3A(ps(9{(ghm;1D}@0`mlE-z zz$Az=YPRgp{5zWPLqU`+fNzyfGEqr1smTr{OT9(<^_Hn(>Di!OczXlx4x$Mm-UQKv zp?!%0iZ6X{Kyr63D;oa1ZXi#C0{!ob=Lo7f=Bk)F0*2LcK()Y@EDgoM zl_$d`7gcN{S`9IyT=<9)4SvL@mtt5JtC%2i^ysD7sg+(&Ra7uKtJ6WizRvZwAd$UX zxdhQcQMY)qsS8xlm)%gGy-hvnb;m<=g?nd=E`BEkNcm+fc=&*m~J6!gb>(*`C+k+!uN;2cNe^Yj5+t!ws-8x@CDYII2m9Yk&+y(c0Su837$ZCenYOiLAORSbY?hrNUv)RHz z;;{Xsm^;c8FQ~w~#mXIrA!`dpat#oAUG|0%eN+s-*)?SFYreGu;#(blO|?V*utN-` z!y+4W^MV&<5bpVzaELJMP(I|^lv;4Tfw(wT($6ZX2JnI~YIRY00aj%s%Go7|= zAf~VRxkOzee5uXq7i8BY)pvkEOpbJu8L}}D~teDW0u96^H6cR469IFq$z6%~KV=;EC8r&u*9Z-CT9f1dmB{Y&d z*pFA*ZAK=Mn)IC^sJx&oyI+=w4Q|Y-$PzrJWrqJ=a+#y(V{#;M7Wl$0kX*>_VCZTX z`5IQf8Z%ERBmp*Sc{mDY0=dZx%D~^kD!_hWgfnnkM1xwm??x7xeXA3J)V6VnNyv8Pidq;f+F|6-4FXCR8nofU0V=5~7NqH;UA z7?t~Ir`XxE|uYafJygkk3YKj#jxvacd*loFInHaxI3O(-*slR)f=#{ zM(kD<+3*MRa4;iz4ADK&ZcTf`$!OdecF*)C|3t_PpwyjY}a4O22>vUD%$CF(_%*6ff_%TH^1w~RX)3g=GLYE_%b*UzbHFMDf=KkKvq>ke|;Wantkj6^g7XK ztcc%Bm!N&0XOY{j-hTgstA{T}>CraX3H$%=C~Y56e;oB{f}trwx@c_HwL_Iu0H2Fs zKR=&%5A^}g;dK6yn0OMIxdZlhMmy8(ooOdye-gLh@M_~=ROO#GpnN6DQbkgcnELk#&QZ)9GOm~a$|EnfOROEyvgWYWBp*m&w`$Gd z*KYq%VDP9lgQT<)b$7IIMF(zjstRs2s*;Em{#an{I?9}VJt(qwon)YbW0&o5BtI*7 zR%;NyyC-V!ddj59L`f+tvUt6gMHi*!?-Nw_KT(4>Q09ypr1iJR-VIvzTr^0A3Z%gs zDU(JG=B|#P+as*CJ6tPlxig1 zAEsOyRhai`2gF2wSUX)89paG`veW4h9gUC6Ey7;0Xkk<$X)%)$gDZ-e%vy_NhVc0a zu%(T0r`hQa2Kkyr-qK?Jll{2CCDKS<#Q%I){Kbm!L|gdzi171K;pgjwpRX5wzCrl; 
zM&ak1;N6$Oe1F)V=Ik8dmRNWcMzWh|GU=ZhCfC_MM3#N=W3SOXR)ClJkyOdG#!vIq zji1j83PS&|i9Y&#)e*=(9L%qI1d=YuoXkAn=u19t9)ScV@`QN=92>~z%_9(}Ay0fi zCs({xfZ0`I`WV@fkvrHAN~ozJaEZmDt(RoTHw2)Cj3S?z2Q^|=%-rsEpBqE1nBDD_ z=&|0%OzL$$M2fk%kYSi20>8^)HVhfB`JjjKvLPcKhxxE%iut$^Vpt;*y1TlB&@322 z#he^;mNyZ2H8yt=7uHA+MWQa|<3@;K%|}K>nC9l>A|n#NJ?)4g<0qpcKQJ?Lu_KO9 zF((%pkqpGl$VL4OnPNU}gaX-7kt$g`T52r5bm>*Fn5D|jXgf~eplajH(s%BKJ!lVJ zzq>I&OEh1-H~z}K@VgP1itpUXR}aS;)DdR^!Yi&}Y(JhJr?+~zeY-k0bJW?hJ;!LF zbabY(Gwh!njnAwezA=JlBQSMKSv*GkXrN*?t6^^JKAiJ%qluR+Be0x2gU+~{WDGnP z!Ls3TWRqCbhzZjve)O{>irGJqGxIvz*>J~)OH}jQW@AIMO+{m2Z22b%S0-a4AJ2JO zHS^@DA`-my^; z`K+~y=mWMYq7SePIjRWfcpDXwYQR!O`NwTllz*J%sG5pM+Ots+`K+~y=mWMYq7SeP zIjRVk7&a;*#fYVf@{il9DE~OiQK6!5MxcpBT+2)o(ouD2l{`R{Rq?5uyC1AJ7R#hW zZDJpuMnOpkm@KRxm(aeDtx12`REhSb{UD%SKR zPc;2I*pF8xKpe%BO3wZp{pKPuG5Ey>R6hZhM-Y&Z?(~`%s`7F#^QMyvO7cEBJ44iDR5WgmMn%(EadR|=0I?tk zM`NkDI~q$xW<5@hhAfeE=h%X(I|xsLh{>Cdv!e;~k%{Vk4d7q}D@Wt$W)}7O%4RMS zA>s-^DHoXJNEmjOlE_q=lT2hz4u~jIt95cfmH-h|bqD0AI6ELmMJ6Iw2c#m(Y|6=T z7!pKG-gJB%keyFBQBkxaubdoMgZZ0wrxy34GZmzM4BS9?6$&uL@ z#!801=f#tYWF;VseO8EESoE++r0&)iqXeB#4+CViixv#{uh!RTRH=k5wEA4^i=S z+(pIHky({ntm5dp#wvjb5tl=(5-GXIDv^>*WNxvFtW!A0Duw_Nl|!szskp}~mWoV7 zZn27?>Kdy!5=2Z6v5KeTn#3CFfFh@m)2rUe;Tyk#;GBcT_ z{Z<1i1q>D2nZex?Vwwmb$=jY#ZC5@>cZF@CTS;-+T=Zr$xXN$My>d?85XqB{{_ zRx+`9IWJ{6vpsizW9Sgkg(2qcyzI;*;#*pUg~4btLHq3v!?p05vXaj9y2H-otvlUu zZ>!TAk4NKHz9hQz_b-RbvKW_c*h^2(%XjX5`?C1|z3k%7J$ibT1nJJvXuM Date: Mon, 4 May 2020 12:50:36 -0700 Subject: [PATCH 040/335] minor cleanup --- flow/controllers/imitation_learning/imitating_network.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 0ea5c32c8..3b1e826da 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -127,10 +127,6 @@ def define_forward_pass(self): # build forward pass and get the tensor for 
output of last layer network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) - # unpack array of array into just array - # if self.stochastic: - # # network_output = network_output[0] - # parse the mean and covariance from output if stochastic, and set up distribution if self.stochastic: # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution From d15f19b0d5ca4529374d62553f4dd677c39f5008 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Tue, 5 May 2020 11:14:57 -0700 Subject: [PATCH 041/335] fix train multiagent_i210 (#915) --- flow/envs/multiagent/i210.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index f931b3bec..09705ccf5 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -19,7 +19,8 @@ # whether we use an obs space that contains adjacent lane info or just the lead obs "lead_obs": True, # whether the reward should come from local vehicles instead of global rewards - "local_reward": True + "local_reward": True, + "target_velocity": 25 } @@ -64,6 +65,7 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): super().__init__(env_params, sim_params, network, simulator) self.lead_obs = env_params.additional_params.get("lead_obs") self.max_lanes = MAX_LANES + self.leader = [] @property def observation_space(self): From ba2e214c6d263c2e8b107f8edde1666f3cf282d9 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Fri, 8 May 2020 15:43:03 -0700 Subject: [PATCH 042/335] Add option to reroute exiting vehicles back into the network (#918) Add option to reroute exiting vehicles back into the network --- .../rl/multiagent/multiagent_i210.py | 14 +- .../rl/multiagent/multiagent_straight_road.py | 12 +- .../singleagent/singleagent_straight_road.py | 164 +++++++++++++ 
flow/envs/__init__.py | 2 + flow/envs/base.py | 13 + flow/envs/multiagent/base.py | 9 + flow/envs/multiagent/i210.py | 105 +++++--- flow/envs/straightroad_env.py | 231 ++++++++++++++++++ flow/utils/rllib.py | 2 +- flow/visualize/visualizer_rllib.py | 4 +- 10 files changed, 510 insertions(+), 46 deletions(-) create mode 100644 examples/exp_configs/rl/singleagent/singleagent_straight_road.py create mode 100644 flow/envs/straightroad_env.py diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 327282e28..b74f64027 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -26,7 +26,7 @@ # SET UP PARAMETERS FOR THE SIMULATION # number of steps per rollout -HORIZON = 4000 +HORIZON = 2000 VEH_PER_HOUR_BASE_119257914 = 10800 VEH_PER_HOUR_BASE_27414345 = 321 @@ -43,7 +43,10 @@ # configure the observation space. Look at the I210MultiEnv class for more info. 
'lead_obs': True, # whether to add in a reward for the speed of nearby vehicles - "local_reward": True + "local_reward": True, + # whether to reroute vehicles once they have exited + "reroute_on_exit": True, + 'target_velocity': 18, }) # CREATE VEHICLE TYPES AND INFLOWS @@ -114,6 +117,10 @@ config.PROJECT_PATH, "examples/exp_configs/templates/sumo/test2.net.xml") +warmup_steps = 0 +if additional_env_params['reroute_on_exit']: + warmup_steps = 400 + flow_params = dict( # name of the experiment exp_tag='I_210_subnetwork', @@ -140,8 +147,9 @@ env=EnvParams( horizon=HORIZON, sims_per_step=1, - warmup_steps=0, + warmup_steps=warmup_steps, additional_params=additional_env_params, + done_at_exit=False ), # network-related parameters (see flow.core.params.NetParams and the diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index 9ed38656f..a15471539 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -48,7 +48,9 @@ 'max_decel': 4.5, 'target_velocity': 18, 'local_reward': True, - 'lead_obs': True + 'lead_obs': True, + # whether to reroute vehicles once they have exited + "reroute_on_exit": True }) @@ -69,6 +71,7 @@ # autonomous vehicles vehicles.add( + color='red', veh_id='rl', acceleration_controller=(RLController, {})) @@ -92,6 +95,9 @@ name="rl_highway_inflow") # SET UP FLOW PARAMETERS +warmup_steps = 0 +if additional_env_params['reroute_on_exit']: + warmup_steps = 400 flow_params = dict( # name of the experiment @@ -109,7 +115,7 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, + warmup_steps=warmup_steps, sims_per_step=1, # do not put more than one additional_params=additional_env_params, ), @@ -119,7 +125,7 @@ sim_step=0.5, render=False, use_ballistic=True, - restart_instance=False + restart_instance=True ), # 
network-related parameters (see flow.core.params.NetParams and the diff --git a/examples/exp_configs/rl/singleagent/singleagent_straight_road.py b/examples/exp_configs/rl/singleagent/singleagent_straight_road.py new file mode 100644 index 000000000..265d34d42 --- /dev/null +++ b/examples/exp_configs/rl/singleagent/singleagent_straight_road.py @@ -0,0 +1,164 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. +""" +from flow.controllers import RLController, IDMController +from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ + VehicleParams, SumoParams, SumoLaneChangeParams +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS +from flow.networks import HighwayNetwork +from flow.envs import SingleStraightRoad +from flow.networks.highway import ADDITIONAL_NET_PARAMS +from flow.utils.registry import make_create_env +from ray.tune.registry import register_env + + +# SET UP PARAMETERS FOR THE SIMULATION + +# number of steps per rollout +HORIZON = 2000 + +# inflow rate on the highway in vehicles per hour +HIGHWAY_INFLOW_RATE = 10800 / 5 +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10 + + +# SET UP PARAMETERS FOR THE NETWORK + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2000, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2 +}) + + +# SET UP PARAMETERS FOR THE ENVIRONMENT + +additional_env_params = ADDITIONAL_ENV_PARAMS.copy() +additional_env_params.update({ + 'max_accel': 2.6, + 'max_decel': 4.5, + 'target_velocity': 18.0, + 'local_reward': True, + 'lead_obs': True, + "terminate_on_wave": False, + # the environment is not allowed to terminate below this horizon length + 'wave_termination_horizon': 1000, + # the speed 
below which we consider a wave to have occurred + 'wave_termination_speed': 10.0, + # whether the vehicle continues to acquire reward after it exits the system. This causes it to have incentive + # to leave the network in a good state after it leaves + 'reward_after_exit': True +}) + + +# CREATE VEHICLE TYPES AND INFLOWS + +vehicles = VehicleParams() +inflows = InFlows() + +# human vehicles +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), +) + +# autonomous vehicles +vehicles.add( + veh_id='rl', + acceleration_controller=(RLController, {})) + +# add human vehicles on the highway +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="idm_highway_inflow") + +# add autonomous vehicles on the highway +# they will stay on the highway, i.e. 
they won't exit through the off-ramps +inflows.add( + veh_type="rl", + edge="highway_0", + vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23.0", + name="rl_highway_inflow") + +# SET UP FLOW PARAMETERS +done_at_exit = True +if additional_env_params['reward_after_exit']: + done_at_exit = False + +flow_params = dict( + # name of the experiment + exp_tag='singleagent_highway', + + # name of the flow environment the experiment is running on + env_name=SingleStraightRoad, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, # do not put more than one + done_at_exit=done_at_exit, + additional_params=additional_env_params, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + use_ballistic=True, + restart_instance=True + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + + +# SET UP RLLIB MULTI-AGENT FEATURES + +create_env, env_name = make_create_env(params=flow_params, version=0) + +# register as rllib env +register_env(env_name, create_env) + +# multiagent configuration +test_env = create_env() +obs_space = test_env.observation_space +act_space = test_env.action_space diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py index 5befe6a33..6f4351cc0 100755 --- 
a/flow/envs/__init__.py +++ b/flow/envs/__init__.py @@ -11,6 +11,7 @@ from flow.envs.ring.wave_attenuation import WaveAttenuationEnv, \ WaveAttenuationPOEnv from flow.envs.merge import MergePOEnv +from flow.envs.straightroad_env import SingleStraightRoad from flow.envs.test import TestEnv # deprecated classes whose names have changed @@ -36,6 +37,7 @@ 'BottleneckDesiredVelocityEnv', 'TestEnv', 'BayBridgeEnv', + 'SingleStraightRoad', # deprecated classes 'BottleNeckAccelEnv', 'DesiredVelocityEnv', diff --git a/flow/envs/base.py b/flow/envs/base.py index 1abb8a3c9..adc959b9a 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -148,6 +148,10 @@ def __init__(self, self.state = None self.obs_var_labels = [] + # track IDs that have ever been observed in the system + self.observed_ids = set() + self.observed_rl_ids = set() + # simulation step size self.sim_step = sim_params.sim_step @@ -322,6 +326,11 @@ def step(self, rl_actions): contains other diagnostic information from the previous action """ for _ in range(self.env_params.sims_per_step): + # This tracks vehicles that have appeared during warmup steps + if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: + self.observed_ids.update(self.k.vehicle.get_ids()) + self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self.time_counter += 1 self.step_counter += 1 @@ -430,6 +439,10 @@ def reset(self): # reset the time counter self.time_counter = 0 + # reset the observed ids + self.observed_ids = set() + self.observed_rl_ids = set() + # Now that we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things if self.should_render: diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index dfc7c72ad..126107b00 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -49,6 +49,10 @@ def step(self, rl_actions): contains other diagnostic information from the previous action """ for _ in 
range(self.env_params.sims_per_step): + if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: + self.observed_ids.update(self.k.vehicle.get_ids()) + self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self.time_counter += 1 self.step_counter += 1 @@ -103,6 +107,7 @@ def step(self, rl_actions): # stop collecting new simulation steps if there is a collision if crash: + print('A CRASH! A CRASH!!!!!! AAAAAAAAAH!!!!!') break states = self.get_state() @@ -149,6 +154,10 @@ def reset(self, new_inflow_rate=None): # reset the time counter self.time_counter = 0 + # reset the observed ids + self.observed_ids = set() + self.observed_rl_ids = set() + # Now that we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things if self.should_render: diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 09705ccf5..a6e39cdec 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -3,7 +3,6 @@ from gym.spaces import Box import numpy as np -from flow.core.rewards import average_velocity from flow.envs.multiagent.base import MultiEnv # largest number of lanes on any given edge in the network @@ -64,7 +63,11 @@ class I210MultiEnv(MultiEnv): def __init__(self, env_params, sim_params, network, simulator='traci'): super().__init__(env_params, sim_params, network, simulator) self.lead_obs = env_params.additional_params.get("lead_obs") + self.reroute_on_exit = env_params.additional_params.get("reroute_on_exit") self.max_lanes = MAX_LANES + self.num_enter_lanes = 5 + self.entrance_edge = "119257914" + self.exit_edge = "119257908#3" self.leader = [] @property @@ -132,8 +135,8 @@ def get_state(self): else: lead_speed = self.k.vehicle.get_speed(lead_id) headway = self.k.vehicle.get_headway(rl_id) - self.leader.append(lead_id) - veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway /HEADWAY_SCALE, lead_speed / SPEED_SCALE])}) + veh_info.update({rl_id: 
np.array([speed / SPEED_SCALE, headway / HEADWAY_SCALE, + lead_speed / SPEED_SCALE])}) else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) @@ -158,46 +161,25 @@ def compute_reward(self, rl_actions, **kwargs): if self.k.vehicle.get_speed(rl_id) >= 0: speeds.append(self.k.vehicle.get_speed(rl_id)) if len(speeds) > 0: - # rescale so the q function can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed))**2 - for speed in speeds]) / (des_speed**2) + # rescale so the critic can estimate it quickly + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2) else: - for rl_id in self.k.vehicle.get_rl_ids(): - if self.env_params.evaluate: - # reward is speed of vehicle if we are in evaluation mode - reward = self.k.vehicle.get_speed(rl_id) - elif kwargs['fail']: - # reward is 0 if a collision occurred - reward = 0 - else: - # reward high system-level velocities - cost1 = average_velocity(self, fail=kwargs['fail']) - - # penalize small time headways - cost2 = 0 - t_min = 1 # smallest acceptable time headway - - lead_id = self.k.vehicle.get_leader(rl_id) - if lead_id not in ["", None] \ - and self.k.vehicle.get_speed(rl_id) > 0: - t_headway = max( - self.k.vehicle.get_headway(rl_id) / - self.k.vehicle.get_speed(rl_id), 0) - cost2 += min((t_headway - t_min) / t_min, 0) - - # weights for cost1, cost2, and cost3, respectively - eta1, eta2 = 1.00, 0.10 - - reward = max(eta1 * cost1 + eta2 * cost2, 0) - - rewards[rl_id] = reward + speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) + des_speed = self.env_params.additional_params["target_velocity"] + # rescale so the critic can estimate it quickly + reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2)) + rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()} return rewards def additional_command(self): """See 
parent class. - Define which vehicles are observed for visualization purposes. + Define which vehicles are observed for visualization purposes. Additionally, optionally reroute vehicles + back once they have exited. """ + super().additional_command() # specify observed vehicles for rl_id in self.k.vehicle.get_rl_ids(): # leader @@ -205,6 +187,41 @@ def additional_command(self): if lead_id: self.k.vehicle.set_observed(lead_id) + if self.reroute_on_exit and self.time_counter >= self.env_params.sims_per_step * self.env_params.warmup_steps \ + and not self.env_params.evaluate: + veh_ids = self.k.vehicle.get_ids() + edges = self.k.vehicle.get_edge(veh_ids) + for veh_id, edge in zip(veh_ids, edges): + if edge == "": + continue + if edge[0] == ":": # center edge + continue + # on the exit edge, near the end, and is the vehicle furthest along + if edge == self.exit_edge and \ + (self.k.vehicle.get_position(veh_id) > self.k.network.edge_length(self.exit_edge) - 100) \ + and self.k.vehicle.get_leader(veh_id) is None: + type_id = self.k.vehicle.get_type(veh_id) + # remove the vehicle + self.k.vehicle.remove(veh_id) + lane = np.random.randint(low=0, high=self.num_enter_lanes) + # reintroduce it at the start of the network + # TODO(@evinitsky) select the lane and speed a bit more cleanly + # Note, the position is 10 so you are not overlapping with the inflow car that is being removed. + # this allows the vehicle to be immediately inserted. + self.k.vehicle.add( + veh_id=veh_id, + edge=self.entrance_edge, + type_id=str(type_id), + lane=str(lane), + pos="10.0", + speed="23.0") + + departed_ids = self.k.vehicle.get_departed_ids() + if len(departed_ids) > 0: + for veh_id in departed_ids: + if veh_id not in self.observed_ids: + self.k.vehicle.remove(veh_id) + def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. 
@@ -245,13 +262,27 @@ def veh_statistics(self, rl_id): lane = (self.k.vehicle.get_lane(rl_id) + 1) / 10.0 return np.array([speed, lane]) + def step(self, rl_actions): + """See parent class for more details; add option to reroute vehicles.""" + state, reward, done, info = super().step(rl_actions) + # handle the edge case where a vehicle hasn't been put back when the rollout terminates + if self.reroute_on_exit and done['__all__']: + for rl_id in self.observed_rl_ids: + if rl_id not in state.keys(): + done[rl_id] = True + reward[rl_id] = 0 + state[rl_id] = -1 * np.ones(self.observation_space.shape[0]) + return state, reward, done, info + class MultiStraightRoad(I210MultiEnv): """Partially observable multi-agent environment for a straight road. Look at superclass for more information.""" def __init__(self, env_params, sim_params, network, simulator): super().__init__(env_params, sim_params, network, simulator) - self.max_lanes = 1 + self.num_enter_lanes = 1 + self.entrance_edge = self.network.routes['highway_0'][0][0][0] + self.exit_edge = self.network.routes['highway_0'][0][0][-1] def _apply_rl_actions(self, rl_actions): """See class definition.""" diff --git a/flow/envs/straightroad_env.py b/flow/envs/straightroad_env.py new file mode 100644 index 000000000..92fbb855b --- /dev/null +++ b/flow/envs/straightroad_env.py @@ -0,0 +1,231 @@ +"""Environment for training vehicles to reduce congestion in the I210.""" + +from gym.spaces import Box +import numpy as np + +from flow.envs.base import Env + +# largest number of lanes on any given edge in the network +MAX_LANES = 6 +MAX_NUM_VEHS = 8 +SPEED_SCALE = 50 +HEADWAY_SCALE = 1000 + +ADDITIONAL_ENV_PARAMS = { + # maximum acceleration for autonomous vehicles, in m/s^2 + "max_accel": 1, + # maximum deceleration for autonomous vehicles, in m/s^2 + "max_decel": 1, + # whether we use an obs space that contains adjacent lane info or just the lead obs + "lead_obs": True, + # whether the reward should come from local vehicles 
instead of global rewards + "local_reward": True, + # if the environment terminates once a wave has occurred + "terminate_on_wave": False, + # the environment is not allowed to terminate below this horizon length + 'wave_termination_horizon': 500, + # the speed below which we consider a wave to have occurred + 'wave_termination_speed': 10.0 +} + + +class I210SingleEnv(Env): + """Partially observable single-agent environment for the I-210 subnetworks. + + The policy is shared among the agents, so there can be a non-constant + number of RL vehicles throughout the simulation. + Required from env_params: + * max_accel: maximum acceleration for autonomous vehicles, in m/s^2 + * max_decel: maximum deceleration for autonomous vehicles, in m/s^2 + The following states, actions and rewards are considered for one autonomous + vehicle only, as they will be computed in the same way for each of them. + States + The observation consists of the speeds and bumper-to-bumper headways of + the vehicles immediately preceding and following autonomous vehicles in + all of the preceding lanes as well, a binary value indicating which of + these vehicles is autonomous, and the speed of the autonomous vehicle. + Missing vehicles are padded with zeros. + Actions + The action consists of an acceleration, bound according to the + environment parameters, as well as three values that will be converted + into probabilities via softmax to decide on a lane change (left, none + or right). NOTE: lane changing is currently not enabled. It's a TODO. + Rewards + The reward function encourages proximity of the system-level velocity + to a desired velocity specified in the environment parameters, while + slightly penalizing small time headways among autonomous vehicles. + Termination + A rollout is terminated if the time horizon is reached or if two + vehicles collide into one another. 
+ """ + + def __init__(self, env_params, sim_params, network, simulator='traci'): + super().__init__(env_params, sim_params, network, simulator) + self.lead_obs = env_params.additional_params.get("lead_obs") + self.max_lanes = MAX_LANES + self.total_reward = 0.0 + + @property + def observation_space(self): + """See class definition.""" + # speed, speed of leader, headway + if self.lead_obs: + return Box( + low=-float('inf'), + high=float('inf'), + shape=(3 * MAX_NUM_VEHS,), + dtype=np.float32 + ) + # speed, dist to ego vehicle, binary value which is 1 if the vehicle is + # an AV + else: + leading_obs = 3 * self.max_lanes + follow_obs = 3 * self.max_lanes + + # speed and lane + self_obs = 2 + + return Box( + low=-float('inf'), + high=float('inf'), + shape=(leading_obs + follow_obs + self_obs,), + dtype=np.float32 + ) + + @property + def action_space(self): + """See class definition.""" + return Box( + low=-np.abs(self.env_params.additional_params['max_decel']), + high=self.env_params.additional_params['max_accel'], + shape=(1 * MAX_NUM_VEHS,), # (4,), + dtype=np.float32) + + def _apply_rl_actions(self, rl_actions): + """See class definition.""" + # in the warmup steps, rl_actions is None + if rl_actions is not None: + accels = [] + veh_ids = [] + rl_ids = self.get_sorted_rl_ids() + + for i, rl_id in enumerate(self.rl_id_list): + accels.append(rl_actions[i]) + veh_ids.append(rl_id) + + # lane_change_softmax = np.exp(actions[1:4]) + # lane_change_softmax /= np.sum(lane_change_softmax) + # lane_change_action = np.random.choice([-1, 0, 1], + # p=lane_change_softmax) + + self.k.vehicle.apply_acceleration(rl_ids, accels) + + def get_state(self): + """See class definition.""" + rl_ids = self.get_sorted_rl_ids() + self.rl_id_list = rl_ids + veh_info = np.zeros(self.observation_space.shape[0]) + per_vehicle_obs = 3 + for i, rl_id in enumerate(rl_ids): + speed = self.k.vehicle.get_speed(rl_id) + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id in ["", None]: + # in 
case leader is not visible + lead_speed = SPEED_SCALE + headway = HEADWAY_SCALE + else: + lead_speed = self.k.vehicle.get_speed(lead_id) + headway = self.k.vehicle.get_headway(rl_id) + veh_info[i * per_vehicle_obs: (i + 1) * per_vehicle_obs] = [speed / SPEED_SCALE, + headway / HEADWAY_SCALE, + lead_speed / SPEED_SCALE] + return veh_info + + def compute_reward(self, rl_actions, **kwargs): + """See class definition.""" + # in the warmup steps + if rl_actions is None: + return {} + + rl_ids = self.get_sorted_rl_ids() + + des_speed = self.env_params.additional_params["target_velocity"] + rewards = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in self.k.vehicle.get_speed(rl_ids)])) / (des_speed ** 2) + return rewards + + def get_sorted_rl_ids(self): + """Return the MAX_NUM_VEHS closest to the exit.""" + rl_ids = self.k.vehicle.get_rl_ids() + rl_ids = sorted(rl_ids, key=lambda veh_id: self.k.vehicle.get_x_by_id(veh_id)) + rl_ids = rl_ids[-MAX_NUM_VEHS:] + return rl_ids + + def additional_command(self): + """Define which vehicles are observed for visualization purposes.""" + # specify observed vehicles + for rl_id in self.k.vehicle.get_rl_ids(): + # leader + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id: + self.k.vehicle.set_observed(lead_id) + + def state_util(self, rl_id): + """Return an array of headway, tailway, leader speed, follower speed. + + Also return a 1 if leader is rl 0 otherwise, a 1 if follower is rl 0 otherwise. + If there are fewer than MAX_LANES the extra + entries are filled with -1 to disambiguate from zeros. 
+ """ + veh = self.k.vehicle + lane_headways = veh.get_lane_headways(rl_id).copy() + lane_tailways = veh.get_lane_tailways(rl_id).copy() + lane_leader_speed = veh.get_lane_leaders_speed(rl_id).copy() + lane_follower_speed = veh.get_lane_followers_speed(rl_id).copy() + leader_ids = veh.get_lane_leaders(rl_id).copy() + follower_ids = veh.get_lane_followers(rl_id).copy() + rl_ids = self.k.vehicle.get_rl_ids() + is_leader_rl = [1 if l_id in rl_ids else 0 for l_id in leader_ids] + is_follow_rl = [1 if f_id in rl_ids else 0 for f_id in follower_ids] + diff = MAX_LANES - len(is_leader_rl) + if diff > 0: + # the minus 1 disambiguates missing cars from missing lanes + lane_headways += diff * [-1] + lane_tailways += diff * [-1] + lane_leader_speed += diff * [-1] + lane_follower_speed += diff * [-1] + is_leader_rl += diff * [-1] + is_follow_rl += diff * [-1] + lane_headways = np.asarray(lane_headways) / 1000 + lane_tailways = np.asarray(lane_tailways) / 1000 + lane_leader_speed = np.asarray(lane_leader_speed) / 100 + lane_follower_speed = np.asarray(lane_follower_speed) / 100 + return np.concatenate((lane_headways, lane_tailways, lane_leader_speed, + lane_follower_speed, is_leader_rl, + is_follow_rl)) + + def veh_statistics(self, rl_id): + """Return speed, edge information, and x, y about the vehicle itself.""" + speed = self.k.vehicle.get_speed(rl_id) / 100.0 + lane = (self.k.vehicle.get_lane(rl_id) + 1) / 10.0 + return np.array([speed, lane]) + + +class SingleStraightRoad(I210SingleEnv): + """Partially observable multi-agent environment for a straight road. 
Look at superclass for more information.""" + + def __init__(self, env_params, sim_params, network, simulator): + super().__init__(env_params, sim_params, network, simulator) + self.max_lanes = 1 + + def step(self, rl_actions): + """See parent class.""" + obs, rew, done, info = super().step(rl_actions) + mean_speed = np.nan_to_num(np.mean(self.k.vehicle.get_speed(self.k.vehicle.get_ids()))) + if self.env_params.additional_params['terminate_on_wave'] and \ + mean_speed < self.env_params.additional_params['wave_termination_speed'] \ + and self.time_counter > self.env_params.additional_params['wave_termination_horizon'] \ + and len(self.k.vehicle.get_ids()) > 0: + done = True + + return obs, rew, done, info diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index 7d777d769..ca8072c85 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -146,7 +146,7 @@ def get_flow_params(config): if flow_params["net"]["inflows"]: net.inflows.__dict__ = flow_params["net"]["inflows"].copy() - if len(net.template) > 0: + if net.template and len(net.template) > 0: dirname = os.getcwd() filename = os.path.join(dirname, '../../examples') split = net.template.split('examples')[1][1:] diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 8c38a91c1..c1dd83193 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -166,7 +166,7 @@ def visualizer_rllib(args): if multiagent: rets = {} # map the agent id to its policy - policy_map_fn = config['multiagent']['policy_mapping_fn'].func + policy_map_fn = config['multiagent']['policy_mapping_fn'] for key in config['multiagent']['policies'].keys(): rets[key] = [] else: @@ -177,7 +177,7 @@ def visualizer_rllib(args): if multiagent: state_init = {} # map the agent id to its policy - policy_map_fn = config['multiagent']['policy_mapping_fn'].func + policy_map_fn = config['multiagent']['policy_mapping_fn'] size = config['model']['lstm_cell_size'] for key in 
config['multiagent']['policies'].keys(): state_init[key] = [np.zeros(size, np.float32), From 6ef1f0f231d3a3e643b0ee74b540846826733ff5 Mon Sep 17 00:00:00 2001 From: chendiw <31671291+chendiw@users.noreply.github.com> Date: Tue, 21 Apr 2020 15:14:31 -0700 Subject: [PATCH 043/335] moved imports under functions in train.py (#903) * deleting unworking params from SumoChangeLaneParams * deleted unworking params, sublane working in highway : * moved imports inside functions * Apply suggestions from code review * bug fixes * bug fix Co-authored-by: Aboudy Kreidieh --- examples/train.py | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index d688f2b9a..9cfaf28c6 100644 --- a/examples/train.py +++ b/examples/train.py @@ -126,6 +126,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -174,6 +177,13 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class + horizon = flow_params['env'].horizon alg_run = flags.algorithm.upper() @@ -255,6 +265,9 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + from ray.tune import run_experiments + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -301,7 +314,7 @@ def train_h_baselines(flow_params, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from 
hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy.envs import FlowEnv + from hbaselines.envs.mixed_autonomy import FlowEnv flow_params = deepcopy(flow_params) @@ -402,6 +415,9 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" + from stable_baselines.common.vec_env import DummyVecEnv + from stable_baselines import PPO2 + flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 771e504a413cdf0720de6304df7ebc48db43ddca Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Sat, 2 May 2020 02:51:06 -0700 Subject: [PATCH 044/335] Bando / ghost edge (#917) * added bando model * added ghost edge to the highway network * added highway-single example * bug fixes * more tests --- examples/exp_configs/non_rl/highway_single.py | 110 ++++++++++++++++++ flow/controllers/__init__.py | 5 +- flow/controllers/car_following_models.py | 83 +++++++++++++ flow/networks/highway.py | 80 +++++++++++-- tests/fast_tests/test_controllers.py | 58 ++++++++- tests/fast_tests/test_examples.py | 5 + tests/fast_tests/test_scenarios.py | 61 +++++++++- tests/fast_tests/test_vehicles.py | 16 ++- tests/setup_scripts.py | 4 +- 9 files changed, 405 insertions(+), 17 deletions(-) create mode 100644 examples/exp_configs/non_rl/highway_single.py diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py new file mode 100644 index 000000000..46b18c0e9 --- /dev/null +++ b/examples/exp_configs/non_rl/highway_single.py @@ -0,0 +1,110 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. 
+""" +from flow.controllers import BandoFTLController +from flow.core.params import EnvParams +from flow.core.params import NetParams +from flow.core.params import InitialConfig +from flow.core.params import InFlows +from flow.core.params import VehicleParams +from flow.core.params import SumoParams +from flow.core.params import SumoLaneChangeParams +from flow.networks import HighwayNetwork +from flow.envs import TestEnv +from flow.networks.highway import ADDITIONAL_NET_PARAMS + +TRAFFIC_SPEED = 11 +END_SPEED = 16 +TRAFFIC_FLOW = 2056 +HORIZON = 3600 +INCLUDE_NOISE = False + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2500, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2, + # whether to include a ghost edge of length 500m. This edge is provided a + # different speed limit. + "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED +}) + +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(BandoFTLController, { + 'alpha': .5, + 'beta': 20.0, + 'h_st': 12.0, + 'h_go': 50.0, + 'v_max': 30.0, + 'noise': 1.0 if INCLUDE_NOISE else 0.0, + }), +) + +inflows = InFlows() +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=TRAFFIC_FLOW, + depart_lane="free", + depart_speed=TRAFFIC_SPEED, + name="idm_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='highway-single', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) 
+ env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 6cb20077a..4dfcf05b7 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -14,7 +14,8 @@ from flow.controllers.base_controller import BaseController from flow.controllers.car_following_models import CFMController, \ BCMController, OVMController, LinearOVM, IDMController, \ - SimCarFollowingController, LACController, GippsController + SimCarFollowingController, LACController, GippsController, \ + BandoFTLController from flow.controllers.velocity_controllers import FollowerStopper, \ PISaturation, NonLocalFollowerStopper @@ -35,5 +36,5 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController" ] diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index f86c546e8..42c9b2a9b 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -580,3 +580,86 @@ def get_accel(self, env): v_next = min(v_acc, v_safe, 
self.v_desired) return (v_next-v)/env.sim_step + + +class BandoFTLController(BaseController): + """Bando follow-the-leader controller. + + Usage + ----- + See BaseController for usage example. + + Attributes + ---------- + veh_id : str + Vehicle ID for SUMO identification + car_following_params : flow.core.params.SumoCarFollowingParams + see parent class + alpha : float + gain on desired velocity to current velocity difference + (default: 0.6) + beta : float + gain on lead car velocity and self velocity difference + (default: 0.9) + h_st : float + headway for stopping (default: 5) + h_go : float + headway for full speed (default: 35) + v_max : float + max velocity (default: 30) + time_delay : float + time delay (default: 0.5) + noise : float + std dev of normal perturbation to the acceleration (default: 0) + fail_safe : str + type of flow-imposed failsafe the vehicle should posses, defaults + to no failsafe (None) + """ + + def __init__(self, + veh_id, + car_following_params, + alpha=.5, + beta=20, + h_st=2, + h_go=10, + v_max=32, + want_max_accel=False, + time_delay=0, + noise=0, + fail_safe=None): + """Instantiate an Bando controller.""" + BaseController.__init__( + self, + veh_id, + car_following_params, + delay=time_delay, + fail_safe=fail_safe, + noise=noise, + ) + self.veh_id = veh_id + self.v_max = v_max + self.alpha = alpha + self.beta = beta + self.h_st = h_st + self.h_go = h_go + self.want_max_accel = want_max_accel + + def get_accel(self, env): + """See parent class.""" + lead_id = env.k.vehicle.get_leader(self.veh_id) + if not lead_id: # no car ahead + if self.want_max_accel: + return self.max_accel + + v_l = env.k.vehicle.get_speed(lead_id) + v = env.k.vehicle.get_speed(self.veh_id) + s = env.k.vehicle.get_headway(self.veh_id) + return self.accel_func(v, v_l, s) + + def accel_func(self, v, v_l, s): + """Compute the acceleration function.""" + v_h = self.v_max * ((np.tanh(s/self.h_st-2)+np.tanh(2))/(1+np.tanh(2))) + s_dot = v_l - v + u = self.alpha * 
(v_h - v) + self.beta * s_dot/(s**2) + return u diff --git a/flow/networks/highway.py b/flow/networks/highway.py index e1234053c..7e9c18ad5 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -13,7 +13,12 @@ # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into - "num_edges": 1 + "num_edges": 1, + # whether to include a ghost edge of length 500m. This edge is provided a + # different speed limit. + "use_ghost_edge": False, + # speed limit for the ghost edge + "ghost_speed_limit": 25, } @@ -29,6 +34,9 @@ class HighwayNetwork(Network): * **lanes** : number of lanes in the highway * **speed_limit** : max speed limit of the highway * **num_edges** : number of edges to divide the highway into + * **use_ghost_edge** : whether to include a ghost edge of length 500m. This + edge is provided a different speed limit. + * **ghost_speed_limit** : speed limit for the ghost edge Usage ----- @@ -62,9 +70,7 @@ def __init__(self, if p not in net_params.additional_params: raise KeyError('Network parameter "{}" not supplied'.format(p)) - self.length = net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = 500 super().__init__(name, vehicles, net_params, initial_config, traffic_lights) @@ -83,6 +89,13 @@ def specify_nodes(self, net_params): "y": 0 }] + if self.net_params.additional_params["use_ghost_edge"]: + nodes += [{ + "id": "edge_{}".format(num_edges + 1), + "x": length + self.end_length, + "y": 0 + }] + return nodes def specify_edges(self, net_params): @@ -101,12 +114,22 @@ def specify_edges(self, net_params): "length": segment_length }] + if self.net_params.additional_params["use_ghost_edge"]: + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges + 1), + "length": self.end_length + }] + return edges def 
specify_types(self, net_params): """See parent class.""" lanes = net_params.additional_params["lanes"] speed_limit = net_params.additional_params["speed_limit"] + end_speed_limit = net_params.additional_params["ghost_speed_limit"] types = [{ "id": "highwayType", @@ -114,6 +137,13 @@ def specify_types(self, net_params): "speed": speed_limit }] + if self.net_params.additional_params["use_ghost_edge"]: + types += [{ + "id": "highway_end", + "numLanes": lanes, + "speed": end_speed_limit + }] + return types def specify_routes(self, net_params): @@ -123,15 +153,51 @@ def specify_routes(self, net_params): for i in range(num_edges): rts["highway_{}".format(i)] = ["highway_{}".format(j) for j in range(i, num_edges)] + if self.net_params.additional_params["use_ghost_edge"]: + rts["highway_{}".format(i)].append("highway_end") return rts def specify_edge_starts(self): """See parent class.""" + junction_length = 0.1 + length = self.net_params.additional_params["length"] + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the main edges. + edge_starts = [ + ("highway_{}".format(i), + i * (length / num_edges + junction_length)) + for i in range(num_edges) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + ("highway_end", length + num_edges * junction_length) + ] + + return edge_starts + + def specify_internal_edge_starts(self): + """See parent class.""" + junction_length = 0.1 length = self.net_params.additional_params["length"] - edgestarts = [("highway_{}".format(i), (length / self.num_edges) * i) - for i in range(self.num_edges)] - return edgestarts + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the junctions. 
+ edge_starts = [ + (":edge_{}".format(i + 1), + (i + 1) * length / num_edges + i * junction_length) + for i in range(num_edges - 1) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + (":edge_{}".format(num_edges), + length + (num_edges - 1) * junction_length) + ] + + return edge_starts @staticmethod def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py index 76146dbe6..58967cef8 100644 --- a/tests/fast_tests/test_controllers.py +++ b/tests/fast_tests/test_controllers.py @@ -8,7 +8,7 @@ from flow.controllers.routing_controllers import ContinuousRouter from flow.controllers.car_following_models import IDMController, \ OVMController, BCMController, LinearOVM, CFMController, LACController, \ - GippsController + GippsController, BandoFTLController from flow.controllers import FollowerStopper, PISaturation, NonLocalFollowerStopper from tests.setup_scripts import ring_road_exp_setup import os @@ -709,7 +709,7 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) -class TestGippsontroller(unittest.TestCase): +class TestGippsController(unittest.TestCase): """ Tests that the Gipps Controller returning mathematically accurate values. """ @@ -765,5 +765,59 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) +class TestBandoFTLController(unittest.TestCase): + """ + Tests that the Bando Controller returning mathematically accurate values. 
+ """ + + def setUp(self): + # add a few vehicles to the network using the requested model + # also make sure that the input params are what is expected + contr_params = { + "alpha": .5, + "beta": 20, + "h_st": 2, + "h_go": 10, + "v_max": 32, + "want_max_accel": False, + } + + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(BandoFTLController, contr_params), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + accel=15, decel=5), + num_vehicles=5) + + # create the environment and network classes for a ring road + self.env, _, _ = ring_road_exp_setup(vehicles=vehicles) + + def tearDown(self): + # terminate the traci instance + self.env.terminate() + + # free data used by the class + self.env = None + + def test_get_action(self): + self.env.reset() + ids = self.env.k.vehicle.get_ids() + + test_headways = [2, 4, 6, 8, 10] + for i, veh_id in enumerate(ids): + self.env.k.vehicle.set_headway(veh_id, test_headways[i]) + + requested_accel = [ + self.env.k.vehicle.get_acc_controller(veh_id).get_action(self.env) + for veh_id in ids + ] + + expected_accel = [1.649129, 7.853475, 14.057821, 15.70695, 15.959713] + + np.testing.assert_array_almost_equal(requested_accel, expected_accel) + + if __name__ == '__main__': unittest.main() diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index a05fed68e..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -42,6 +42,7 @@ from examples.exp_configs.non_rl.minicity import flow_params as non_rl_minicity from examples.exp_configs.non_rl.ring import flow_params as non_rl_ring from examples.exp_configs.non_rl.i210_subnetwork import flow_params as non_rl_i210 +from examples.exp_configs.non_rl.highway_single import flow_params as non_rl_highway_single os.environ['TEST_FLAG'] = 'True' os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' @@ -110,6 +111,10 @@ def test_i210(self): """Verify that 
examples/exp_configs/non_rl/i210_subnetwork.py is working.""" self.run_simulation(non_rl_i210) + def test_highway_single(self): + """Verify that examples/exp_configs/non_rl/highway_single.py is working.""" + self.run_simulation(non_rl_highway_single) + @staticmethod def run_simulation(flow_params): # make the horizon small and set render to False diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index f9dd47c04..d72a50b17 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,6 +5,7 @@ from flow.networks import BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork +from tests.setup_scripts import highway_exp_setup __all__ = [ "MultiRingNetwork", "MiniCityNetwork" @@ -94,11 +95,69 @@ def test_additional_net_params(self): "length": 1000, "lanes": 4, "speed_limit": 30, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 } ) ) + def test_ghost_edge(self): + """Validate the functionality of the ghost edge feature.""" + # =================================================================== # + # Without a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1000) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), ["highway_0"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + + # =================================================================== # + # With a ghost edge # + # =================================================================== 
# + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": True, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1500.1) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), + ["highway_0", "highway_end"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) + class TestRingNetwork(unittest.TestCase): diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 485a6a072..b791bba64 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -258,7 +258,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -330,7 +332,9 @@ def test_no_junctions_highway(self): "lanes": 4, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -398,7 +402,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -465,7 +471,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = 
VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index 08d5b2c1e..ac88d2e42 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -343,7 +343,9 @@ def highway_exp_setup(sim_params=None, "lanes": 1, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) From bb3c14cfdbb742eca861a3846d6016aa0b237384 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Sun, 3 May 2020 23:47:51 -0700 Subject: [PATCH 045/335] Benchmark fix (#919) * Add the appropriate reward to the grid benchmark back * Put the bottleneck in a congested regime * Bump bottleneck inflows to put it in the congested regime --- flow/benchmarks/README.md | 6 +++--- flow/benchmarks/bottleneck0.py | 2 +- flow/benchmarks/bottleneck1.py | 2 +- flow/benchmarks/bottleneck2.py | 2 +- flow/benchmarks/grid0.py | 4 ++-- flow/benchmarks/grid1.py | 4 ++-- flow/envs/__init__.py | 3 ++- flow/envs/traffic_light_grid.py | 11 +++++++++++ 8 files changed, 23 insertions(+), 11 deletions(-) diff --git a/flow/benchmarks/README.md b/flow/benchmarks/README.md index 963ad5b70..bbcba9414 100644 --- a/flow/benchmarks/README.md +++ b/flow/benchmarks/README.md @@ -38,12 +38,12 @@ inflow = 300 veh/hour/lane S=(915,), A=(25,), T=400. this problem is to learn to avoid the *capacity drop* that is characteristic to bottleneck structures in transportation networks, and maximize the total outflow in a mixed-autonomy setting. -- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(141,), A=(20,), T=1000. 
-- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, the human drivers follow the standard lane changing model in the simulator, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 3800 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 5000 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(281,), A=(40,), T=1000. ## Training on Custom Algorithms diff --git a/flow/benchmarks/bottleneck0.py b/flow/benchmarks/bottleneck0.py index b0e86844c..b07947ad7 100644 --- a/flow/benchmarks/bottleneck0.py +++ b/flow/benchmarks/bottleneck0.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck1.py b/flow/benchmarks/bottleneck1.py index 26ae6527a..9c8d9c192 100644 --- a/flow/benchmarks/bottleneck1.py +++ b/flow/benchmarks/bottleneck1.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck2.py b/flow/benchmarks/bottleneck2.py index 5052b3b88..4651d448b 100644 --- a/flow/benchmarks/bottleneck2.py +++ b/flow/benchmarks/bottleneck2.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/grid0.py b/flow/benchmarks/grid0.py index 1655c3b3c..5c4ee5349 100644 --- a/flow/benchmarks/grid0.py +++ b/flow/benchmarks/grid0.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (339, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, 
NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_0", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/benchmarks/grid1.py b/flow/benchmarks/grid1.py index ec2a27454..83055adfd 100644 --- a/flow/benchmarks/grid1.py +++ b/flow/benchmarks/grid1.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (915, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_1", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py index 6f4351cc0..8bea3dd4f 100755 --- a/flow/envs/__init__.py +++ b/flow/envs/__init__.py @@ -4,7 +4,7 @@ from flow.envs.bottleneck import BottleneckAccelEnv, BottleneckEnv, \ BottleneckDesiredVelocityEnv from flow.envs.traffic_light_grid import TrafficLightGridEnv, \ - TrafficLightGridPOEnv, TrafficLightGridTestEnv + TrafficLightGridPOEnv, TrafficLightGridTestEnv, TrafficLightGridBenchmarkEnv from flow.envs.ring.lane_change_accel import LaneChangeAccelEnv, \ LaneChangeAccelPOEnv from flow.envs.ring.accel import AccelEnv @@ -34,6 +34,7 @@ 'WaveAttenuationPOEnv', 'TrafficLightGridEnv', 'TrafficLightGridPOEnv', + 'TrafficLightGridBenchmarkEnv', 'BottleneckDesiredVelocityEnv', 'TestEnv', 'BayBridgeEnv', diff --git a/flow/envs/traffic_light_grid.py b/flow/envs/traffic_light_grid.py index 53391a329..8be0cb8a5 100644 --- a/flow/envs/traffic_light_grid.py +++ 
b/flow/envs/traffic_light_grid.py @@ -731,6 +731,17 @@ def additional_command(self): [self.k.vehicle.set_observed(veh_id) for veh_id in self.observed_ids] +class TrafficLightGridBenchmarkEnv(TrafficLightGridPOEnv): + """Class used for the benchmarks in `Benchmarks for reinforcement learning in mixed-autonomy traffic`.""" + + def compute_reward(self, rl_actions, **kwargs): + """See class definition.""" + if self.env_params.evaluate: + return - rewards.min_delay_unscaled(self) + else: + return rewards.desired_velocity(self) + + class TrafficLightGridTestEnv(TrafficLightGridEnv): """ Class for use in testing. From aa1d7133bda5d89c54cd5a68a792a83e9e0f09cc Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 046/335] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/simulation/traci.py | 10 +++++--- flow/core/kernel/vehicle/base.py | 7 ++++++ flow/core/kernel/vehicle/traci.py | 37 ++++++++++++++++++++++------ 3 files changed, 43 insertions(+), 11 deletions(-) diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 0ee29ada6..35b3c2612 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -46,9 +46,13 @@ def pass_api(self, kernel_api): # subscribe some simulation parameters needed to check for entering, # exiting, and colliding vehicles self.kernel_api.simulation.subscribe([ - tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS, - tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, tc.VAR_TIME_STEP, - tc.VAR_DELTA_T + tc.VAR_DEPARTED_VEHICLES_IDS, + tc.VAR_ARRIVED_VEHICLES_IDS, + tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, + tc.VAR_TIME_STEP, + tc.VAR_DELTA_T, + tc.VAR_LOADED_VEHICLES_NUMBER, + tc.VAR_DEPARTED_VEHICLES_NUMBER ]) def
simulation_step(self): diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index d9fc773cd..c68d68c3a 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -290,6 +290,13 @@ def get_departed_ids(self): """Return the ids of vehicles that departed in the last time step.""" raise NotImplementedError + def get_num_not_departed(self): + """Return the number of vehicles not departed in the last time step. + + This includes vehicles that were loaded but not departed. + """ + raise NotImplementedError + def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 22dcc8837..9cb1a5f0f 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -22,7 +22,8 @@ STEPS = 10 rdelta = 255 / STEPS # smoothly go from red to green as the speed increases -color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in range(STEPS + 1)] +color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in + range(STEPS + 1)] class TraCIVehicle(KernelVehicle): @@ -56,6 +57,8 @@ def __init__(self, self.num_vehicles = 0 # number of rl vehicles in the network self.num_rl_vehicles = 0 + # number of vehicles loaded but not departed vehicles + self.num_not_departed = 0 # contains the parameters associated with each type of vehicle self.type_parameters = {} @@ -101,6 +104,7 @@ def initialize(self, vehicles): self.minGap = vehicles.minGap self.num_vehicles = 0 self.num_rl_vehicles = 0 + self.num_not_departed = 0 self.__vehicles.clear() for typ in vehicles.initial: @@ -183,11 +187,12 @@ def update(self, reset): self._departed_ids.clear() self._arrived_ids.clear() self._arrived_rl_ids.clear() + self.num_not_departed = 0 # add vehicles from a network template, if applicable if hasattr(self.master_kernel.network.network, "template_vehicles"): - for veh_id in 
self.master_kernel.network.network.\ + for veh_id in self.master_kernel.network.network. \ template_vehicles: vals = deepcopy(self.master_kernel.network.network. template_vehicles[veh_id]) @@ -212,6 +217,10 @@ def update(self, reset): self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -321,8 +330,12 @@ def _add_departed(self, veh_id, veh_type): # subscribe the new vehicle self.kernel_api.vehicle.subscribe(veh_id, [ - tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID, - tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE, + tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, + tc.VAR_ROAD_ID, + tc.VAR_SPEED, + tc.VAR_EDGES, + tc.VAR_POSITION, + tc.VAR_ANGLE, tc.VAR_SPEED_WITHOUT_TRACI ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -523,6 +536,10 @@ def get_departed_ids(self): else: return 0 + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): @@ -1009,7 +1026,8 @@ def update_vehicle_colors(self): for veh_id in self.get_rl_ids(): try: # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. 
- if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: # color rl vehicles red self.set_color(veh_id=veh_id, color=RED) except (FatalTraCIError, TraCIException) as e: @@ -1020,7 +1038,8 @@ def update_vehicle_colors(self): try: color = CYAN if veh_id in self.get_observed_ids() else WHITE # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1030,7 +1049,8 @@ def update_vehicle_colors(self): if 'av' in veh_id: color = RED # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1043,7 +1063,8 @@ def update_vehicle_colors(self): veh_speed = self.get_speed(veh_id) bin_index = np.digitize(veh_speed, speed_ranges) # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. 
- if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color_bins[bin_index]) # clear the list of observed vehicles From 5080514630615d232f3b9caf75c57c1623bdca7f Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Sat, 9 May 2020 15:31:44 -0700 Subject: [PATCH 047/335] changed _departed_ids, and _arrived_ids in the update function (#926) * changed _departed_ids, and _arrived_ids in the update function * fixed bug in get_departed_ids and get_arrived_ids --- flow/core/kernel/simulation/traci.py | 3 ++- flow/core/kernel/vehicle/traci.py | 27 ++++++++++----------------- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 35b3c2612..2cd109024 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -52,7 +52,8 @@ def pass_api(self, kernel_api): tc.VAR_TIME_STEP, tc.VAR_DELTA_T, tc.VAR_LOADED_VEHICLES_NUMBER, - tc.VAR_DEPARTED_VEHICLES_NUMBER + tc.VAR_DEPARTED_VEHICLES_NUMBER, + tc.VAR_ARRIVED_VEHICLES_NUMBER ]) def simulation_step(self): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9cb1a5f0f..3439e98cc 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -71,11 +71,11 @@ def __init__(self, # number of vehicles that entered the network for every time-step self._num_departed = [] - self._departed_ids = [] + self._departed_ids = 0 # number of vehicles to exit the network for every time-step self._num_arrived = [] - self._arrived_ids = [] + self._arrived_ids = 0 self._arrived_rl_ids = [] # whether or not to automatically color vehicles @@ -184,8 +184,8 @@ def update(self, reset): self.prev_last_lc[veh_id] = -float("inf") 
self._num_departed.clear() self._num_arrived.clear() - self._departed_ids.clear() - self._arrived_ids.clear() + self._departed_ids = 0 + self._arrived_ids = 0 self._arrived_rl_ids.clear() self.num_not_departed = 0 @@ -211,11 +211,10 @@ def update(self, reset): self.__vehicles[veh_id]["last_lc"] = self.time_counter # updated the list of departed and arrived vehicles - self._num_departed.append( - len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])) - self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])) - self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) - self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + self._num_departed.append(sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER]) + self._num_arrived.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_NUMBER]) + self._departed_ids = sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS] + self._arrived_ids = sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS] # update the number of not departed vehicles self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ @@ -517,10 +516,7 @@ def get_num_arrived(self): def get_arrived_ids(self): """See parent class.""" - if len(self._arrived_ids) > 0: - return self._arrived_ids[-1] - else: - return 0 + return self._arrived_ids def get_arrived_rl_ids(self): """See parent class.""" @@ -531,10 +527,7 @@ def get_arrived_rl_ids(self): def get_departed_ids(self): """See parent class.""" - if len(self._departed_ids) > 0: - return self._departed_ids[-1] - else: - return 0 + return self._departed_ids def get_num_not_departed(self): """See parent class.""" From 1db687e557ffab1d4caffb0b3a72cc647d806892 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 048/335] Add an on ramp option --- .../exp_configs/non_rl/i210_subnetwork.py | 74 ++++--- flow/controllers/routing_controllers.py | 21 ++ flow/envs/base.py | 14 +- flow/networks/i210_subnetwork.py | 196 ++++++++++-------- 4 files changed, 184 insertions(+), 121 deletions(-) diff --git 
a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index d993ae93a..87bab415a 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -4,6 +4,7 @@ import numpy as np from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,18 +16,35 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows -vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), -) +ON_RAMP = True + +if ON_RAMP: + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + routing_controller=(I210Router, {}) + ) + +else: + # create the base vehicle type that will be used for inflows + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) inflow = InFlows() # main highway @@ -37,18 +55,19 @@ departLane="random", departSpeed=23) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + 
vehs_per_hour=321, + departLane="random", + departSpeed=20) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=421, + departLane="random", + departSpeed=20) NET_TEMPLATE = os.path.join( config.PROJECT_PATH, @@ -71,20 +90,21 @@ sim=SumoParams( sim_step=0.5, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=7200, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={"use_on_ramp": ON_RAMP} ), # vehicles to be placed in the network at the start of a rollout (see diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index e6ccdde78..c880b5bbf 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -124,3 +124,24 @@ def choose_route(self, env): new_route = super().choose_route(env) return new_route + +class I210Router(ContinuousRouter): + """Assists in choosing routes in select cases for the Bay Bridge network. + Extension to the Continuous Router. + Usage + ----- + See base class for usage example. 
+ """ + + def choose_route(self, env): + """See parent class.""" + edge = env.k.vehicle.get_edge(self.veh_id) + lane = env.k.vehicle.get_lane(self.veh_id) + + # vehicles on these edges in lanes 4 and 5 are not going to be able to make it out in time + if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]: + new_route = env.available_routes["119257908#1-AddedOffRampEdge"][0][0] + else: + new_route = super().choose_route(env) + + return new_route diff --git a/flow/envs/base.py b/flow/envs/base.py index adc959b9a..f033514ff 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -405,8 +405,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. \n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py index d8e05efb5..febb39b00 100644 --- a/flow/networks/i210_subnetwork.py +++ b/flow/networks/i210_subnetwork.py @@ -45,97 +45,109 @@ def specify_routes(self, net_params): Routes for vehicles moving through the bay bridge from Oakland to San Francisco. 
""" - rts = { - # Main highway - "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", 
"27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], - } + if net_params.additional_params["use_on_ramp"]: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 17 / 8378) + ], + "119257908#0": [ + (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOnRampEdge", "119257908#1", + # "119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1": [ + (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1", 
"119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1.0), + # (["119257908#1-AddedOffRampEdge", "173381935"], + # 0.5), + ], + "119257908#2": [ + (["119257908#2", "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 9 / 321), + (["27414345", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 9 / 321), + ], + "27414342#0": [ + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1 - 20 / 421), + (["27414342#0", "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", "119257908#1", + "119257908#1-AddedOffRampEdge", "173381935"], + 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 0.5), + (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], + 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + } + + else: + rts = { + # Main highway + "119257914": [ + (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", + "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", + "119257908#3"], + 1), + ], + } return rts From 1a36503ba19034f1bd11891fc13896b22f7d5c25 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 11:32:12 -0700 Subject: [PATCH 049/335] Increased inflows to 10800 to match density in Bennis 
ring --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 87bab415a..049ec032a 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -16,7 +16,7 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -ON_RAMP = True +ON_RAMP = False if ON_RAMP: vehicles = VehicleParams() @@ -51,7 +51,7 @@ inflow.add( veh_type="human", edge="119257914", - vehs_per_hour=8378, + vehs_per_hour=10800, departLane="random", departSpeed=23) # on ramp From 37161a60991187f71d20effb03b527481f657030 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 050/335] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From d99b8b7271bbd6231b93b3035d837028257db490 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:41:31 -0700 Subject: [PATCH 051/335] Convert inflows to pick out the best lane to travel in instead of a random lane --- examples/exp_configs/non_rl/i210_subnetwork.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 049ec032a..f87a31308 100644 --- 
a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -52,8 +52,8 @@ veh_type="human", edge="119257914", vehs_per_hour=10800, - departLane="random", - departSpeed=23) + departLane="best", + departSpeed=23.0) # on ramp if ON_RAMP: inflow.add( From edfd1496f0fb85c7798526b8c23bc22b331ad2cc Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 24 Mar 2020 22:49:17 -0700 Subject: [PATCH 052/335] Add 1 lane highway network for Benni --- examples/exp_configs/non_rl/highway.py | 40 +++++++------------ .../exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/networks/highway.py | 2 +- 3 files changed, 16 insertions(+), 28 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index e7505f2d7..1905e2f7f 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,25 +5,19 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv +from flow.envs import TestEnv vehicles = VehicleParams() vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) -vehicles.add( - veh_id="human2", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -31,13 +25,7 @@ inflow.add( veh_type="human", edge="highway_0", - probability=0.25, - departLane="free", - departSpeed=20) -inflow.add( - veh_type="human2", 
- edge="highway_0", - probability=0.25, + vehs_per_hour=10800 / 5.0, departLane="free", departSpeed=20) @@ -47,7 +35,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, + env_name=TestEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -58,12 +46,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - lateral_resolution=1.0, + sim_step=0.5 ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=1500, + horizon=4000, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index f87a31308..8264fc286 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -5,7 +5,7 @@ from flow.controllers.car_following_models import IDMController from flow.controllers.routing_controllers import I210Router -from flow.core.params import SumoParams +from flow.core.params import SumoParams, SumoCarFollowingParams from flow.core.params import EnvParams from flow.core.params import NetParams from flow.core.params import SumoLaneChangeParams diff --git a/flow/networks/highway.py b/flow/networks/highway.py index 7e9c18ad5..02b61f133 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 4, + "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From 48e2642bd6da2be5696c2649eb73f1351b94769c Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 053/335] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++ examples/simulate.py | 10 +- flow/controllers/base_controller.py | 
12 ++ flow/core/experiment.py | 27 ++++- flow/core/kernel/vehicle/base.py | 16 +++ flow/core/kernel/vehicle/traci.py | 15 +++ 8 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time + + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be 
uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + self.client = boto3.client("athena") + self.existing_partitions = self.get_existing_partitions() + + def get_existing_partitions(self): + """prints the existing partitions in the S3 bucket""" + + response = self.client.start_query_execution( + QueryString='SHOW PARTITIONS trajectory_table', + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("get current partitions timed out") + response = self.client.get_query_results( + QueryExecutionId=response['QueryExecutionId'], + MaxResults=1000 + ) + return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + + def check_status(self, execution_id): + """ Return the status of the execution with given id + + Parameters + ---------- + execution_id : string + id of the execution that is checked for + Returns + ------- + status: str + QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED + """ + + response = self.client.get_query_execution( + QueryExecutionId=execution_id + ) + return response['QueryExecution']['Status']['State'] + + def wait_for_execution(self, execution_id): + """ wait for the execution to finish or time-out + + Parameters + ---------- + execution_id : str + id of the execution this is watiing for + Returns + ------- + time_out: bool + True if time-out, False if success + Raises + ------ + RuntimeError: if execution failed or get canceled + """ + start = time() + while time() - start < self.MAX_WAIT: + state = self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on 
Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ start the execution of a query, does not wait for it to finish + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if tries to run a query not existed in QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query not existed: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION 
= "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" + "a S3 location") +parser.add_argument("--run", type=str, nargs="+") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q) diff --git a/examples/simulate.py b/examples/simulate.py index 848f030a4..f54bb38d9 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -48,6 +48,12 @@ def parse_args(args): action='store_true', help='Specifies whether to generate an emission file from the ' 'simulation.') + parser.add_argument( + '--to_aws', + type=str, nargs='?', default=None, const="default", + help='Specifies the name of the partition to store the output' + 'file on S3. 
Putting not None value for this argument' + 'automatically set gen_emission to True.') return parser.parse_known_args(args)[0] @@ -55,6 +61,8 @@ def parse_args(args): if __name__ == "__main__": flags = parse_args(sys.argv[1:]) + flags.gen_emission = flags.gen_emission or flags.to_aws + # Get the flow_params object. module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config]) flow_params = getattr(module, flags.exp_config).flow_params @@ -83,4 +91,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4004b1c4d..6e6734764 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,6 +88,9 @@ def get_action(self, env): float the modified form of the acceleration """ + # clear the current stored accel_without_noise of this vehicle None + env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed if len(env.k.vehicle.get_edge(self.veh_id)) == 0: @@ -105,6 +108,15 @@ def get_action(self, env): if accel is None: return None + # store the acceleration without noise to each vehicle + # run fail safe if requested + accel_without_noice = accel + if self.fail_safe == 'instantaneous': + accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + elif self.fail_safe == 'safe_velocity': + accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) diff 
--git a/flow/core/experiment.py b/flow/core/experiment.py index 69a78cb0e..a81f3b130 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_table, upload_to_s3 import datetime import logging import time @@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters @@ -98,6 +99,10 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False): convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. 
Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. 
os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index c68d68c3a..27e6b1ded 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -670,3 +670,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 3439e98cc..365b07a71 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,6 +113,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1110,3 +1111,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def 
update_accel_without_noise(self, veh_id, accel_without_noise): + self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From bd13f693bd58522dfa69b11c15bc12f26b862772 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 054/335] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 224 +----------------------------- flow/core/kernel/vehicle/base.py | 4 + flow/core/kernel/vehicle/traci.py | 3 + 7 files changed, 107 insertions(+), 231 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + 
upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + + # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + 
str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + +testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ 
+ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a81f3b130..63c92e798 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,223 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_table, upload_to_s3 -import datetime -import logging -import time -import os -import numpy as np - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. - - This class acts as a runner for a network and environment. 
In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. 
- env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. - - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. 
- - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. 
- veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) - - if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + output_file.split('/')[-1], output_file) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 27e6b1ded..f22a4ead3 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -683,6 +683,10 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """ see traci class """ raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """ see traci class """ raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 365b07a71..2c8d3173f 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1122,6 +1122,9 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): return self.__vehicles[veh_id]["accel_without_noise"] + def get_2D_position(self, veh_id, error=-1001): + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_road_grade(self, veh_id): # TODO return 0 From 221bb9319a1df6e7550ecf18804278e6584ca4ea Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 055/335] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ examples/simulate.py | 8 +++++++- flow/core/experiment.py | 2 +- 4 files changed, 60 insertions(+), 4 deletions(-) 
create mode 100644 examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path): the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which query should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis query + if a string of list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) 
return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file diff --git a/examples/simulate.py b/examples/simulate.py index f54bb38d9..69e11b2fb 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -54,6 +54,12 @@ def parse_args(args): help='Specifies the name of the partition to store the output' 'file on S3. Putting not None value for this argument' 'automatically set gen_emission to True.') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser.parse_known_args(args)[0] @@ -91,4 +97,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 63c92e798..12391f9ae 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. 
>>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. 
Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. 
>>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. 
Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file From 8f05ec596edfe048487fcabf830b9cd04cedaf04 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 056/335] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 240 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 12391f9ae..80d607e7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,239 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +import datetime +import logging +import time +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. In order to use + it to run an network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. 
+ + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. + create_env, _ = make_create_env(flow_params) + + # Create the environment. + self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. 
+ + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. + only_query: str + Specifies whether queries should be automatically run the + simulation data when it gets uploaded to s3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. 
If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], + "road_grade": [], "source_id": []} + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the velocity speeds and cumulative returns. 
+ veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( + self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + position = self.env.k.vehicle.get_2D_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict. + outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. 
+ for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. + os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From 29ebdb70d4ab1203edfee65d9d50bb03785ea235 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 057/335] fix style issue --- examples/data_pipeline.py | 113 ++++++++++++++++-------- examples/datapipeline_test.py | 4 + examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/controllers/routing_controllers.py | 1 + flow/core/kernel/vehicle/base.py | 12 ++- flow/core/kernel/vehicle/traci.py | 5 ++ 8 files changed, 111 insertions(+), 46 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and 
helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. 
A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """ upload a file to S3 bucket + """Upload a file to S3 bucket. Parameters ---------- @@ -89,15 +90,40 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): class AthenaQuery: + """ + Class used to run query. + + Act as a query engine, maintains an open session with AWS Athena. + + Attributes + ---------- + MAX_WAIT: int + maximum number of seconds to wait before declares time-out + client: boto3.client + the athena client that is used to run the query + existing_partitions: list + a list of partitions that is already recorded in Athena's datalog, + this is obtained through query at the initialization of this class + instance. + """ def __init__(self): + """Initialize AthenaQuery instance. + + initialize a client session with AWS Athena, + query Athena to obtain extisting_partition. 
+ """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. + Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. 
Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be tested + Raises + ------ + RuntimeError: if timeout + """ if query_name not in testing_functions: raise ValueError("no tests supported for this query") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py index 564060d3b..ae0ea382f 100644 --- a/examples/datapipeline_test.py +++ b/examples/datapipeline_test.py @@ -1,3 +1,4 @@ +"""functions that calculates the expected result for testing.""" import math # Vehicle Mass @@ -17,10 +18,12 @@ def heavyside(inp): + """Return 1 if input is positive.""" return 0 if inp <= 0 else 1 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + """Calculate the expected power for POWER_DEMAND_MODEL query.""" acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) rolling_friction = M * g * C_r * mu @@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): + """Apply the power calculation to a row of the dataframe.""" return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/lambda_function.py b/examples/lambda_function.py index 01ce1512a..4f7937c85 100644 --- a/examples/lambda_function.py +++ b/examples/lambda_function.py @@ -1,3 +1,4 @@ +"""lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus from examples.data_pipeline import AthenaQuery @@ -8,6 +9,15 @@ def lambda_handler(event, context): + """Invoke by AWS Lambda upon triggered by an event. 
+ + Parameters + ---------- + event : dic < str: dic > + an S3 event + context: + not used + """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/examples/query.py b/examples/query.py index 6354cec3b..0f0ee13b4 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,15 +1,20 @@ +"""stores all the pre-defined query strings.""" from enum import Enum from examples.datapipeline_test import apply_energy_one -tags = {"energy": ["ENERGY_ONE"]} +# tags for different queries +tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} -testing_functions = {"ENERGY_ONE": apply_energy_one} +# specify the function to calculate the expected result of each query +testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} class QueryStrings(Enum): + """An enumeration of all the pre-defined query strings.""" + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ diff --git a/examples/run_query.py b/examples/run_query.py index ea8839b09..64baa6656 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,3 +1,4 @@ +"""runner script for invoking query manually.""" import argparse from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index c880b5bbf..24f8af3f3 100755 --- a/flow/controllers/routing_controllers.py +++ 
b/flow/controllers/routing_controllers.py @@ -127,6 +127,7 @@ def choose_route(self, env): class I210Router(ContinuousRouter): """Assists in choosing routes in select cases for the Bay Bridge network. + Extension to the Continuous Router. Usage ----- diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index f22a4ead3..351f95405 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -676,17 +676,21 @@ def get_max_speed(self, veh_id, error): ########################################################################### def get_accel(self, veh_id): - """ see traci class """ + """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError def update_accel_without_noise(self, veh_id, accel_without_noise): - """ see traci class """ + """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): - """ see traci class """ + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_road_grade(self, veh_id): + """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2c8d3173f..79221c09d 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1114,17 +1114,22 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): + """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def update_accel_without_noise(self, veh_id, accel_without_noise): + """See parent class.""" self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise def get_accel_without_noise(self, veh_id): + """See 
parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] def get_2D_position(self, veh_id, error=-1001): + """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) def get_road_grade(self, veh_id): + """See parent class.""" # TODO return 0 From 23783bd6e70f471189c929086be0a5e0a18e7797 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 058/335] reorganized file locations --- {examples => flow/data_pipeline}/data_pipeline.py | 0 {examples => flow/data_pipeline}/datapipeline_test.py | 0 {examples => flow/data_pipeline}/lambda_function.py | 0 {examples => flow/data_pipeline}/query.py | 0 {examples => flow/data_pipeline}/run_query.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename {examples => flow/data_pipeline}/data_pipeline.py (100%) rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) rename {examples => flow/data_pipeline}/lambda_function.py (100%) rename {examples => flow/data_pipeline}/query.py (100%) rename {examples => flow/data_pipeline}/run_query.py (100%) diff --git a/examples/data_pipeline.py b/flow/data_pipeline/data_pipeline.py similarity index 100% rename from examples/data_pipeline.py rename to flow/data_pipeline/data_pipeline.py diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py similarity index 100% rename from examples/datapipeline_test.py rename to flow/data_pipeline/datapipeline_test.py diff --git a/examples/lambda_function.py b/flow/data_pipeline/lambda_function.py similarity index 100% rename from examples/lambda_function.py rename to flow/data_pipeline/lambda_function.py diff --git a/examples/query.py b/flow/data_pipeline/query.py similarity index 100% rename from examples/query.py rename to flow/data_pipeline/query.py diff --git a/examples/run_query.py b/flow/data_pipeline/run_query.py similarity index 100% rename from examples/run_query.py rename to flow/data_pipeline/run_query.py From 
3bd49eca1b39d998abbc2c4fbbbb737dd58786cc Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: [PATCH 059/335] fix some more style issues --- examples/simulate.py | 3 ++- flow/controllers/base_controller.py | 10 +++++----- flow/controllers/routing_controllers.py | 1 + flow/core/experiment.py | 3 ++- flow/core/kernel/vehicle/base.py | 2 +- flow/core/kernel/vehicle/traci.py | 2 +- flow/data_pipeline/__init__.py | 1 + flow/data_pipeline/data_pipeline.py | 3 +-- flow/data_pipeline/datapipeline_test.py | 2 +- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/query.py | 12 ++++++------ flow/data_pipeline/run_query.py | 8 ++++---- flow/envs/base.py | 1 - 13 files changed, 26 insertions(+), 24 deletions(-) create mode 100644 flow/data_pipeline/__init__.py diff --git a/examples/simulate.py b/examples/simulate.py index 69e11b2fb..86d14aa14 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -97,4 +97,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + only_query=flags.only_query) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 6e6734764..7adcdf310 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -110,18 +110,18 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noice = accel + accel_without_noise = accel if self.fail_safe == 'instantaneous': - accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': - accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - # run the failsafes, if requested + # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/controllers/routing_controllers.py b/flow/controllers/routing_controllers.py index 24f8af3f3..18d6c1842 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -125,6 +125,7 @@ def choose_route(self, env): return new_route + class I210Router(ContinuousRouter): """Assists in choosing routes in select cases for the Bay Bridge network. 
diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80d607e7d..aa5028836 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -231,7 +231,8 @@ def rl_actions(*_): if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 351f95405..18d7b98a1 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -683,7 +683,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 79221c09d..6dd4077b9 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1125,7 +1125,7 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py new file mode 100644 index 000000000..622e09b06 --- /dev/null +++ b/flow/data_pipeline/__init__.py @@ -0,0 +1 @@ +"""Empty init file to ensure that data_pipeline is recognized as a package""" diff --git 
a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 03b0f87e5..afbc09f92 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,7 @@ import pandas as pd import numpy as np import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings, testing_functions from time import time diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 4f7937c85..afef55a4b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0f0ee13b4..af1b51ce7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,6 +1,6 @@ """stores all the pre-defined query strings.""" from enum import Enum -from examples.datapipeline_test import apply_energy_one +from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries tags = {"energy": 
["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} @@ -15,8 +15,8 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ No newline at end of file + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index 64baa6656..f065a726e 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,10 +1,10 @@ """runner script for invoking query manually.""" import argparse -from examples.data_pipeline import AthenaQuery, test_sql_query -from examples.query import QueryStrings +from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") + "a S3 location") parser.add_argument("--run", type=str, nargs="+") parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") @@ -34,4 +34,4 @@ for q in QueryStrings: print(q) if args.test_query: - 
test_sql_query(args.test_query[0]) \ No newline at end of file + test_sql_query(args.test_query[0]) diff --git a/flow/envs/base.py b/flow/envs/base.py index f033514ff..e7ad57fde 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -417,7 +417,6 @@ def step(self, rl_actions): "**********************************************************" ) - # compute the info for each agent infos = {} From 6335dd847ef95b4e672616f27293bd612f8f6e1c Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 13:02:33 -0700 Subject: [PATCH 060/335] fix one more style issue --- flow/data_pipeline/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py index 622e09b06..d9d6a6573 100644 --- a/flow/data_pipeline/__init__.py +++ b/flow/data_pipeline/__init__.py @@ -1 +1 @@ -"""Empty init file to ensure that data_pipeline is recognized as a package""" +"""Empty init file to ensure that data_pipeline is recognized as a package.""" From 5d5606acad5b7f60c2eed3a3c67060d465d75733 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sat, 9 May 2020 22:06:30 -0700 Subject: [PATCH 061/335] added new two new quries --- flow/core/experiment.py | 4 ++-- flow/core/kernel/vehicle/base.py | 4 ++++ flow/core/kernel/vehicle/traci.py | 4 ++++ flow/data_pipeline/query.py | 38 ++++++++++++++++++++++++++++++- 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index aa5028836..37fcb03af 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time @@ -178,7 +178,7 @@ def rl_actions(*_): 
self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2D_position(vid) + position = self.env.k.vehicle.get_2d_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 18d7b98a1..cb547cddb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -691,6 +691,10 @@ def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError + def get_velocity_without_noise(self, veh_id): + """Return the velocity without noise of vehicle with veh_id.""" + raise NotImplementedError + def get_road_grade(self, veh_id): """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 6dd4077b9..c52cc2f22 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1125,6 +1125,10 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] + def get_velocity_without_noise(self, veh_id): + """See parent class.""" + return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0]) + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index af1b51ce7..0c87b3dcc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,7 +3,8 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # 
tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "analysis": ["POWER_DEMAND_MODEL"]} # specify the function to calculate the expected result of each query testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} @@ -20,3 +21,38 @@ class QueryStrings(Enum): "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL = \ + "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ + "WITH sub1 AS ( " \ + "SELECT" \ + "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "time - LAG(time, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "LAG(speed, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed," \ + "LAG(acceleration, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "LAG(accel_without_noise, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ + "FROM trajectory_table" \ + "WHERE partition_name=\'{partition}\'" \ + ")," \ + "sub2 AS (" \ + "SELECT time, id, speed, acceleration, accel_without_noise, " \ + "road_grade, source_id, " \ + "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "FROM sub1" \ + ") " \ + "SELECT id, time, speed_denoised, 
accel_without_noise," \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ + "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ + "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ + "FROM sub2 " From bdd6068b9326f984b886037f9572b01013df2e05 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 10 May 2020 23:03:35 -0700 Subject: [PATCH 062/335] including next_V for testing only --- flow/core/experiment.py | 1 + flow/core/kernel/vehicle/traci.py | 15 ++++++++++- flow/data_pipeline/query.py | 41 ++++++++++++++++--------------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 37fcb03af..8b5cbac02 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -182,6 +182,7 @@ def rl_actions(*_): extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index c52cc2f22..2fd978012 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -949,8 +949,10 @@ def apply_acceleration(self, veh_ids, acc): for i, vid in enumerate(veh_ids): if acc[i] is not None and vid in self.get_ids(): + self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) + #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1113,9 +1115,18 @@ def set_max_speed(self, veh_id, max_speed): self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) # add for data pipeline + def get_next_v(self, veh_id): + """See parent class.""" + if not "next_v" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["next_v"] = None + return self.__vehicles[veh_id]["next_v"] + #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + def get_accel(self, veh_id): """See parent class.""" - return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + if not "accel" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel"] = None + return self.__vehicles[veh_id]["accel"] def update_accel_without_noise(self, veh_id, accel_without_noise): """See parent class.""" @@ -1123,6 +1134,8 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" + if not "accel_without_noise" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] def get_velocity_without_noise(self, veh_id): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0c87b3dcc..9054364e6 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,44 +15,45 @@ class QueryStrings(Enum): SAMPLE = 
"SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, 1200 * speed * " \ + "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ "WITH sub1 AS ( " \ - "SELECT" \ - "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS 
prev_speed," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ "LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ - "FROM trajectory_table" \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ + "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" \ ")," \ "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, " \ - "road_grade, source_id, " \ - "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ + "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ "FROM sub1" \ ") " \ - "SELECT id, time, speed_denoised, accel_without_noise," \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "SELECT id, time, speed_denoised, accel_without_noise, " \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ - "FROM sub2 " + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ + "FROM sub2 " \ + "ORDER BY id, time " From 3468747c4f824fcefcfc7b80ad3a695b4e8ae5d3 Mon Sep 17 00:00:00 2001 From: Kanaad Parvate Date: Thu, 14 May 2020 11:51:26 -0700 Subject: [PATCH 063/335] Replay Improvement / Fixes (#905) * added aggressive driver and made modifications to replay scripts * add numpy 
import * some more small changes and cleanup * remove aggressive driver * added distribution plots * Fixed minor but common matplotlib error * merge Co-authored-by: Kathy Jang --- .../rl/multiagent/multiagent_i210.py | 14 +- flow/controllers/velocity_controllers.py | 20 +-- flow/core/experiment.py | 2 +- flow/core/kernel/simulation/traci.py | 5 + flow/core/kernel/vehicle/traci.py | 4 +- flow/core/params.py | 6 +- flow/core/rewards.py | 28 +++ flow/envs/base.py | 2 +- flow/envs/multiagent/base.py | 2 +- flow/utils/rllib.py | 2 +- flow/visualize/i210_replay.py | 160 +++++++++++++++--- flow/visualize/plot_custom_callables.py | 42 ++++- flow/visualize/transfer/util.py | 56 +++--- 13 files changed, 264 insertions(+), 79 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index b74f64027..7176abb06 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -9,7 +9,7 @@ from ray.tune.registry import register_env from flow.controllers import RLController -from flow.controllers.car_following_models import IDMController +from flow.controllers.car_following_models import IDMController, SimCarFollowingController import flow.config as config from flow.core.params import EnvParams from flow.core.params import NetParams @@ -18,6 +18,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.params import SumoCarFollowingParams from flow.core.rewards import energy_consumption from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS @@ -57,11 +58,13 @@ num_vehicles=0, lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), + 
car_following_params=SumoCarFollowingParams(speed_mode="no_collide"), ) vehicles.add( "av", acceleration_controller=(RLController, {}), num_vehicles=0, + color='red' ) inflow = InFlows() @@ -140,7 +143,8 @@ render=False, color_by_speed=False, restart_instance=True, - use_ballistic=True + use_ballistic=True, + disable_collisions=True ), # environment related parameters (see flow.core.params.EnvParams) @@ -195,7 +199,7 @@ def policy_mapping_fn(_): custom_callables = { "avg_speed": lambda env: np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), - "avg_outflow": lambda env: np.nan_to_num( - env.k.vehicle.get_outflow_rate(120)), - "avg_energy": lambda env: -1*energy_consumption(env, 0.1) + "avg_outflow": lambda env: np.nan_to_num(env.k.vehicle.get_outflow_rate(120)), + "avg_energy": lambda env: -1*energy_consumption(env, 0.1), + "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles, } diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 2e4b7c22a..c3da6136d 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -25,11 +25,10 @@ class FollowerStopper(BaseController): def __init__(self, veh_id, car_following_params, - v_des=15, - danger_edges=None): + v_des=15): """Instantiate FollowerStopper.""" BaseController.__init__( - self, veh_id, car_following_params, delay=1.0, + self, veh_id, car_following_params, delay=0.0, fail_safe='safe_velocity') # desired speed of the vehicle @@ -45,7 +44,6 @@ def __init__(self, self.d_1 = 1.5 self.d_2 = 1.0 self.d_3 = 0.5 - self.danger_edges = danger_edges if danger_edges else {} def find_intersection_dist(self, env): """Find distance to intersection. 
@@ -106,14 +104,9 @@ def get_accel(self, env): if edge == "": return None - - if self.find_intersection_dist(env) <= 10 and \ - env.k.vehicle.get_edge(self.veh_id) in self.danger_edges or \ - env.k.vehicle.get_edge(self.veh_id)[0] == ":": - return None else: # compute the acceleration from the desired velocity - return (v_cmd - this_vel) / env.sim_step + return np.clip((v_cmd - this_vel) / env.sim_step, -np.abs(self.max_deaccel), self.max_accel) class NonLocalFollowerStopper(FollowerStopper): @@ -154,11 +147,6 @@ def get_accel(self, env): if edge == "": return None - - if self.find_intersection_dist(env) <= 10 and \ - env.k.vehicle.get_edge(self.veh_id) in self.danger_edges or \ - env.k.vehicle.get_edge(self.veh_id)[0] == ":": - return None else: # compute the acceleration from the desired velocity return (v_cmd - this_vel) / env.sim_step @@ -184,7 +172,7 @@ class PISaturation(BaseController): def __init__(self, veh_id, car_following_params): """Instantiate PISaturation.""" - BaseController.__init__(self, veh_id, car_following_params, delay=1.0) + BaseController.__init__(self, veh_id, car_following_params, delay=0.0) # maximum achievable acceleration by the vehicle self.max_accel = car_following_params.controller_params['accel'] diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 69a78cb0e..a0497b595 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -157,7 +157,7 @@ def rl_actions(*_): for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) - if done: + if type(done) is dict and done['__all__'] or type(done) is not dict and done: break # Store the information from the run in info_dict. 
diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index 0ee29ada6..0accdeddf 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -91,6 +91,11 @@ def start_simulation(self, network, sim_params): "--step-length", str(sim_params.sim_step) ] + # disable all collisions and teleporting in the simulation. + if sim_params.disable_collisions: + sumo_call.extend(["--collision.mingap-factor", str(0), + "--collision.action", str("none")]) + # use a ballistic integration step (if request) if sim_params.use_ballistic: sumo_call.append("--step-method.ballistic") diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 22dcc8837..14720cdce 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -727,7 +727,7 @@ def _multi_lane_headways(self): for lane in range(max_lanes): edge_dict[edge][lane].sort(key=lambda x: x[1]) - for veh_id in self.get_rl_ids(): + for veh_id in self.get_ids(): # collect the lane leaders, followers, headways, and tailways for # each vehicle edge = self.get_edge(veh_id) @@ -970,7 +970,7 @@ def apply_lane_change(self, veh_ids, direction): # perform the requested lane action action in TraCI if target_lane != this_lane: self.kernel_api.vehicle.changeLane( - veh_id, int(target_lane), 100000) + veh_id, int(target_lane), self.sim_step) if veh_id in self.get_rl_ids(): self.prev_last_lc[veh_id] = \ diff --git a/flow/core/params.py b/flow/core/params.py index afead7017..0527b33c2 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -568,6 +568,8 @@ class SumoParams(SimParams): current time step use_ballistic: bool, optional If true, use a ballistic integration step instead of an euler step + disable_collisions: bool, optional + If true, disables explicit collision checking and teleporting in SUMO """ def __init__(self, @@ -589,7 +591,8 @@ def __init__(self, teleport_time=-1, num_clients=1, color_by_speed=False, - 
use_ballistic=False): + use_ballistic=False, + disable_collisions=False): """Instantiate SumoParams.""" super(SumoParams, self).__init__( sim_step, render, restart_instance, emission_path, save_render, @@ -604,6 +607,7 @@ def __init__(self, self.num_clients = num_clients self.color_by_speed = color_by_speed self.use_ballistic = use_ballistic + self.disable_collisions = disable_collisions class EnvParams: diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 6de472af2..60760f357 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -322,6 +322,8 @@ def energy_consumption(env, gain=.001): rho = 1.225 # air density (kg/m^3) A = 2.6 # vehicle cross sectional area (m^2) for veh_id in env.k.vehicle.get_ids(): + if veh_id not in env.k.vehicle.previous_speeds: + continue speed = env.k.vehicle.get_speed(veh_id) prev_speed = env.k.vehicle.get_previous_speed(veh_id) @@ -330,3 +332,29 @@ def energy_consumption(env, gain=.001): power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 return -gain * power + +def vehicle_energy_consumption(env, veh_id, gain=.001): + """Calculate power consumption of a vehicle. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. 
+ """ + power = 0 + + M = 1200 # mass of average sized vehicle (kg) + g = 9.81 # gravitational acceleration (m/s^2) + Cr = 0.005 # rolling resistance coefficient + Ca = 0.3 # aerodynamic drag coefficient + rho = 1.225 # air density (kg/m^3) + A = 2.6 # vehicle cross sectional area (m^2) + if veh_id not in env.k.vehicle.previous_speeds: + return 0 + speed = env.k.vehicle.get_speed(veh_id) + prev_speed = env.k.vehicle.get_previous_speed(veh_id) + + accel = abs(speed - prev_speed) / env.sim_step + + power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 + + return -gain * power diff --git a/flow/envs/base.py b/flow/envs/base.py index adc959b9a..baf8270b5 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -385,7 +385,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - + self.crash = crash # stop collecting new simulation steps if there is a collision if crash: break diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 126107b00..594fb2fdb 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -104,7 +104,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - + self.crash = crash # stop collecting new simulation steps if there is a collision if crash: print('A CRASH! A CRASH!!!!!! 
AAAAAAAAAH!!!!!') diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index ca8072c85..e3404a61f 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -146,7 +146,7 @@ def get_flow_params(config): if flow_params["net"]["inflows"]: net.inflows.__dict__ = flow_params["net"]["inflows"].copy() - if net.template and len(net.template) > 0: + if net.template is not None and len(net.template) > 0: dirname = os.getcwd() filename = os.path.join(dirname, '../../examples') split = net.template.split('examples')[1][1:] diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 502d646aa..ac4cc031d 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -1,10 +1,13 @@ """Transfer and replay for i210 environment.""" import argparse +from datetime import datetime from collections import defaultdict from copy import deepcopy import numpy as np import json import os +import pytz +import subprocess import time import ray @@ -14,15 +17,16 @@ from ray.rllib.agents.registry import get_agent_class from ray.tune.registry import register_env -from flow.core.util import emission_to_csv +from flow.core.util import emission_to_csv, ensure_dir +from flow.core.rewards import vehicle_energy_consumption from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params from flow.utils.rllib import get_rllib_config from flow.utils.rllib import get_rllib_pkl from flow.utils.rllib import FlowParamsEncoder - from flow.visualize.transfer.util import inflows_range +from flow.visualize.plot_custom_callables import plot_trip_distribution from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables @@ -38,8 +42,8 @@ 2 - the number of the checkpoint """ - -def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None): +@ray.remote +def replay(args, 
flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None, max_completed_trips=None, v_des=12): """Replay or run transfer test (defined by transfer_fn) by modif. Arguments: @@ -55,8 +59,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= """ assert bool(args.controller) ^ bool(rllib_config), \ "Need to specify either controller or rllib_config, but not both" - - if args.run_transfer: + if transfer_test is not None: + if type(transfer_test) == bytes: + transfer_test = ray.cloudpickle.loads(transfer_test) flow_params = transfer_test.flow_params_modifier_fn(flow_params) if args.controller: @@ -71,7 +76,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= elif args.controller == 'follower_stopper': from flow.controllers.velocity_controllers import FollowerStopper controller = FollowerStopper - test_params.update({'v_des': 15}) + test_params.update({'v_des': v_des}) + # flow_params['veh'].type_parameters['av']['car_following_params'] elif args.controller == 'sumo': from flow.controllers.car_following_models import SimCarFollowingController controller = SimCarFollowingController @@ -185,17 +191,29 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= info_dict = { "velocities": [], "outflows": [], + "avg_trip_energy": [], + "avg_trip_time": [], + "total_completed_trips": [] } + all_trip_energy_distribution = defaultdict(lambda: []) + all_trip_time_distribution = defaultdict(lambda: []) + info_dict.update({ key: [] for key in custom_callables.keys() }) - for i in range(args.num_rollouts): + i = 0 + while i < args.num_rollouts: + print("Rollout iter", i) vel = [] + per_vehicle_energy_trace = defaultdict(lambda: []) + completed_veh_types = {} + completed_vehicle_avg_energy = {} + completed_vehicle_travel_time = {} custom_vals = {key: [] for key in custom_callables.keys()} state = env.reset() + initial_vehicles = set(env.k.vehicle.get_ids()) for _ in 
range(env_params.horizon): - if rllib_config: if multiagent: action = {} @@ -226,17 +244,41 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= for (key, lambda_func) in custom_callables.items(): custom_vals[key].append(lambda_func(env)) + for past_veh_id in per_vehicle_energy_trace.keys(): + if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: + all_trip_energy_distribution[completed_veh_types[past_veh_id]].append(np.sum(per_vehicle_energy_trace[past_veh_id])) + all_trip_time_distribution[completed_veh_types[past_veh_id]].append(len(per_vehicle_energy_trace[past_veh_id])) + completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) + completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) + + for veh_id in veh_ids: + if veh_id not in initial_vehicles: + if veh_id not in per_vehicle_energy_trace: + # we have to skip the first step's energy calculation + per_vehicle_energy_trace[veh_id].append(0) + completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) + else: + per_vehicle_energy_trace[veh_id].append(-1*vehicle_energy_consumption(env, veh_id)) + if type(done) is dict and done['__all__']: break elif type(done) is not dict and done: break - - # Store the information from the run in info_dict. - outflow = env.k.vehicle.get_outflow_rate(int(500)) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) + elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips: + break + if env.crash: + print("Crash on iter", i) + else: + # Store the information from the run in info_dict. 
+ outflow = env.k.vehicle.get_outflow_rate(int(500)) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + info_dict["avg_trip_energy"].append(np.mean(list(completed_vehicle_avg_energy.values()))) + info_dict["avg_trip_time"].append(np.mean(list(completed_vehicle_travel_time.values()))) + info_dict["total_completed_trips"].append(len(list(completed_vehicle_avg_energy.values()))) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + i += 1 print('======== Summary of results ========') if args.run_transfer: @@ -252,6 +294,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= env.unwrapped.terminate() if output_dir: + ensure_dir(output_dir) if args.run_transfer: exp_name = "{}-replay".format(transfer_test.transfer_str) else: @@ -276,6 +319,14 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # delete the .xml version of the emission file os.remove(emission_path) + all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) + np.save(all_trip_energies, dict(all_trip_energy_distribution)) + fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) + + for fig_name, fig in zip(fig_names, figs): + edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) + fig.savefig(edist_out) + # Create the flow_params object with open(os.path.join(output_dir, exp_name) + '.json', 'w') as outfile: json.dump(flow_params, outfile, @@ -342,16 +393,42 @@ def create_parser(): action='store_true', help='Runs transfer tests if true' ) + parser.add_argument( + '-pr', + '--penetration_rate', + type=float, + help='Specifies percentage of AVs.', + required=False) + parser.add_argument( + '-mct', + '--max_completed_trips', + type=int, + help='Terminate rollout after max_completed_trips vehicles have started and ended.', + default=None) + parser.add_argument( + '--v_des_sweep', + action='store_true', 
+ help='Runs a sweep over v_des params.', + default=None) parser.add_argument( '--output_dir', type=str, help='Directory to save results.', default=None ) + parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--num_cpus', type=int, default=1, help='Number of cpus to run experiment with') + parser.add_argument('--multi_node', action='store_true', help='Set to true if this will ' + 'be run in cluster mode') + parser.add_argument('--exp_title', type=str, required=False, default=None, + help='Informative experiment title to help distinguish results') return parser if __name__ == '__main__': + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + parser = create_parser() args = parser.parse_args() @@ -365,14 +442,51 @@ def create_parser(): flow_params = deepcopy(I210_MA_DEFAULT_FLOW_PARAMS) - if args.local: - ray.init(num_cpus=1, object_store_memory=200 * 1024 * 1024) + if args.multi_node: + ray.init(redis_address='localhost:6379') + elif args.local: + ray.init(local_mode=True, object_store_memory=200 * 1024 * 1024) else: - ray.init(num_cpus=1) + ray.init(num_cpus=args.num_cpus + 1, object_store_memory=200 * 1024 * 1024) + + if args.exp_title: + output_dir = os.path.join(args.output_dir, args.exp_title) + else: + output_dir = args.output_dir if args.run_transfer: - for transfer_test in inflows_range(penetration_rates=[0.05, 0.1, 0.2], flow_rate_coefs=[0.8, 1.0, 1.2]): - replay(args, flow_params, output_dir=args.output_dir, transfer_test=transfer_test, - rllib_config=rllib_config, result_dir=rllib_result_dir) + s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] + ray_output = [replay.remote(args, flow_params, output_dir=output_dir, transfer_test=transfer_test, + rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips) + for transfer_test in s] + 
ray.get(ray_output) + + elif args.v_des_sweep: + assert args.controller == 'follower_stopper' + + ray_output = [replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, + result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) + for v_des in range(8, 17, 2)] + ray.get(ray_output) + else: - replay(args, flow_params, output_dir=args.output_dir, rllib_config=rllib_config, result_dir=rllib_result_dir) + if args.penetration_rate is not None: + pr = args.penetration_rate if args.penetration_rate is not None else 0 + single_transfer = next(inflows_range(penetration_rates=pr)) + ray.get(replay.remote(args, flow_params, output_dir=output_dir, transfer_test=single_transfer, + rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + else: + ray.get(replay.remote(args, flow_params, output_dir=output_dir, + rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + + if args.use_s3: + s3_string = 's3://kanaad.experiments/i210_replay/' + date + if args.exp_title: + s3_string += '/' + args.exp_title + + for i in range(4): + try: + p1 = subprocess.Popen("aws s3 sync {} {}".format(output_dir, s3_string).split(' ')) + p1.wait(50) + except Exception as e: + print('This is the error ', e) diff --git a/flow/visualize/plot_custom_callables.py b/flow/visualize/plot_custom_callables.py index 8df0e7f62..f82377cef 100644 --- a/flow/visualize/plot_custom_callables.py +++ b/flow/visualize/plot_custom_callables.py @@ -4,11 +4,43 @@ from datetime import datetime import errno import numpy as np -import matplotlib.pyplot as plt +try: + from matplotlib import pyplot as plt +except ImportError: + import matplotlib + matplotlib.use('TkAgg') + from matplotlib import pyplot as plt import os import pytz import sys +def make_bar_plot(vals, title): + print(len(vals)) + fig = plt.figure() + plt.hist(vals, 10, facecolor='blue', 
alpha=0.5) + plt.title(title) + plt.xlim(1000,3000) + return fig + +def plot_trip_distribution(all_trip_energy_distribution): + non_av_vals = [] + figures = [] + figure_names = [] + for key in all_trip_energy_distribution: + if key != 'av': + non_av_vals.extend(all_trip_energy_distribution[key]) + figures.append(make_bar_plot(all_trip_energy_distribution[key], key)) + figure_names.append(key) + + figure_names.append('All Non-AV') + figures.append(make_bar_plot(non_av_vals, 'All Non-AV')) + + figure_names.append('All') + figures.append(make_bar_plot(non_av_vals + all_trip_energy_distribution['av'], 'All')) + + return figure_names, figures + + def parse_flags(args): """Parse training options user can specify in command line. @@ -51,13 +83,17 @@ def parse_flags(args): for (dirpath, dir_names, file_names) in os.walk(flags.target_folder): for file_name in file_names: if file_name[-4:] == ".npy": - exp_name = os.path.basename(os.path.dirname(dirpath)) + exp_name = os.path.basename(dirpath) info_dict = np.load(os.path.join(dirpath, file_name), allow_pickle=True).item() info_dicts.append(info_dict) exp_names.append(exp_name) custom_callable_names.update(info_dict.keys()) + idxs = np.argsort(exp_names) + exp_names = [exp_names[i] for i in idxs] + info_dicts = [info_dicts[i] for i in idxs] + for name in custom_callable_names: y_vals = [np.mean(info_dict[name]) for info_dict in info_dicts] y_stds = [np.std(info_dict[name]) for info_dict in info_dicts] @@ -65,7 +101,7 @@ def parse_flags(args): plt.bar(x_pos, y_vals, align='center', alpha=0.5) plt.xticks(x_pos, [exp_name for exp_name in exp_names], rotation=60) - plt.ylabel('Experiment') + plt.xlabel('Experiment') plt.title('I210 Replay Result: {}'.format(name)) plt.tight_layout() if flags.output_folder: diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py index 50b503956..d6c8d9f88 100644 --- a/flow/visualize/transfer/util.py +++ b/flow/visualize/transfer/util.py @@ -6,13 +6,13 @@ 
VEH_PER_HOUR_BASE_27414345, VEH_PER_HOUR_BASE_27414342 -def make_inflows(penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20, on_ramp=False): +def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): """Generate inflows object from parameters. Uses default inflows from multiagent_i210. Keyword Arguments: ----------------- - penetration_rate {float} -- [AV Penetration Rate] (default: {0.1}) - flow_rate_coef {float} -- [Scale flow rate by] (default: {1.0}) + pr {float} -- [AV Penetration Rate] (default: {0.1}) + fr_coef {float} -- [Scale flow rate by] (default: {1.0}) departSpeed {int} -- [Initial speed of all flows] (default: {20}) Returns @@ -22,39 +22,45 @@ def make_inflows(penetration_rate=0.1, flow_rate_coef=1.0, departSpeed=20, on_ra """ inflow = InFlows() # main highway - assert penetration_rate < 1.0, "your penetration rate is over 100%" - assert penetration_rate > 0.0, "your penetration rate should be above zero" + assert pr < 1.0, "your penetration rate is over 100%" + + all_inflows = [] inflow_119257914 = dict(veh_type="human", edge="119257914", - vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef, + vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * + (1 - (pr)) * fr_coef, # probability=1.0, departLane="random", departSpeed=departSpeed) - inflow_119257914_av = dict(veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * penetration_rate * flow_rate_coef), - # probability=1.0, - departLane="random", - departSpeed=departSpeed) + all_inflows.append(inflow_119257914) + + if pr > 0.0: + inflow_119257914_av = dict(veh_type="av", + edge="119257914", + vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pr * fr_coef), + # probability=1.0, + departLane="random", + departSpeed=departSpeed) + all_inflows.append(inflow_119257914_av) + if on_ramp: inflow_27414345 = dict(veh_type="human", edge="27414345", - vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * penetration_rate * flow_rate_coef, + 
vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * + (1 - (pr)) * fr_coef, departLane="random", departSpeed=departSpeed) - - inflow_27414342 = dict(veh_type="human", - edge="27414342#0", - vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * penetration_rate * flow_rate_coef, - departLane="random", - departSpeed=departSpeed) - - all_inflow_defs = (inflow_119257914, inflow_27414345, inflow_27414342, inflow_119257914_av) - else: - all_inflow_defs = (inflow_119257914, inflow_119257914_av) - - for inflow_def in all_inflow_defs: + all_inflows.append(inflow_27414345) + if pr > 0.0: + inflow_27414342 = dict(veh_type="human", + edge="27414342#0", + vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef, + departLane="random", + departSpeed=departSpeed) + all_inflows.append(inflow_27414342) + + for inflow_def in all_inflows: inflow.add(**inflow_def) return inflow From 0f45dbe356b79067915e933f5795ab3760d69930 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Mon, 18 May 2020 14:06:22 -0400 Subject: [PATCH 064/335] Mpg reward2 (#933) Add an MPG and MPJ reward --- examples/exp_configs/non_rl/highway_single.py | 144 ++++++++++++++++++ .../rl/multiagent/multiagent_i210.py | 2 +- examples/train.py | 4 +- flow/benchmarks/README.md | 6 +- flow/benchmarks/bottleneck0.py | 2 +- flow/benchmarks/bottleneck1.py | 2 +- flow/benchmarks/bottleneck2.py | 2 +- flow/benchmarks/grid0.py | 4 +- flow/benchmarks/grid1.py | 4 +- flow/controllers/__init__.py | 5 +- flow/controllers/car_following_models.py | 83 ++++++++++ flow/core/kernel/simulation/traci.py | 14 +- flow/core/kernel/vehicle/base.py | 21 +++ flow/core/kernel/vehicle/traci.py | 76 +++++---- flow/core/rewards.py | 83 ++++++++++ flow/envs/__init__.py | 3 +- flow/envs/traffic_light_grid.py | 11 ++ flow/networks/highway.py | 80 +++++++++- flow/visualize/i210_replay.py | 40 +++-- flow/visualize/plot_custom_callables.py | 13 +- flow/visualize/transfer/util.py | 14 +- tests/fast_tests/test_controllers.py | 58 ++++++- tests/fast_tests/test_examples.py | 5 
+ tests/fast_tests/test_scenarios.py | 61 +++++++- tests/fast_tests/test_vehicles.py | 16 +- tests/setup_scripts.py | 4 +- 26 files changed, 668 insertions(+), 89 deletions(-) create mode 100644 examples/exp_configs/non_rl/highway_single.py diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py new file mode 100644 index 000000000..a2e44032a --- /dev/null +++ b/examples/exp_configs/non_rl/highway_single.py @@ -0,0 +1,144 @@ +"""Multi-agent highway with ramps example. + +Trains a non-constant number of agents, all sharing the same policy, on the +highway with ramps network. +""" +import numpy as np + +from flow.controllers import BandoFTLController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.core.params import EnvParams +from flow.core.params import NetParams +from flow.core.params import InitialConfig +from flow.core.params import InFlows +from flow.core.params import VehicleParams +from flow.core.params import SumoParams +from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import miles_per_megajoule +from flow.networks import HighwayNetwork +from flow.envs import TestEnv +from flow.networks.highway import ADDITIONAL_NET_PARAMS + +TRAFFIC_SPEED = 11 +END_SPEED = 16 +TRAFFIC_FLOW = 2056 +HORIZON = 2000 +INCLUDE_NOISE = False + +# percentage of autonomous vehicles compared to human vehicles on highway +PENETRATION_RATE = 10.0 + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params.update({ + # length of the highway + "length": 2500, + # number of lanes + "lanes": 1, + # speed limit for all edges + "speed_limit": 30, + # number of edges to divide the highway into + "num_edges": 2, + # whether to include a ghost edge of length 500m. This edge is provided a + # different speed limit. 
+ "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED +}) + +vehicles = VehicleParams() +vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(BandoFTLController, { + 'alpha': .5, + 'beta': 20.0, + 'h_st': 12.0, + 'h_go': 50.0, + 'v_max': 30.0, + 'noise': 1.0 if INCLUDE_NOISE else 0.0, + }), +) + +if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + acceleration_controller=(FollowerStopper, {"v_des": 11.0}), + ) + +inflows = InFlows() + +inflows.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=int(TRAFFIC_FLOW * (1 - PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="idm_highway_inflow") + +if PENETRATION_RATE > 0.0: + inflows.add( + veh_type="av", + edge="highway_0", + vehs_per_hour=int(TRAFFIC_FLOW * (PENETRATION_RATE / 100)), + depart_lane="free", + depart_speed="23", + name="av_highway_inflow") + +# SET UP FLOW PARAMETERS + +flow_params = dict( + # name of the experiment + exp_tag='highway-single', + + # name of the flow environment the experiment is running on + env_name=TestEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork, + + # simulator that is used by the experiment + simulator='traci', + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=HORIZON, + warmup_steps=0, + sims_per_step=1, + ), + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.5, + render=False, + restart_instance=False + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflows, + additional_params=additional_net_params + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # 
parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig(), +) + +custom_callables = { + "avg_merge_speed": lambda env: np.nan_to_num(np.mean( + env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), + "avg_outflow": lambda env: np.nan_to_num( + env.k.vehicle.get_outflow_rate(120)), + "miles_per_megajoule": lambda env: np.nan_to_num( + miles_per_megajoule(env, env.k.vehicle.get_ids(), gain=1.0) + ) +} diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 7176abb06..a7d707068 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -9,7 +9,7 @@ from ray.tune.registry import register_env from flow.controllers import RLController -from flow.controllers.car_following_models import IDMController, SimCarFollowingController +from flow.controllers.car_following_models import IDMController import flow.config as config from flow.core.params import EnvParams from flow.core.params import NetParams diff --git a/examples/train.py b/examples/train.py index d688f2b9a..5b5431a3d 100644 --- a/examples/train.py +++ b/examples/train.py @@ -25,8 +25,8 @@ import ray from ray import tune -from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper from ray.tune.registry import register_env +from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -301,7 +301,7 @@ def train_h_baselines(flow_params, args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy.envs import FlowEnv + from hbaselines.envs.mixed_autonomy import FlowEnv flow_params = deepcopy(flow_params) diff --git 
a/flow/benchmarks/README.md b/flow/benchmarks/README.md index 963ad5b70..bbcba9414 100644 --- a/flow/benchmarks/README.md +++ b/flow/benchmarks/README.md @@ -38,12 +38,12 @@ inflow = 300 veh/hour/lane S=(915,), A=(25,), T=400. this problem is to learn to avoid the *capacity drop* that is characteristic to bottleneck structures in transportation networks, and maximize the total outflow in a mixed-autonomy setting. -- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck0` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 1900 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck1` 4 lanes, inflow = 2500 veh/hour, 10% CAV penetration, the human drivers follow the standard lane changing model in the simulator, S=(141,), A=(20,), T=1000. -- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 3800 veh/hour, 10% CAV +- `flow.benchmarks.bottleneck2` 8 lanes, inflow = 5000 veh/hour, 10% CAV penetration, no vehicles are allowed to lane change, S=(281,), A=(40,), T=1000. 
## Training on Custom Algorithms diff --git a/flow/benchmarks/bottleneck0.py b/flow/benchmarks/bottleneck0.py index b0e86844c..b07947ad7 100644 --- a/flow/benchmarks/bottleneck0.py +++ b/flow/benchmarks/bottleneck0.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck1.py b/flow/benchmarks/bottleneck1.py index 26ae6527a..9c8d9c192 100644 --- a/flow/benchmarks/bottleneck1.py +++ b/flow/benchmarks/bottleneck1.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/bottleneck2.py b/flow/benchmarks/bottleneck2.py index 5052b3b88..4651d448b 100644 --- a/flow/benchmarks/bottleneck2.py +++ b/flow/benchmarks/bottleneck2.py @@ -66,7 +66,7 @@ } # flow rate -flow_rate = 2000 * SCALING +flow_rate = 2500 * SCALING # percentage of flow coming out of each lane inflow = InFlows() diff --git a/flow/benchmarks/grid0.py b/flow/benchmarks/grid0.py index 1655c3b3c..5c4ee5349 100644 --- a/flow/benchmarks/grid0.py +++ b/flow/benchmarks/grid0.py @@ -4,7 +4,7 @@ - **Observation Dimension**: (339, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_0", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/benchmarks/grid1.py b/flow/benchmarks/grid1.py index ec2a27454..83055adfd 100644 --- a/flow/benchmarks/grid1.py +++ b/flow/benchmarks/grid1.py @@ -4,7 +4,7 @@ - **Observation 
Dimension**: (915, ) - **Horizon**: 400 steps """ -from flow.envs import TrafficLightGridPOEnv +from flow.envs import TrafficLightGridBenchmarkEnv from flow.networks import TrafficLightGridNetwork from flow.core.params import SumoParams, EnvParams, InitialConfig, NetParams, \ InFlows, SumoCarFollowingParams @@ -68,7 +68,7 @@ exp_tag="grid_1", # name of the flow environment the experiment is running on - env_name=TrafficLightGridPOEnv, + env_name=TrafficLightGridBenchmarkEnv, # name of the network class the experiment is running on network=TrafficLightGridNetwork, diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 6cb20077a..4dfcf05b7 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -14,7 +14,8 @@ from flow.controllers.base_controller import BaseController from flow.controllers.car_following_models import CFMController, \ BCMController, OVMController, LinearOVM, IDMController, \ - SimCarFollowingController, LACController, GippsController + SimCarFollowingController, LACController, GippsController, \ + BandoFTLController from flow.controllers.velocity_controllers import FollowerStopper, \ PISaturation, NonLocalFollowerStopper @@ -35,5 +36,5 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController" ] diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index f86c546e8..42c9b2a9b 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -580,3 +580,86 @@ def get_accel(self, env): v_next = min(v_acc, v_safe, self.v_desired) return (v_next-v)/env.sim_step + + +class BandoFTLController(BaseController): + """Bando follow-the-leader controller. 
+ + Usage + ----- + See BaseController for usage example. + + Attributes + ---------- + veh_id : str + Vehicle ID for SUMO identification + car_following_params : flow.core.params.SumoCarFollowingParams + see parent class + alpha : float + gain on desired velocity to current velocity difference + (default: 0.5) + beta : float + gain on lead car velocity and self velocity difference + (default: 20) + h_st : float + headway for stopping (default: 2) + h_go : float + headway for full speed (default: 10) + v_max : float + max velocity (default: 32) + time_delay : float + time delay (default: 0) + noise : float + std dev of normal perturbation to the acceleration (default: 0) + fail_safe : str + type of flow-imposed failsafe the vehicle should possess, defaults + to no failsafe (None) + """ + + def __init__(self, + veh_id, + car_following_params, + alpha=.5, + beta=20, + h_st=2, + h_go=10, + v_max=32, + want_max_accel=False, + time_delay=0, + noise=0, + fail_safe=None): + """Instantiate a Bando controller.""" + BaseController.__init__( + self, + veh_id, + car_following_params, + delay=time_delay, + fail_safe=fail_safe, + noise=noise, + ) + self.veh_id = veh_id + self.v_max = v_max + self.alpha = alpha + self.beta = beta + self.h_st = h_st + self.h_go = h_go + self.want_max_accel = want_max_accel + + def get_accel(self, env): + """See parent class.""" + lead_id = env.k.vehicle.get_leader(self.veh_id) + if not lead_id:  # no car ahead + if self.want_max_accel: + return self.max_accel + + v_l = env.k.vehicle.get_speed(lead_id) + v = env.k.vehicle.get_speed(self.veh_id) + s = env.k.vehicle.get_headway(self.veh_id) + return self.accel_func(v, v_l, s) + + def accel_func(self, v, v_l, s): + """Compute the acceleration function.""" + v_h = self.v_max * ((np.tanh(s/self.h_st-2)+np.tanh(2))/(1+np.tanh(2))) + s_dot = v_l - v + u = self.alpha * (v_h - v) + self.beta * s_dot/(s**2) + return u diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py
index 0accdeddf..09ea03907 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -12,7 +12,6 @@ import subprocess import signal - # Number of retries on restarting SUMO before giving up RETRIES_ON_ERROR = 10 @@ -46,9 +45,14 @@ def pass_api(self, kernel_api): # subscribe some simulation parameters needed to check for entering, # exiting, and colliding vehicles self.kernel_api.simulation.subscribe([ - tc.VAR_DEPARTED_VEHICLES_IDS, tc.VAR_ARRIVED_VEHICLES_IDS, - tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, tc.VAR_TIME_STEP, - tc.VAR_DELTA_T + tc.VAR_DEPARTED_VEHICLES_IDS, + tc.VAR_ARRIVED_VEHICLES_IDS, + tc.VAR_TELEPORT_STARTING_VEHICLES_IDS, + tc.VAR_TIME_STEP, + tc.VAR_DELTA_T, + tc.VAR_LOADED_VEHICLES_NUMBER, + tc.VAR_DEPARTED_VEHICLES_NUMBER, + tc.VAR_ARRIVED_VEHICLES_NUMBER ]) def simulation_step(self): @@ -94,7 +98,7 @@ def start_simulation(self, network, sim_params): # disable all collisions and teleporting in the simulation. if sim_params.disable_collisions: sumo_call.extend(["--collision.mingap-factor", str(0), - "--collision.action", str("none")]) + "--collision.action", str("none")]) # use a ballistic integration step (if request) if sim_params.use_ballistic: diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index d9fc773cd..706504027 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -290,6 +290,27 @@ def get_departed_ids(self): """Return the ids of vehicles that departed in the last time step.""" raise NotImplementedError + def get_num_not_departed(self): + """Return the number of vehicles not departed in the last time step. + + This includes vehicles that were loaded but not departed. + """ + raise NotImplementedError + + def get_fuel_consumption(selfself, veh_id, error=-1001): + """Return the mpg / s of the specified vehicle. 
+ + Parameters + ---------- + veh_id : str or list of str + vehicle id, or list of vehicle ids + error : any, optional + value that is returned if the vehicle is not found + Returns + ------- + float + """ + def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 14720cdce..b89e981be 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -22,7 +22,8 @@ STEPS = 10 rdelta = 255 / STEPS # smoothly go from red to green as the speed increases -color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in range(STEPS + 1)] +color_bins = [[int(255 - rdelta * i), int(rdelta * i), 0] for i in + range(STEPS + 1)] class TraCIVehicle(KernelVehicle): @@ -56,6 +57,8 @@ def __init__(self, self.num_vehicles = 0 # number of rl vehicles in the network self.num_rl_vehicles = 0 + # number of vehicles loaded but not departed vehicles + self.num_not_departed = 0 # contains the parameters associated with each type of vehicle self.type_parameters = {} @@ -68,11 +71,11 @@ def __init__(self, # number of vehicles that entered the network for every time-step self._num_departed = [] - self._departed_ids = [] + self._departed_ids = 0 # number of vehicles to exit the network for every time-step self._num_arrived = [] - self._arrived_ids = [] + self._arrived_ids = 0 self._arrived_rl_ids = [] # whether or not to automatically color vehicles @@ -101,6 +104,7 @@ def initialize(self, vehicles): self.minGap = vehicles.minGap self.num_vehicles = 0 self.num_rl_vehicles = 0 + self.num_not_departed = 0 self.__vehicles.clear() for typ in vehicles.initial: @@ -180,14 +184,15 @@ def update(self, reset): self.prev_last_lc[veh_id] = -float("inf") self._num_departed.clear() self._num_arrived.clear() - self._departed_ids.clear() - self._arrived_ids.clear() + self._departed_ids = 0 + self._arrived_ids = 0 self._arrived_rl_ids.clear() + 
self.num_not_departed = 0 # add vehicles from a network template, if applicable if hasattr(self.master_kernel.network.network, "template_vehicles"): - for veh_id in self.master_kernel.network.network.\ + for veh_id in self.master_kernel.network.network. \ template_vehicles: vals = deepcopy(self.master_kernel.network.network. template_vehicles[veh_id]) @@ -206,11 +211,14 @@ def update(self, reset): self.__vehicles[veh_id]["last_lc"] = self.time_counter # updated the list of departed and arrived vehicles - self._num_departed.append( - len(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS])) - self._num_arrived.append(len(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS])) - self._departed_ids.append(sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS]) - self._arrived_ids.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS]) + self._num_departed.append(sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER]) + self._num_arrived.append(sim_obs[tc.VAR_ARRIVED_VEHICLES_NUMBER]) + self._departed_ids = sim_obs[tc.VAR_DEPARTED_VEHICLES_IDS] + self._arrived_ids = sim_obs[tc.VAR_ARRIVED_VEHICLES_IDS] + + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: @@ -321,9 +329,14 @@ def _add_departed(self, veh_id, veh_type): # subscribe the new vehicle self.kernel_api.vehicle.subscribe(veh_id, [ - tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, tc.VAR_ROAD_ID, - tc.VAR_SPEED, tc.VAR_EDGES, tc.VAR_POSITION, tc.VAR_ANGLE, - tc.VAR_SPEED_WITHOUT_TRACI + tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION, + tc.VAR_ROAD_ID, + tc.VAR_SPEED, + tc.VAR_EDGES, + tc.VAR_POSITION, + tc.VAR_ANGLE, + tc.VAR_SPEED_WITHOUT_TRACI, + tc.VAR_FUELCONSUMPTION ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -358,6 +371,8 @@ def _add_departed(self, veh_id, veh_type): self.kernel_api.vehicle.getLaneIndex(veh_id) self.__sumo_obs[veh_id][tc.VAR_SPEED] = \ 
self.kernel_api.vehicle.getSpeed(veh_id) + self.__sumo_obs[veh_id][tc.VAR_FUELCONSUMPTION] = \ + self.kernel_api.vehicle.getFuelConsumption(veh_id) # make sure that the order of rl_ids is kept sorted self.__rl_ids.sort() @@ -504,10 +519,7 @@ def get_num_arrived(self): def get_arrived_ids(self): """See parent class.""" - if len(self._arrived_ids) > 0: - return self._arrived_ids[-1] - else: - return 0 + return self._arrived_ids def get_arrived_rl_ids(self): """See parent class.""" @@ -518,10 +530,18 @@ def get_arrived_rl_ids(self): def get_departed_ids(self): """See parent class.""" - if len(self._departed_ids) > 0: - return self._departed_ids[-1] - else: - return 0 + return self._departed_ids + + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + + def get_fuel_consumption(self, veh_id, error=-1001): + """Return fuel consumption in gallons/s.""" + ml_to_gallons = 0.000264172 + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" @@ -1009,7 +1029,8 @@ def update_vehicle_colors(self): for veh_id in self.get_rl_ids(): try: # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: # color rl vehicles red self.set_color(veh_id=veh_id, color=RED) except (FatalTraCIError, TraCIException) as e: @@ -1020,7 +1041,8 @@ def update_vehicle_colors(self): try: color = CYAN if veh_id in self.get_observed_ids() else WHITE # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. 
- if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1030,7 +1052,8 @@ def update_vehicle_colors(self): if 'av' in veh_id: color = RED # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color) except (FatalTraCIError, TraCIException) as e: print('Error when updating human vehicle colors:', e) @@ -1043,7 +1066,8 @@ def update_vehicle_colors(self): veh_speed = self.get_speed(veh_id) bin_index = np.digitize(veh_speed, speed_ranges) # If vehicle is already being colored via argument to vehicles.add(), don't re-color it. - if self._force_color_update or 'color' not in self.type_parameters[self.get_type(veh_id)]: + if self._force_color_update or 'color' not in \ + self.type_parameters[self.get_type(veh_id)]: self.set_color(veh_id=veh_id, color=color_bins[bin_index]) # clear the list of observed vehicles diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 60760f357..1434636e6 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -333,6 +333,7 @@ def energy_consumption(env, gain=.001): return -gain * power + def vehicle_energy_consumption(env, veh_id, gain=.001): """Calculate power consumption of a vehicle. 
@@ -348,6 +349,7 @@ def vehicle_energy_consumption(env, veh_id, gain=.001): Ca = 0.3 # aerodynamic drag coefficient rho = 1.225 # air density (kg/m^3) A = 2.6 # vehicle cross sectional area (m^2) + if veh_id not in env.k.vehicle.previous_speeds: return 0 speed = env.k.vehicle.get_speed(veh_id) @@ -358,3 +360,84 @@ def vehicle_energy_consumption(env, veh_id, gain=.001): power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 return -gain * power + + +def miles_per_megajoule(env, veh_ids=None, gain=.001): + """Calculate miles per mega-joule of either a particular vehicle or the total average of all the vehicles. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. + veh_ids : [list] + list of veh_ids to compute the reward over + gain : float + scaling factor for the reward + """ + mpj = 0 + counter = 0 + if veh_ids is None: + veh_ids = env.k.vehicle.get_ids() + elif not isinstance(veh_ids, list): + veh_ids = [veh_ids] + for veh_id in veh_ids: + speed = env.k.vehicle.get_speed(veh_id) + # convert to be positive since the function called is a penalty + power = -vehicle_energy_consumption(env, veh_id, gain=1.0) + if power > 0 and speed >= 0.0: + counter += 1 + # meters / joule is (v * \delta t) / (power * \delta t) + mpj += speed / power + if counter > 0: + mpj /= counter + + # convert from meters per joule to miles per joule + mpj /= 1609.0 + # convert from miles per joule to miles per megajoule + mpj *= 10 ** 6 + + return mpj * gain + + +def miles_per_gallon(env, veh_ids=None, gain=.001): + """Calculate mpg of either a particular vehicle or the total average of all the vehicles. + + Assumes vehicle is an average sized vehicle. 
+ The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. + veh_ids : [list] + list of veh_ids to compute the reward over + gain : float + scaling factor for the reward + """ + mpg = 0 + counter = 0 + if veh_ids is None: + veh_ids = env.k.vehicle.get_ids() + elif not isinstance(veh_ids, list): + veh_ids = [veh_ids] + for veh_id in veh_ids: + speed = env.k.vehicle.get_speed(veh_id) + gallons_per_s = env.k.vehicle.get_fuel_consumption(veh_id) + if gallons_per_s > 0 and speed >= 0.0: + counter += 1 + # meters / gallon is (v * \delta t) / (gallons_per_s * \delta t) + mpg += speed / gallons_per_s + if counter > 0: + mpg /= counter + + # convert from meters per gallon to miles per gallon + mpg /= 1609.0 + + return mpg * gain diff --git a/flow/envs/__init__.py b/flow/envs/__init__.py index 6f4351cc0..8bea3dd4f 100755 --- a/flow/envs/__init__.py +++ b/flow/envs/__init__.py @@ -4,7 +4,7 @@ from flow.envs.bottleneck import BottleneckAccelEnv, BottleneckEnv, \ BottleneckDesiredVelocityEnv from flow.envs.traffic_light_grid import TrafficLightGridEnv, \ - TrafficLightGridPOEnv, TrafficLightGridTestEnv + TrafficLightGridPOEnv, TrafficLightGridTestEnv, TrafficLightGridBenchmarkEnv from flow.envs.ring.lane_change_accel import LaneChangeAccelEnv, \ LaneChangeAccelPOEnv from flow.envs.ring.accel import AccelEnv @@ -34,6 +34,7 @@ 'WaveAttenuationPOEnv', 'TrafficLightGridEnv', 'TrafficLightGridPOEnv', + 'TrafficLightGridBenchmarkEnv', 'BottleneckDesiredVelocityEnv', 'TestEnv', 'BayBridgeEnv', diff --git a/flow/envs/traffic_light_grid.py b/flow/envs/traffic_light_grid.py index 53391a329..8be0cb8a5 100644 --- a/flow/envs/traffic_light_grid.py +++ b/flow/envs/traffic_light_grid.py @@ -731,6 +731,17 @@ def additional_command(self): [self.k.vehicle.set_observed(veh_id) for veh_id in self.observed_ids] +class 
TrafficLightGridBenchmarkEnv(TrafficLightGridPOEnv): + """Class used for the benchmarks in `Benchmarks for reinforcement learning in mixed-autonomy traffic`.""" + + def compute_reward(self, rl_actions, **kwargs): + """See class definition.""" + if self.env_params.evaluate: + return - rewards.min_delay_unscaled(self) + else: + return rewards.desired_velocity(self) + + class TrafficLightGridTestEnv(TrafficLightGridEnv): """ Class for use in testing. diff --git a/flow/networks/highway.py b/flow/networks/highway.py index e1234053c..7e9c18ad5 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -13,7 +13,12 @@ # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into - "num_edges": 1 + "num_edges": 1, + # whether to include a ghost edge of length 500m. This edge is provided a + # different speed limit. + "use_ghost_edge": False, + # speed limit for the ghost edge + "ghost_speed_limit": 25, } @@ -29,6 +34,9 @@ class HighwayNetwork(Network): * **lanes** : number of lanes in the highway * **speed_limit** : max speed limit of the highway * **num_edges** : number of edges to divide the highway into + * **use_ghost_edge** : whether to include a ghost edge of length 500m. This + edge is provided a different speed limit.
+ * **ghost_speed_limit** : speed limit for the ghost edge Usage ----- @@ -62,9 +70,7 @@ def __init__(self, if p not in net_params.additional_params: raise KeyError('Network parameter "{}" not supplied'.format(p)) - self.length = net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = 500 super().__init__(name, vehicles, net_params, initial_config, traffic_lights) @@ -83,6 +89,13 @@ def specify_nodes(self, net_params): "y": 0 }] + if self.net_params.additional_params["use_ghost_edge"]: + nodes += [{ + "id": "edge_{}".format(num_edges + 1), + "x": length + self.end_length, + "y": 0 + }] + return nodes def specify_edges(self, net_params): @@ -101,12 +114,22 @@ def specify_edges(self, net_params): "length": segment_length }] + if self.net_params.additional_params["use_ghost_edge"]: + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges + 1), + "length": self.end_length + }] + return edges def specify_types(self, net_params): """See parent class.""" lanes = net_params.additional_params["lanes"] speed_limit = net_params.additional_params["speed_limit"] + end_speed_limit = net_params.additional_params["ghost_speed_limit"] types = [{ "id": "highwayType", @@ -114,6 +137,13 @@ def specify_types(self, net_params): "speed": speed_limit }] + if self.net_params.additional_params["use_ghost_edge"]: + types += [{ + "id": "highway_end", + "numLanes": lanes, + "speed": end_speed_limit + }] + return types def specify_routes(self, net_params): @@ -123,15 +153,51 @@ def specify_routes(self, net_params): for i in range(num_edges): rts["highway_{}".format(i)] = ["highway_{}".format(j) for j in range(i, num_edges)] + if self.net_params.additional_params["use_ghost_edge"]: + rts["highway_{}".format(i)].append("highway_end") return rts def specify_edge_starts(self): """See parent 
class.""" + junction_length = 0.1 + length = self.net_params.additional_params["length"] + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the main edges. + edge_starts = [ + ("highway_{}".format(i), + i * (length / num_edges + junction_length)) + for i in range(num_edges) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + ("highway_end", length + num_edges * junction_length) + ] + + return edge_starts + + def specify_internal_edge_starts(self): + """See parent class.""" + junction_length = 0.1 length = self.net_params.additional_params["length"] - edgestarts = [("highway_{}".format(i), (length / self.num_edges) * i) - for i in range(self.num_edges)] - return edgestarts + num_edges = self.net_params.additional_params.get("num_edges", 1) + + # Add the junctions. + edge_starts = [ + (":edge_{}".format(i + 1), + (i + 1) * length / num_edges + i * junction_length) + for i in range(num_edges - 1) + ] + + if self.net_params.additional_params["use_ghost_edge"]: + edge_starts += [ + (":edge_{}".format(num_edges), + length + (num_edges - 1) * junction_length) + ] + + return edge_starts @staticmethod def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index ac4cc031d..a37bac95b 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -11,6 +11,7 @@ import time import ray + try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -26,7 +27,7 @@ from flow.utils.rllib import FlowParamsEncoder from flow.visualize.transfer.util import inflows_range -from flow.visualize.plot_custom_callables import plot_trip_distribution +from flow.visualize.plot_custom_callables import plot_trip_distribution from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables @@ 
-42,8 +43,10 @@ 2 - the number of the checkpoint """ + @ray.remote -def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None, max_completed_trips=None, v_des=12): +def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config=None, result_dir=None, + max_completed_trips=None, v_des=12): """Replay or run transfer test (defined by transfer_fn) by modif. Arguments: @@ -221,8 +224,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= if use_lstm: action[agent_id], lstm_state[agent_id], _ = \ agent.compute_action( - state[agent_id], state=lstm_state[agent_id], - policy_id=policy_map_fn(agent_id)) + state[agent_id], state=lstm_state[agent_id], + policy_id=policy_map_fn(agent_id)) else: action[agent_id] = agent.compute_action( state[agent_id], policy_id=policy_map_fn(agent_id)) @@ -246,8 +249,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= for past_veh_id in per_vehicle_energy_trace.keys(): if past_veh_id not in veh_ids and past_veh_id not in completed_vehicle_avg_energy: - all_trip_energy_distribution[completed_veh_types[past_veh_id]].append(np.sum(per_vehicle_energy_trace[past_veh_id])) - all_trip_time_distribution[completed_veh_types[past_veh_id]].append(len(per_vehicle_energy_trace[past_veh_id])) + all_trip_energy_distribution[completed_veh_types[past_veh_id]].append( + np.sum(per_vehicle_energy_trace[past_veh_id])) + all_trip_time_distribution[completed_veh_types[past_veh_id]].append( + len(per_vehicle_energy_trace[past_veh_id])) completed_vehicle_avg_energy[past_veh_id] = np.sum(per_vehicle_energy_trace[past_veh_id]) completed_vehicle_travel_time[past_veh_id] = len(per_vehicle_energy_trace[past_veh_id]) @@ -258,7 +263,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= per_vehicle_energy_trace[veh_id].append(0) completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) else: - 
per_vehicle_energy_trace[veh_id].append(-1*vehicle_energy_consumption(env, veh_id)) + per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) if type(done) is dict and done['__all__']: break @@ -322,7 +327,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= all_trip_energies = os.path.join(output_dir, '{}-all_trip_energies.npy'.format(exp_name)) np.save(all_trip_energies, dict(all_trip_energy_distribution)) fig_names, figs = plot_trip_distribution(all_trip_energy_distribution) - + for fig_name, fig in zip(fig_names, figs): edist_out = os.path.join(output_dir, '{}_energy_distribution.png'.format(fig_name)) fig.savefig(edist_out) @@ -455,18 +460,21 @@ def create_parser(): output_dir = args.output_dir if args.run_transfer: - s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] + s = [ray.cloudpickle.dumps(transfer_test) for transfer_test in + inflows_range(penetration_rates=[0.0, 0.1, 0.2, 0.3])] ray_output = [replay.remote(args, flow_params, output_dir=output_dir, transfer_test=transfer_test, - rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips) + rllib_config=rllib_config, result_dir=rllib_result_dir, + max_completed_trips=args.max_completed_trips) for transfer_test in s] ray.get(ray_output) elif args.v_des_sweep: assert args.controller == 'follower_stopper' - ray_output = [replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, - result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) - for v_des in range(8, 17, 2)] + ray_output = [ + replay.remote(args, flow_params, output_dir="{}/{}".format(output_dir, v_des), rllib_config=rllib_config, + result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips, v_des=v_des) + for v_des in range(8, 17, 2)] ray.get(ray_output) else: @@ -474,10 +482,12 @@ def 
create_parser(): pr = args.penetration_rate if args.penetration_rate is not None else 0 single_transfer = next(inflows_range(penetration_rates=pr)) ray.get(replay.remote(args, flow_params, output_dir=output_dir, transfer_test=single_transfer, - rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + rllib_config=rllib_config, result_dir=rllib_result_dir, + max_completed_trips=args.max_completed_trips)) else: ray.get(replay.remote(args, flow_params, output_dir=output_dir, - rllib_config=rllib_config, result_dir=rllib_result_dir, max_completed_trips=args.max_completed_trips)) + rllib_config=rllib_config, result_dir=rllib_result_dir, + max_completed_trips=args.max_completed_trips)) if args.use_s3: s3_string = 's3://kanaad.experiments/i210_replay/' + date diff --git a/flow/visualize/plot_custom_callables.py b/flow/visualize/plot_custom_callables.py index f82377cef..ee9a10c1d 100644 --- a/flow/visualize/plot_custom_callables.py +++ b/flow/visualize/plot_custom_callables.py @@ -4,25 +4,31 @@ from datetime import datetime import errno import numpy as np + try: from matplotlib import pyplot as plt except ImportError: import matplotlib + matplotlib.use('TkAgg') from matplotlib import pyplot as plt import os import pytz import sys + def make_bar_plot(vals, title): + """Make a bar plot.""" print(len(vals)) fig = plt.figure() plt.hist(vals, 10, facecolor='blue', alpha=0.5) plt.title(title) - plt.xlim(1000,3000) + plt.xlim(1000, 3000) return fig + def plot_trip_distribution(all_trip_energy_distribution): + """Plot a distribution of trips.""" non_av_vals = [] figures = [] figure_names = [] @@ -31,7 +37,7 @@ def plot_trip_distribution(all_trip_energy_distribution): non_av_vals.extend(all_trip_energy_distribution[key]) figures.append(make_bar_plot(all_trip_energy_distribution[key], key)) figure_names.append(key) - + figure_names.append('All Non-AV') figures.append(make_bar_plot(non_av_vals, 'All Non-AV')) @@ -39,8 +45,7 @@ def 
plot_trip_distribution(all_trip_energy_distribution): figures.append(make_bar_plot(non_av_vals + all_trip_energy_distribution['av'], 'All')) return figure_names, figures - - + def parse_flags(args): """Parse training options user can specify in command line. diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py index d6c8d9f88..107e6d026 100644 --- a/flow/visualize/transfer/util.py +++ b/flow/visualize/transfer/util.py @@ -28,8 +28,7 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): inflow_119257914 = dict(veh_type="human", edge="119257914", - vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * - (1 - (pr)) * fr_coef, + vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * (1 - (pr)) * fr_coef, # probability=1.0, departLane="random", departSpeed=departSpeed) @@ -47,17 +46,16 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): if on_ramp: inflow_27414345 = dict(veh_type="human", edge="27414345", - vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * - (1 - (pr)) * fr_coef, + vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * (1 - (pr)) * fr_coef, departLane="random", departSpeed=departSpeed) all_inflows.append(inflow_27414345) if pr > 0.0: inflow_27414342 = dict(veh_type="human", - edge="27414342#0", - vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef, - departLane="random", - departSpeed=departSpeed) + edge="27414342#0", + vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef, + departLane="random", + departSpeed=departSpeed) all_inflows.append(inflow_27414342) for inflow_def in all_inflows: diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py index 76146dbe6..58967cef8 100644 --- a/tests/fast_tests/test_controllers.py +++ b/tests/fast_tests/test_controllers.py @@ -8,7 +8,7 @@ from flow.controllers.routing_controllers import ContinuousRouter from flow.controllers.car_following_models import IDMController, \ OVMController, BCMController, LinearOVM, CFMController, LACController, \ - 
GippsController + GippsController, BandoFTLController from flow.controllers import FollowerStopper, PISaturation, NonLocalFollowerStopper from tests.setup_scripts import ring_road_exp_setup import os @@ -709,7 +709,7 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) -class TestGippsontroller(unittest.TestCase): +class TestGippsController(unittest.TestCase): """ Tests that the Gipps Controller returning mathematically accurate values. """ @@ -765,5 +765,59 @@ def test_get_action(self): np.testing.assert_array_almost_equal(requested_accel, expected_accel) +class TestBandoFTLController(unittest.TestCase): + """ + Tests that the Bando Controller returning mathematically accurate values. + """ + + def setUp(self): + # add a few vehicles to the network using the requested model + # also make sure that the input params are what is expected + contr_params = { + "alpha": .5, + "beta": 20, + "h_st": 2, + "h_go": 10, + "v_max": 32, + "want_max_accel": False, + } + + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(BandoFTLController, contr_params), + routing_controller=(ContinuousRouter, {}), + car_following_params=SumoCarFollowingParams( + accel=15, decel=5), + num_vehicles=5) + + # create the environment and network classes for a ring road + self.env, _, _ = ring_road_exp_setup(vehicles=vehicles) + + def tearDown(self): + # terminate the traci instance + self.env.terminate() + + # free data used by the class + self.env = None + + def test_get_action(self): + self.env.reset() + ids = self.env.k.vehicle.get_ids() + + test_headways = [2, 4, 6, 8, 10] + for i, veh_id in enumerate(ids): + self.env.k.vehicle.set_headway(veh_id, test_headways[i]) + + requested_accel = [ + self.env.k.vehicle.get_acc_controller(veh_id).get_action(self.env) + for veh_id in ids + ] + + expected_accel = [1.649129, 7.853475, 14.057821, 15.70695, 15.959713] + + np.testing.assert_array_almost_equal(requested_accel, 
expected_accel) + + if __name__ == '__main__': unittest.main() diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index a05fed68e..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -42,6 +42,7 @@ from examples.exp_configs.non_rl.minicity import flow_params as non_rl_minicity from examples.exp_configs.non_rl.ring import flow_params as non_rl_ring from examples.exp_configs.non_rl.i210_subnetwork import flow_params as non_rl_i210 +from examples.exp_configs.non_rl.highway_single import flow_params as non_rl_highway_single os.environ['TEST_FLAG'] = 'True' os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' @@ -110,6 +111,10 @@ def test_i210(self): """Verify that examples/exp_configs/non_rl/i210_subnetwork.py is working.""" self.run_simulation(non_rl_i210) + def test_highway_single(self): + """Verify that examples/exp_configs/non_rl/highway_single.py is working.""" + self.run_simulation(non_rl_highway_single) + @staticmethod def run_simulation(flow_params): # make the horizon small and set render to False diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index f9dd47c04..d72a50b17 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,6 +5,7 @@ from flow.networks import BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork +from tests.setup_scripts import highway_exp_setup __all__ = [ "MultiRingNetwork", "MiniCityNetwork" @@ -94,11 +95,69 @@ def test_additional_net_params(self): "length": 1000, "lanes": 4, "speed_limit": 30, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 } ) ) + def test_ghost_edge(self): + """Validate the functionality of the ghost edge feature.""" + # =================================================================== # + # Without a ghost edge # + # 
=================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1000) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), ["highway_0"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + + # =================================================================== # + # With a ghost edge # + # =================================================================== # + + # create the network + env, _, _ = highway_exp_setup( + net_params=NetParams(additional_params={ + "length": 1000, + "lanes": 4, + "speed_limit": 30, + "num_edges": 1, + "use_ghost_edge": True, + "ghost_speed_limit": 25 + }) + ) + env.reset() + + # check the network length + self.assertEqual(env.k.network.length(), 1500.1) + + # check the edge list + self.assertEqual(env.k.network.get_edge_list(), + ["highway_0", "highway_end"]) + + # check the speed limits of the edges + self.assertEqual(env.k.network.speed_limit("highway_0"), 30) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) + class TestRingNetwork(unittest.TestCase): diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 485a6a072..b791bba64 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -258,7 +258,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -330,7 +332,9 @@ def test_no_junctions_highway(self): "lanes": 4, "speed_limit": 30, 
"resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -398,7 +402,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -465,7 +471,9 @@ def test_no_junctions_highway(self): "lanes": 3, "speed_limit": 30, "resolution": 40, - "num_edges": 3 + "num_edges": 3, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index 08d5b2c1e..ac88d2e42 100644 --- a/tests/setup_scripts.py +++ b/tests/setup_scripts.py @@ -343,7 +343,9 @@ def highway_exp_setup(sim_params=None, "lanes": 1, "speed_limit": 30, "resolution": 40, - "num_edges": 1 + "num_edges": 1, + "use_ghost_edge": False, + "ghost_speed_limit": 25, } net_params = NetParams(additional_params=additional_net_params) From 153da9d7dfd6b811c13634284f06000dceca9842 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:25:00 -0700 Subject: [PATCH 065/335] change the bucket to a common bucket --- examples/exp_configs/non_rl/highway_single.py | 2 +- flow/core/experiment.py | 29 +++++----------- flow/core/kernel/vehicle/base.py | 4 +-- flow/core/kernel/vehicle/traci.py | 13 ++----- flow/data_pipeline/data_pipeline.py | 34 +++++++++++++++++-- flow/data_pipeline/lambda_function.py | 4 +-- flow/visualize/i210_replay.py | 10 ++++++ 7 files changed, 57 insertions(+), 39 deletions(-) diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index 46b18c0e9..c2df0759a 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ 
b/examples/exp_configs/non_rl/highway_single.py @@ -19,7 +19,7 @@ END_SPEED = 16 TRAFFIC_FLOW = 2056 HORIZON = 3600 -INCLUDE_NOISE = False +INCLUDE_NOISE = True additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8b5cbac02..2296ef635 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,10 +1,11 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time +from datetime import date import os import numpy as np import uuid @@ -145,9 +146,7 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], - "road_grade": [], "source_id": []} + extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): @@ -167,22 +166,7 @@ def rl_actions(*_): ret += reward # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( - self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - 
extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2d_position(vid) - extra_info["x"].append(position[0]) - extra_info["y"].append(position[1]) - extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) - #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) + get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. @@ -231,7 +215,10 @@ def rl_actions(*_): upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + if partition_name == "default": + partition_name = source_id[0:3] + partition_name = date.today().isoformat() + " " + partition_name + upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index cb547cddb..7f001ed13 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -691,8 +691,8 @@ def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_velocity_without_noise(self, veh_id): - """Return the velocity without noise of vehicle with veh_id.""" + def get_realized_accel(self, veh_id): + """Return the acceleration that the vehicle actually make.""" raise NotImplementedError def get_road_grade(self, veh_id): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2fd978012..f1dbee8bf 100644 --- a/flow/core/kernel/vehicle/traci.py +++ 
b/flow/core/kernel/vehicle/traci.py @@ -1115,13 +1115,6 @@ def set_max_speed(self, veh_id, max_speed): self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) # add for data pipeline - def get_next_v(self, veh_id): - """See parent class.""" - if not "next_v" in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["next_v"] = None - return self.__vehicles[veh_id]["next_v"] - #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step - def get_accel(self, veh_id): """See parent class.""" if not "accel" in self.__vehicles[veh_id]: @@ -1138,9 +1131,9 @@ def get_accel_without_noise(self, veh_id): self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] - def get_velocity_without_noise(self, veh_id): + def get_realized_accel(self, veh_id): """See parent class.""" - return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0]) + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def get_2d_position(self, veh_id, error=-1001): """See parent class.""" @@ -1148,5 +1141,5 @@ def get_2d_position(self, veh_id, error=-1001): def get_road_grade(self, veh_id): """See parent class.""" - # TODO + # TODO : Brent return 0 diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index afbc09f92..0cd0cbc79 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,6 +88,34 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return +def extra_init(): + """Return the dictionary with all the feild pre-populated with empty list.""" + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "road_grade": [], "source_id": []} + return extra_info + + +def get_extra_info(veh_kernel, extra_info, veh_ids): + """Get all the 
necessary information for the trajectory output from flow.""" + for vid in veh_ids: + extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(veh_kernel.get_headway(vid)) + extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["leader_id"].append(veh_kernel.get_leader(vid)) + extra_info["follower_id"].append(veh_kernel.get_follower(vid)) + extra_info["leader_rel_speed"].append(veh_kernel.get_speed( + veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) + extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) + extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) + position = veh_kernel.get_2d_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(veh_kernel.get_speed(vid)) + + class AthenaQuery: """ Class used to run query. @@ -199,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -257,14 +285,14 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index afef55a4b..3f0abb8a1 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if bucket == 'circles.data' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 502d646aa..9e41009e8 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -27,6 +27,9 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import 
flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +import uuid + EXAMPLE_USAGE = """ example usage: python i210_replay.py -r /ray_results/experiment_dir/result_dir -c 1 @@ -190,6 +193,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) + extra_info = extra_init() + source_id = uuid.uuid4().hex + for i in range(args.num_rollouts): vel = [] custom_vals = {key: [] for key in custom_callables.keys()} @@ -222,6 +228,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= veh_ids = env.k.vehicle.get_ids() vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) + # Collect information from flow for the trajectory output + get_extra_info(env.k.vehicle, extra_info, veh_ids) + extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids)) + # Compute the results for the custom callables. 
for (key, lambda_func) in custom_callables.items(): custom_vals[key].append(lambda_func(env)) From 2851e8a6b7089756c33a4519f6148c373b763a77 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:28:17 -0700 Subject: [PATCH 066/335] removed the old tests --- flow/data_pipeline/datapipeline_test.py | 37 ------------------------- 1 file changed, 37 deletions(-) delete mode 100644 flow/data_pipeline/datapipeline_test.py diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py deleted file mode 100644 index 0e1a50518..000000000 --- a/flow/data_pipeline/datapipeline_test.py +++ /dev/null @@ -1,37 +0,0 @@ -"""functions that calculates the expected result for testing.""" -import math - -# Vehicle Mass -M = 1200 -# Gravity -g = 9.81 -# Density of Air -ro_air = 1.225 -# Rolling resistance coefficient -C_r = .005 -# Aerodynamic drag coefficient -C_a = 0.3 -# Vehicle Cross sectional Area -A = 2.6 -# Road grade -theta = 0 - - -def heavyside(inp): - """Return 1 if input is positive.""" - return 0 if inp <= 0 else 1 - - -def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): - """Calculate the expected power for POWER_DEMAND_MODEL query.""" - acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) - accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) - rolling_friction = M * g * C_r * mu - air_drag = .5 * ro_air * A * C_a * mu**3 - power = accel_and_slope + rolling_friction + air_drag - return power - - -def apply_energy_one(row): - """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] From c01f235891baa45b8af010e730e0daeefb557ae5 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:49:11 -0700 Subject: [PATCH 067/335] fix merge issue in i210_replay --- flow/visualize/i210_replay.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/visualize/i210_replay.py 
b/flow/visualize/i210_replay.py index f6898d1ec..abb13bbc9 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -211,7 +211,6 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= extra_info = extra_init() source_id = uuid.uuid4().hex - for i in range(args.num_rollouts): i = 0 while i < args.num_rollouts: print("Rollout iter", i) From 27445157469851cf146b7eb4b08d811929155033 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:01:41 -0700 Subject: [PATCH 068/335] added auto upload to s3 feature for the reply scipt and fix some other minor issues --- flow/core/experiment.py | 15 ++--- flow/core/kernel/vehicle/traci.py | 5 +- flow/data_pipeline/data_pipeline.py | 12 ++-- flow/data_pipeline/lambda_function.py | 4 +- flow/data_pipeline/query.py | 86 ++++++++++++++------------- flow/data_pipeline/run_query.py | 2 +- flow/visualize/i210_replay.py | 21 ++++++- 7 files changed, 82 insertions(+), 63 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8e0ba0bbf..6e9be9aea 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -88,7 +88,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""): """Run the given network for a set number of runs. Parameters @@ -106,8 +106,9 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No will be used to later for query. If NONE, won't upload output to S3. only_query: str - Specifies whether queries should be automatically run the - simulation data when it gets uploaded to s3 + Specifies which queries should be automatically run when the + simulation data gets uploaded to S3. 
If an empty str is passed in, + then it implies no queries should be run on this. Returns ------- @@ -147,7 +148,7 @@ def rl_actions(*_): t = time.time() times = [] extra_info = extra_init() - source_id = uuid.uuid4().hex + source_id = 'flow_{}'.format(uuid.uuid4().hex) for i in range(num_runs): ret = 0 @@ -167,7 +168,7 @@ def rl_actions(*_): # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): @@ -218,8 +219,8 @@ def rl_actions(*_): if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( + partition_name, upload_file_path.split('/')[-1].split('_')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b2fd66073..2a4e06257 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -962,7 +962,6 @@ def apply_acceleration(self, veh_ids, acc): self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) - #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1127,7 +1126,7 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): """See parent class.""" - if not "accel" in 
self.__vehicles[veh_id]: + if "accel" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] @@ -1137,7 +1136,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" - if not "accel_without_noise" in self.__vehicles[veh_id]: + if "accel_without_noise" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 0cd0cbc79..fbd975c5e 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def generate_trajectory_from_flow(data_path, extra_info, partition_name): +def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -227,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -285,15 +285,15 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", - partition="test") + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" + "query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") + s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 3f0abb8a1..fd50ba8f5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'circles.data' and 'trajectory-output/' in key: + if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2d34570f9..78960456d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -11,45 +11,47 @@ class 
QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ - "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ - "WITH sub1 AS ( " \ - "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ - "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ - "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ - "LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ - "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ - ")," \ - "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, 
road_grade, source_id, " \ - "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ - "FROM sub1" \ - ") " \ - "SELECT id, time, speed_denoised, accel_without_noise, " \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ - "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ - "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ - "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ - "FROM sub2 " \ - "ORDER BY id, time " + POWER_DEMAND_MODEL = """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER 
(PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table + WHERE partition_name=\'{partition}\'), + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep + ) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time """ diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index f065a726e..ac927c749 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -6,7 +6,7 @@ parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" "a S3 location") parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://circles.data.pipeline/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index abb13bbc9..5fd3142ad 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -209,7 +209,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= }) extra_info = extra_init() - source_id = uuid.uuid4().hex + source_id = 'flow_{}'.format(uuid.uuid4().hex) i = 0 
while i < args.num_rollouts: @@ -251,7 +251,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # Collect information from flow for the trajectory output get_extra_info(env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids)) + extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in custom_callables.items(): @@ -328,6 +328,17 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # convert the emission file into a csv file emission_to_csv(emission_path, output_path=output_path) + # generate the trajectory output file + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + + # upload to s3 if asked + if args.use_s3: + partition_name = date.today().isoformat() + " " + source_id[0:3] + upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(args.only_query)[2:-2]) + # print the location of the emission csv file print("\nGenerated emission file at " + output_path) @@ -437,6 +448,12 @@ def create_parser(): 'be run in cluster mode') parser.add_argument('--exp_title', type=str, required=False, default=None, help='Informative experiment title to help distinguish results') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser From 462f4bb877a6e179ed27a926e0e660e9b37d6700 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:07:29 -0700 Subject: [PATCH 069/335] fix trailing white space style issue --- flow/data_pipeline/query.py | 79 ++++++++++++++++++------------------- 1 file changed, 
39 insertions(+), 40 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 78960456d..06150eadc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -12,46 +12,45 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 
'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep - ) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 
'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time """ + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time""" From c2513e9e1d0f22065e513c34e8edee178bef1602 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:10:43 -0700 Subject: [PATCH 070/335] some minor issue fixed --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 06150eadc..4f34945d8 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -22,7 +22,7 @@ class QueryStrings(Enum): ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id From 1dcf6a654f59a2c36406b3b6cf732e1fac79d3fb Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:42:29 -0700 Subject: [PATCH 071/335] reformatting energy queries --- flow/data_pipeline/query.py | 112 ++++++++++++++++++++++-------------- 1 file changed, 69 insertions(+), 43 deletions(-) diff 
--git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 4f34945d8..e194b5648 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -5,52 +5,78 @@ tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "analysis": ["POWER_DEMAND_MODEL"]} +VEHICLE_POWER_DEMAND_SUBQUERY = """ + SELECT + id, + "time", + speed, + acceleration, + road_grade, + 1200 * speed * ( + (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + + 0.8 + 9.81 * SIN(road_grade) + ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, + source_id + FROM {} + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ORDER BY id, "time" + """ class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" - SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + SAMPLE = """ + SELECT * + FROM trajectory_table + WHERE partition_name=\'{partition}\' + LIMIT 15; + """ + + UPDATE_PARTITION = """ + ALTER TABLE trajectory_table + ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + """ + + POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table') + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * 
accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + WITH denoised_accel_cte AS ( + SELECT + id, + "time", + speed, + accel_without_noise AS acceleration, + road_grade, + source_id + FROM trajectory_table + ) + {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte')) + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table - WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time""" + WITH lagged_timestep AS ( + SELECT + "time", + id, + accel_without_noise, + road_grade, + source_id, + "time" - LAG("time", 1) + OVER 
(PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ), denoised_speed_cte AS ( + SELECT + id, + "time", + prev_speed + accel_without_noise * sim_step AS speed, + accel_without_noise AS acceleration, + road_grade, + source_id + FROM lagged_timestep + ) + {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte')) From 8a68fb93be587339fc1535b5c58c5e79731e1cd7 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:52:17 -0700 Subject: [PATCH 072/335] rename vehicle power demand query --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e194b5648..db1283f9d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -5,7 +5,7 @@ tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "analysis": ["POWER_DEMAND_MODEL"]} -VEHICLE_POWER_DEMAND_SUBQUERY = """ +VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, "time", @@ -39,7 +39,7 @@ class QueryStrings(Enum): ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); """ - POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table') + POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table') POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -52,7 +52,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table ) - {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -79,4 +79,4 @@ class QueryStrings(Enum): source_id FROM 
lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) From f021d5a6381e777eaea0a917b6a8f7e95ca3a1e0 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 10:44:06 -0700 Subject: [PATCH 073/335] move partition condition to cte's --- flow/data_pipeline/query.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index db1283f9d..28564afde 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -19,8 +19,6 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, source_id FROM {} - WHERE 1 = 1 - AND partition_name=\'{partition}\' ORDER BY id, "time" """ @@ -39,7 +37,20 @@ class QueryStrings(Enum): ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); """ - POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table') + POWER_DEMAND_MODEL = """ + WITH regular_cte AS ( + SELECT + id, + "time", + speed, + acceleration, + road_grade, + source_id + FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -51,14 +62,16 @@ class QueryStrings(Enum): road_grade, source_id FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( SELECT - "time", id, + "time", accel_without_noise, road_grade, source_id, From 3d16a5ad4e308da3628354191f14b2c949a59526 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 19 May 2020 15:53:30 -0400 Subject: [PATCH 074/335] Ev i210 highway updated (#937) Merge in wave calibration for the straight road @AboudyKreidieh --- 
examples/exp_configs/non_rl/highway_single.py | 69 +- .../exp_configs/non_rl/i210_subnetwork.py | 110 +- .../non_rl/i210_subnetwork_sweep.py | 151 - .../rl/multiagent/multiagent_i210.py | 12 +- .../rl/multiagent/multiagent_straight_road.py | 45 +- .../templates/sumo/i210_with_ghost_cell.xml | 5719 +++++++++++++++++ .../i210_with_ghost_cell_with_downstream.xml | 5719 +++++++++++++++++ examples/train.py | 18 +- flow/controllers/__init__.py | 5 +- flow/controllers/routing_controllers.py | 26 + flow/networks/highway.py | 17 +- flow/networks/i210_subnetwork.py | 247 +- flow/networks/ring.py | 2 +- tests/fast_tests/test_scenarios.py | 6 +- tests/fast_tests/test_vehicles.py | 1 + tests/setup_scripts.py | 1 + 16 files changed, 11812 insertions(+), 336 deletions(-) delete mode 100644 examples/exp_configs/non_rl/i210_subnetwork_sweep.py create mode 100644 examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml create mode 100644 examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index a2e44032a..7e0a5eb49 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -1,11 +1,7 @@ -"""Multi-agent highway with ramps example. - -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. 
-""" +"""Example of an open network with human-driven vehicles and a wave.""" import numpy as np -from flow.controllers import BandoFTLController +from flow.controllers import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import EnvParams from flow.core.params import NetParams @@ -14,18 +10,23 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.params import SumoCarFollowingParams from flow.core.rewards import miles_per_megajoule from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS -TRAFFIC_SPEED = 11 -END_SPEED = 16 -TRAFFIC_FLOW = 2056 -HORIZON = 2000 -INCLUDE_NOISE = False +# the speed of vehicles entering the network +TRAFFIC_SPEED = 24.1 +# the maximum speed at the downstream boundary edge +END_SPEED = 6.0 +# the inflow rate of vehicles +TRAFFIC_FLOW = 2215 +# the simulation time horizon (in steps) +HORIZON = 1500 +# whether to include noise in the car-following models +INCLUDE_NOISE = True -# percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10.0 additional_net_params = ADDITIONAL_NET_PARAMS.copy() @@ -38,35 +39,38 @@ "speed_limit": 30, # number of edges to divide the highway into "num_edges": 2, - # whether to include a ghost edge of length 500m. This edge is provided a - # different speed limit. 
+ # whether to include a ghost edge "use_ghost_edge": True, # speed limit for the ghost edge - "ghost_speed_limit": END_SPEED + "ghost_speed_limit": END_SPEED, + # length of the cell imposing a boundary + "boundary_cell_length": 300, }) vehicles = VehicleParams() vehicles.add( "human", - num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 if INCLUDE_NOISE else 0.0 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0.5 + ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + model="SL2015", + lc_sublane=2.0, ), - acceleration_controller=(BandoFTLController, { - 'alpha': .5, - 'beta': 20.0, - 'h_st': 12.0, - 'h_go': 50.0, - 'v_max': 30.0, - 'noise': 1.0 if INCLUDE_NOISE else 0.0, - }), ) +inflows = InFlows() + if PENETRATION_RATE > 0.0: vehicles.add( "av", num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 11.0}), + acceleration_controller=(FollowerStopper, {"v_des": 6.0}), ) inflows = InFlows() @@ -76,7 +80,7 @@ edge="highway_0", vehs_per_hour=int(TRAFFIC_FLOW * (1 - PENETRATION_RATE / 100)), depart_lane="free", - depart_speed="23", + depart_speed=TRAFFIC_SPEED, name="idm_highway_inflow") if PENETRATION_RATE > 0.0: @@ -85,7 +89,7 @@ edge="highway_0", vehs_per_hour=int(TRAFFIC_FLOW * (PENETRATION_RATE / 100)), depart_lane="free", - depart_speed="23", + depart_speed=TRAFFIC_SPEED, name="av_highway_inflow") # SET UP FLOW PARAMETERS @@ -106,15 +110,16 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, - sims_per_step=1, + warmup_steps=500, + sims_per_step=3, ), # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, - restart_instance=False + restart_instance=False, + use_ballistic=True ), # network-related parameters (see flow.core.params.NetParams and the diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py 
b/examples/exp_configs/non_rl/i210_subnetwork.py index d993ae93a..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -1,9 +1,9 @@ """I-210 subnetwork example.""" import os - import numpy as np -from flow.controllers.car_following_models import IDMController +from flow.controllers import IDMController +from flow.controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,7 +15,49 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# create the base vehicle type that will be used for inflows +# =========================================================================== # +# Specify some configurable constants. # +# =========================================================================== # + +# whether to include the upstream ghost edge in the network +WANT_GHOST_CELL = True +# whether to include the downstream slow-down edge in the network +WANT_DOWNSTREAM_BOUNDARY = True +# whether to include vehicles on the on-ramp +ON_RAMP = True +# the inflow rate of vehicles (in veh/hr) +INFLOW_RATE = 5 * 2215 +# the speed of inflowing vehicles from the main edge (in m/s) +INFLOW_SPEED = 24.1 + +# =========================================================================== # +# Specify the path to the network template. 
# +# =========================================================================== # + +if WANT_DOWNSTREAM_BOUNDARY: + net_template = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" + "downstream.xml") +elif WANT_GHOST_CELL: + net_template = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") +else: + net_template = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") + +# If the ghost cell is not being used, remove it from the initial edges that +# vehicles can be placed on. +edges_distribution = EDGES_DISTRIBUTION.copy() +if not WANT_GHOST_CELL: + edges_distribution.remove("ghost0") + +# =========================================================================== # +# Specify vehicle-specific information and inflows. # +# =========================================================================== # + vehicles = VehicleParams() vehicles.add( "human", @@ -24,35 +66,39 @@ lane_change_mode="strategic", ), acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 + "a": 1.3, + "b": 2.0, + "noise": 0.3, }), + routing_controller=(I210Router, {}) if ON_RAMP else None, ) inflow = InFlows() # main highway inflow.add( veh_type="human", - edge="119257914", - vehs_per_hour=8378, - departLane="random", - departSpeed=23) + edge="ghost0" if WANT_GHOST_CELL else "119257914", + vehs_per_hour=INFLOW_RATE, + departLane="best", + departSpeed=INFLOW_SPEED) # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321, -# departLane="random", -# departSpeed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421, -# departLane="random", -# departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") +if ON_RAMP: + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=500, + departLane="random", + 
departSpeed=10) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=500, + departLane="random", + departSpeed=10) + +# =========================================================================== # +# Generate the flow_params dict with all relevant simulation information. # +# =========================================================================== # flow_params = dict( # name of the experiment @@ -69,7 +115,7 @@ # simulation-related parameters sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, color_by_speed=True, use_ballistic=True @@ -77,14 +123,18 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4500, + horizon=10000, ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=net_template, + additional_params={ + "on_ramp": ON_RAMP, + "ghost_edge": WANT_GHOST_CELL, + } ), # vehicles to be placed in the network at the start of a rollout (see @@ -94,10 +144,14 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, + edges_distribution=edges_distribution, ), ) +# =========================================================================== # +# Specify custom callable that is logged during simulation runtime. # +# =========================================================================== # + edge_id = "119257908#1-AddedOnRampEdge" custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( diff --git a/examples/exp_configs/non_rl/i210_subnetwork_sweep.py b/examples/exp_configs/non_rl/i210_subnetwork_sweep.py deleted file mode 100644 index 28cba81ce..000000000 --- a/examples/exp_configs/non_rl/i210_subnetwork_sweep.py +++ /dev/null @@ -1,151 +0,0 @@ -"""I-210 subnetwork example. 
- -In this case flow_params is a list of dicts. This is to test the effects of -multiple human-driver model parameters on the flow traffic. -""" -from collections import OrderedDict -from copy import deepcopy -import itertools -import os -import numpy as np - -from flow.core.params import SumoParams -from flow.core.params import EnvParams -from flow.core.params import NetParams -from flow.core.params import SumoLaneChangeParams -from flow.core.params import VehicleParams -from flow.core.params import InitialConfig -from flow.core.params import InFlows -import flow.config as config -from flow.envs import TestEnv -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION - -# the default parameters for all lane change parameters -default_dict = { - "lane_change_mode": "strategic", - "model": "LC2013", - "lc_strategic": 1.0, - "lc_cooperative": 1.0, - "lc_speed_gain": 1.0, - "lc_keep_right": 1.0, - "lc_look_ahead_left": 2.0, - "lc_speed_gain_right": 1.0, - "lc_sublane": 1.0, - "lc_pushy": 0, - "lc_pushy_gap": 0.6, - "lc_assertive": 1, - "lc_accel_lat": 1.0 -} - -# values to sweep through for some lane change parameters -sweep_dict = OrderedDict({ - "lc_strategic": [1.0, 2.0, 4.0, 8.0], - "lc_cooperative": [1.0, 2.0], - "lc_look_ahead_left": [2.0, 4.0] -}) - -# Create a list of possible lane change parameter combinations. -all_names = sorted(sweep_dict) -combinations = itertools.product(*(sweep_dict[name] for name in all_names)) -combination_list = list(combinations) -res = [] -for val in combination_list: - curr_dict = {} - for elem, name in zip(val, all_names): - curr_dict[name] = elem - res.append(curr_dict) - -# Create a list of all possible flow_params dictionaries to sweep through the -# different lane change parameters. -flow_params = [] - -for lane_change_dict in res: - # no vehicles in the network. The lane change parameters of inflowing - # vehicles are updated here. 
- vehicles = VehicleParams() - update_dict = deepcopy(default_dict) - update_dict.update(lane_change_dict) - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams(**update_dict) - ) - - inflow = InFlows() - # main highway - inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=8378, - # probability=1.0, - departLane="random", - departSpeed=20) - # on ramp - inflow.add( - veh_type="human", - edge="27414345", - vehs_per_hour=321, - departLane="random", - departSpeed=20) - inflow.add( - veh_type="human", - edge="27414342#0", - vehs_per_hour=421, - departLane="random", - departSpeed=20) - - NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") - - params = dict( - # name of the experiment - exp_tag='I-210_subnetwork', - - # name of the flow environment the experiment is running on - env_name=TestEnv, - - # name of the network class the experiment is running on - network=I210SubNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # simulation-related parameters - sim=SumoParams( - sim_step=0.8, - render=True, - color_by_speed=True - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=4500, # one hour of run time - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - template=NET_TEMPLATE - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon - # initialization/reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, - ), - ) - - # Store the next flow_params dict. 
- flow_params.append(params) - - -custom_callables = { - "avg_merge_speed": lambda env: np.mean(env.k.vehicle.get_speed( - env.k.vehicle.get_ids_by_edge("119257908#1-AddedOnRampEdge"))) -} diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index a7d707068..01b9e6082 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -36,6 +36,10 @@ # percentage of autonomous vehicles compared to human vehicles on highway PENETRATION_RATE = 10 +# TODO: temporary fix +edges_distribution = EDGES_DISTRIBUTION.copy() +edges_distribution.remove("ghost0") + # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ @@ -160,7 +164,11 @@ # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=NET_TEMPLATE + template=NET_TEMPLATE, + additional_params={ + "on_ramp": False, + "ghost_edge": False + } ), # vehicles to be placed in the network at the start of a rollout (see @@ -170,7 +178,7 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=EDGES_DISTRIBUTION, + edges_distribution=edges_distribution, ), ) diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index a15471539..ec71a2f42 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -5,7 +5,7 @@ """ from flow.controllers import RLController, IDMController from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ - VehicleParams, SumoParams, SumoLaneChangeParams + VehicleParams, SumoParams, SumoLaneChangeParams, SumoCarFollowingParams from flow.envs.ring.accel import 
ADDITIONAL_ENV_PARAMS from flow.networks import HighwayNetwork from flow.envs.multiagent import MultiStraightRoad @@ -16,27 +16,35 @@ # SET UP PARAMETERS FOR THE SIMULATION -# number of steps per rollout -HORIZON = 2000 +# the speed of vehicles entering the network +TRAFFIC_SPEED = 24.1 +# the maximum speed at the downstream boundary edge +END_SPEED = 6.0 +# the inflow rate of vehicles +HIGHWAY_INFLOW_RATE = 2215 +# the simulation time horizon (in steps) +HORIZON = 1500 +# whether to include noise in the car-following models +INCLUDE_NOISE = True -# inflow rate on the highway in vehicles per hour -HIGHWAY_INFLOW_RATE = 10800 / 5 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 - - -# SET UP PARAMETERS FOR THE NETWORK +PENETRATION_RATE = 10.0 additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ # length of the highway - "length": 2000, + "length": 2500, # number of lanes "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into - "num_edges": 2 + "num_edges": 2, + # whether to include a ghost edge + "use_ghost_edge": True, + # speed limit for the ghost edge + "ghost_speed_limit": END_SPEED, + # length of the cell imposing a boundary + "boundary_cell_length": 300, }) @@ -62,11 +70,18 @@ # human vehicles vehicles.add( "human", - num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 if INCLUDE_NOISE else 0.0 + }), + car_following_params=SumoCarFollowingParams( + min_gap=0.5 + ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + model="SL2015", + lc_sublane=2.0, ), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), ) # autonomous vehicles diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml new file mode 100644 index 000000000..66e5a1131 --- /dev/null +++ 
b/examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml @@ -0,0 +1,5719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml new file mode 100644 index 000000000..10d4d8d45 --- /dev/null +++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml @@ -0,0 +1,5719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/train.py b/examples/train.py index 5b5431a3d..e34b2935c 100644 --- a/examples/train.py +++ b/examples/train.py @@ -23,9 +23,7 @@ except ImportError: print("Stable-baselines not installed") -import ray from ray import tune -from ray.tune.registry import register_env from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class @@ -126,6 +124,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -174,6 +175,13 @@ def 
setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class + horizon = flow_params['env'].horizon alg_run = flags.algorithm.upper() @@ -255,6 +263,9 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + from ray.tune import run_experiments + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -402,6 +413,9 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" + from stable_baselines.common.vec_env import DummyVecEnv + from stable_baselines import PPO2 + flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] diff --git a/flow/controllers/__init__.py b/flow/controllers/__init__.py index 4dfcf05b7..a61d16980 100755 --- a/flow/controllers/__init__.py +++ b/flow/controllers/__init__.py @@ -28,7 +28,7 @@ # routing controllers from flow.controllers.base_routing_controller import BaseRouter from flow.controllers.routing_controllers import ContinuousRouter, \ - GridRouter, BayBridgeRouter + GridRouter, BayBridgeRouter, I210Router __all__ = [ "RLController", "BaseController", "BaseLaneChangeController", "BaseRouter", @@ -36,5 +36,6 @@ "IDMController", "SimCarFollowingController", "FollowerStopper", "PISaturation", "StaticLaneChanger", "SimLaneChangeController", "ContinuousRouter", "GridRouter", "BayBridgeRouter", "LACController", - "GippsController", "NonLocalFollowerStopper", "BandoFTLController" + "GippsController", "NonLocalFollowerStopper", "BandoFTLController", + "I210Router" ] diff --git a/flow/controllers/routing_controllers.py 
b/flow/controllers/routing_controllers.py index e6ccdde78..02aa34cb4 100755 --- a/flow/controllers/routing_controllers.py +++ b/flow/controllers/routing_controllers.py @@ -124,3 +124,29 @@ def choose_route(self, env): new_route = super().choose_route(env) return new_route + + +class I210Router(ContinuousRouter): + """Assists in choosing routes in select cases for the I-210 sub-network. + + Extension to the Continuous Router. + + Usage + ----- + See base class for usage example. + """ + + def choose_route(self, env): + """See parent class.""" + edge = env.k.vehicle.get_edge(self.veh_id) + lane = env.k.vehicle.get_lane(self.veh_id) + + # vehicles on these edges in lanes 4 and 5 are not going to be able to + # make it out in time + if edge == "119257908#1-AddedOffRampEdge" and lane in [5, 4, 3]: + new_route = env.available_routes[ + "119257908#1-AddedOffRampEdge"][0][0] + else: + new_route = super().choose_route(env) + + return new_route diff --git a/flow/networks/highway.py b/flow/networks/highway.py index 7e9c18ad5..6f10d3279 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -14,11 +14,13 @@ "speed_limit": 30, # number of edges to divide the highway into "num_edges": 1, - # whether to include a ghost edge of length 500m. This edge is provided a - # different speed limit. + # whether to include a ghost edge. This edge is provided a different speed + # limit. "use_ghost_edge": False, # speed limit for the ghost edge "ghost_speed_limit": 25, + # length of the cell imposing a boundary + "boundary_cell_length": 500 } @@ -34,9 +36,10 @@ class HighwayNetwork(Network): * **lanes** : number of lanes in the highway * **speed_limit** : max speed limit of the highway * **num_edges** : number of edges to divide the highway into - * **use_ghost_edge** : whether to include a ghost edge of length 500m. This - edge is provided a different speed limit. + * **use_ghost_edge** : whether to include a ghost edge. This edge is + provided a different speed limit. 
* **ghost_speed_limit** : speed limit for the ghost edge + * **boundary_cell_length** : length of the cell imposing a boundary Usage ----- @@ -80,6 +83,7 @@ def specify_nodes(self, net_params): length = net_params.additional_params["length"] num_edges = net_params.additional_params.get("num_edges", 1) segment_lengths = np.linspace(0, length, num_edges+1) + end_length = net_params.additional_params["boundary_cell_length"] nodes = [] for i in range(num_edges+1): @@ -92,7 +96,7 @@ def specify_nodes(self, net_params): if self.net_params.additional_params["use_ghost_edge"]: nodes += [{ "id": "edge_{}".format(num_edges + 1), - "x": length + self.end_length, + "x": length + end_length, "y": 0 }] @@ -103,6 +107,7 @@ def specify_edges(self, net_params): length = net_params.additional_params["length"] num_edges = net_params.additional_params.get("num_edges", 1) segment_length = length/float(num_edges) + end_length = net_params.additional_params["boundary_cell_length"] edges = [] for i in range(num_edges): @@ -120,7 +125,7 @@ def specify_edges(self, net_params): "type": "highway_end", "from": "edge_{}".format(num_edges), "to": "edge_{}".format(num_edges + 1), - "length": self.end_length + "length": end_length }] return edges diff --git a/flow/networks/i210_subnetwork.py b/flow/networks/i210_subnetwork.py index d8e05efb5..b86a0dc8a 100644 --- a/flow/networks/i210_subnetwork.py +++ b/flow/networks/i210_subnetwork.py @@ -1,9 +1,18 @@ """Contains the I-210 sub-network class.""" - from flow.networks.base import Network +from flow.core.params import InitialConfig +from flow.core.params import TrafficLightParams + +ADDITIONAL_NET_PARAMS = { + # whether to include vehicle on the on-ramp + "on_ramp": False, + # whether to include the downstream slow-down edge in the network + "ghost_edge": False, +} EDGES_DISTRIBUTION = [ # Main highway + "ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", @@ -25,6 +34,12 @@ class I210SubNetwork(Network): """A network used to simulate 
the I-210 sub-network. + Requires from net_params: + + * **on_ramp** : whether to include vehicle on the on-ramp + * **ghost_edge** : whether to include the downstream slow-down edge in the + network + Usage ----- >>> from flow.core.params import NetParams @@ -39,103 +54,145 @@ class I210SubNetwork(Network): >>> ) """ - def specify_routes(self, net_params): - """See parent class. + def __init__(self, + name, + vehicles, + net_params, + initial_config=InitialConfig(), + traffic_lights=TrafficLightParams()): + """Initialize the I210 sub-network scenario.""" + for p in ADDITIONAL_NET_PARAMS.keys(): + if p not in net_params.additional_params: + raise KeyError('Network parameter "{}" not supplied'.format(p)) + + super(I210SubNetwork, self).__init__( + name=name, + vehicles=vehicles, + net_params=net_params, + initial_config=initial_config, + traffic_lights=traffic_lights, + ) - Routes for vehicles moving through the bay bridge from Oakland to San - Francisco. - """ + def specify_routes(self, net_params): + """See parent class.""" rts = { - # Main highway "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - # (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 17 / 8378) - ], - # "119257908#0": [ - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # # "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOnRampEdge": [ - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOnRampEdge", "119257908#1", - # # 
"119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1": [ - # (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#1-AddedOffRampEdge": [ - # (["119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1.0), - # # (["119257908#1-AddedOffRampEdge", "173381935"], - # # 0.5), - # ], - # "119257908#2": [ - # (["119257908#2", "119257908#3"], 1), - # ], - # "119257908#3": [ - # (["119257908#3"], 1), - # ], - # - # # On-ramp - # "27414345": [ - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 9 / 321), - # (["27414345", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 9 / 321), - # ], - # "27414342#0": [ - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 1 - 20 / 421), - # (["27414342#0", "27414342#1-AddedOnRampEdge", - # "27414342#1", - # "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 20 / 421), - # ], - # "27414342#1-AddedOnRampEdge": [ - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - # "119257908#3"], - # 0.5), - # (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - # "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - # ], - # - # # Off-ramp - # "173381935": [ - # (["173381935"], 1), - # ], + (["119257914", + "119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + 
"119257908#2", + "119257908#3"], 1.0), + ] } + if net_params.additional_params["ghost_edge"]: + rts.update({ + "ghost0": [ + (["ghost0", + "119257914", + "119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1), + ], + }) + + if net_params.additional_params["on_ramp"]: + rts.update({ + # Main highway + "119257908#0": [ + (["119257908#0", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1-AddedOnRampEdge": [ + (["119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1": [ + (["119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#1-AddedOffRampEdge": [ + (["119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1.0), + ], + "119257908#2": [ + (["119257908#2", + "119257908#3"], 1), + ], + "119257908#3": [ + (["119257908#3"], 1), + ], + + # On-ramp + "27414345": [ + (["27414345", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1 - 9 / 321), + (["27414345", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 9 / 321), + ], + "27414342#0": [ + (["27414342#0", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 1 - 20 / 421), + (["27414342#0", + "27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 20 / 421), + ], + "27414342#1-AddedOnRampEdge": [ + (["27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + 
"119257908#1", + "119257908#1-AddedOffRampEdge", + "119257908#2", + "119257908#3"], 0.5), + (["27414342#1-AddedOnRampEdge", + "27414342#1", + "119257908#1-AddedOnRampEdge", + "119257908#1", + "119257908#1-AddedOffRampEdge", + "173381935"], 0.5), + ], + + # Off-ramp + "173381935": [ + (["173381935"], 1), + ], + }) + return rts diff --git a/flow/networks/ring.py b/flow/networks/ring.py index de4d17503..ceef22a78 100755 --- a/flow/networks/ring.py +++ b/flow/networks/ring.py @@ -37,7 +37,7 @@ class RingNetwork(Network): >>> from flow.core.params import NetParams >>> from flow.core.params import VehicleParams >>> from flow.core.params import InitialConfig - >>> from flow.scenarios import RingNetwork + >>> from flow.networks import RingNetwork >>> >>> network = RingNetwork( >>> name='ring_road', diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index d72a50b17..2263f3474 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -97,7 +97,8 @@ def test_additional_net_params(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": False, - "ghost_speed_limit": 25 + "ghost_speed_limit": 25, + "boundary_cell_length": 300, } ) ) @@ -116,7 +117,8 @@ def test_ghost_edge(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": False, - "ghost_speed_limit": 25 + "ghost_speed_limit": 25, + "boundary_cell_length": 300, }) ) env.reset() diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index b791bba64..a37b235ff 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -261,6 +261,7 @@ def test_no_junctions_highway(self): "num_edges": 1, "use_ghost_edge": False, "ghost_speed_limit": 25, + "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/setup_scripts.py b/tests/setup_scripts.py index ac88d2e42..343bad906 100644 --- a/tests/setup_scripts.py +++ 
b/tests/setup_scripts.py @@ -346,6 +346,7 @@ def highway_exp_setup(sim_params=None, "num_edges": 1, "use_ghost_edge": False, "ghost_speed_limit": 25, + "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) From f43d0e43a0b4ef158a15c680d4e3131bd7dee0fb Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 13:41:17 -0700 Subject: [PATCH 075/335] fix some query string formatting issue --- examples/train.py | 2 -- flow/data_pipeline/query.py | 6 +++--- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/examples/train.py b/examples/train.py index 1ba9586af..e34b2935c 100644 --- a/examples/train.py +++ b/examples/train.py @@ -23,9 +23,7 @@ except ImportError: print("Stable-baselines not installed") -import ray from ray import tune -from ray.tune.registry import register_env from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 28564afde..2b5052f66 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -48,7 +48,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -63,7 +63,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -81,7 +81,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, From 78e47457c034dce1cfbe3d18695ba6ff2a466159 Mon Sep 17 00:00:00 2001 From: Brent Zhao 
Date: Tue, 19 May 2020 15:28:54 -0700 Subject: [PATCH 076/335] fix some style issue --- flow/data_pipeline/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2b5052f66..96fc86497 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -22,6 +22,7 @@ ORDER BY id, "time" """ + class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" From 437f8cf4103626bae222268b3e9380f397f26469 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 077/335] get up to date with i210_dev --- examples/exp_configs/non_rl/highway.py | 40 ++++++---- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 3 files changed, 48 insertions(+), 72 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index 1905e2f7f..e7505f2d7 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,19 +5,25 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import TestEnv +from flow.envs import LaneChangeAccelEnv vehicles = VehicleParams() vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - ) + veh_id="human", + acceleration_controller=(IDMController, {}), + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), + num_vehicles=20) +vehicles.add( + veh_id="human2", + acceleration_controller=(IDMController, {}), + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), + num_vehicles=20) env_params = 
EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -25,7 +31,13 @@ inflow.add( veh_type="human", edge="highway_0", - vehs_per_hour=10800 / 5.0, + probability=0.25, + departLane="free", + departSpeed=20) +inflow.add( + veh_type="human2", + edge="highway_0", + probability=0.25, departLane="free", departSpeed=20) @@ -35,7 +47,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=TestEnv, + env_name=LaneChangeAccelEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -46,12 +58,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - sim_step=0.5 + lateral_resolution=1.0, ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4000, + horizon=1500, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 194da1099..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From e6db29b9d6013b541f9e066383f1aa7f3090f885 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 078/335] remove dupe imports --- examples/train.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/examples/train.py b/examples/train.py index e34b2935c..7cb84d361 100644 --- 
a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,8 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray - from ray.tune import run_experiments flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -413,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From 6b5111b93db5760235d05f26e6ef163591b36497 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 079/335] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train 
policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 4d206b374e843ee611f46d5519de62119d8fb1b2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 080/335] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 18a88bcaef5b677a1f81e65beb9dbab6d3a17f29 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 081/335] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 7306298d97fbaa1fd03fb9f4a4ea816631b300b5 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:18:43 -0700 Subject: [PATCH 082/335] update lambda function, change partition into multi-column --- 
flow/core/experiment.py | 8 +-- flow/data_pipeline/data_pipeline.py | 84 ++++++++------------------- flow/data_pipeline/lambda_function.py | 26 +++------ flow/data_pipeline/query.py | 29 ++++----- flow/data_pipeline/run_query.py | 6 +- flow/visualize/i210_replay.py | 7 ++- 6 files changed, 58 insertions(+), 102 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 6e9be9aea..f46f802a5 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -217,10 +217,10 @@ def rl_actions(*_): if partition_name: if partition_name == "default": - partition_name = source_id[0:3] - partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( - partition_name, upload_file_path.split('/')[-1].split('_')[0]), + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index fbd975c5e..111c41994 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,9 @@ import pandas as pd import numpy as np import boto3 -from flow.data_pipeline.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings from time import time +from datetime import date def generate_trajectory_table(data_path, extra_info, partition_name): @@ -90,7 +91,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + extra_info = 
{"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -99,7 +100,7 @@ def extra_init(): def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: - extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000) + extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) extra_info["acceleration"].append(veh_kernel.get_accel(vid)) @@ -154,7 +155,7 @@ def get_existing_partitions(self): response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) @@ -164,7 +165,7 @@ def get_existing_partitions(self): QueryExecutionId=response['QueryExecutionId'], MaxResults=1000 ) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + return [data['Data'][0]['VarCharValue'] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): """Return the status of the execution with given id. @@ -207,27 +208,30 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, partition): + def update_partition(self, query_date, partition): """Load the given partition to the trajectory_table on Athena. 
Parameters ---------- + query_date : str + the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) + self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) return - def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", + query_date="today", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -236,6 +240,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer name of the query in QueryStrings enum that will be run result_location: str, optional location on the S3 bucket where the result will be stored + query_date : str + name of the partition date to run this query on partition: str, optional name of the partition to run this query on Returns @@ -249,13 +255,16 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer if query_name not in QueryStrings.__members__: raise ValueError("query not existed: please add it to query.py") - if partition not in self.existing_partitions: - self.update_partition(partition) + if query_date == "today": + query_date = date.today().isoformat() + + if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: + self.update_partition(query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, ResultConfiguration={ 'OutputLocation': result_location, @@ -263,50 +272,3 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer WorkGroup='primary' ) return response['QueryExecutionId'] - -########################################################################### -# Helpers for testing the SQL Queries # -########################################################################### - - -def test_sql_query(query_name): - """Start the execution of a query, does not wait for it to finish. 
- - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be tested - Raises - ------ - RuntimeError: if timeout - """ - if query_name not in testing_functions: - raise ValueError("no tests supported for this query") - - # Run the respective sql query - queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" - "query-result/query-test", partition="test") - if queryEngine.wait_for_execution(execution_id): - raise RuntimeError("execution timed out") - - # get the Athena query result from S3 - s3 = boto3.resource("s3") - s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") - athena_result = pd.read_csv("data/athena_result.csv") - athena_result = athena_result.sort_values(by=["time", "id"]) - - # get the python expected result - expected_result = pd.read_csv("data/test_data.csv") - expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") - expected_result.columns = ["time", "id", "power"] - expected_result = expected_result.sort_values(by=["time", "id"]) - - difference = athena_result["power"] - expected_result["power"] - print("average difference is: " + str(np.mean(difference))) - print("std of difference is: " + str(np.std(difference))) - print("average ratio of difference to expected is: " + - str(np.mean(np.divide(difference, expected_result["power"])))) - difference = pd.DataFrame(difference) - difference.to_csv("./difference.csv") diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index fd50ba8f5..35dcbfba8 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,36 +1,28 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from examples.data_pipeline import AthenaQuery -from examples.query import tags +from 
flow.data_pipeline.data_pipeline import AthenaQuery +from flow.data_pipeline.query import tags s3 = boto3.client('s3') queryEngine = AthenaQuery() def lambda_handler(event, context): - """Invoke by AWS Lambda upon triggered by an event. - - Parameters - ---------- - event : dic < str: dic > - an S3 event - context: - not used - """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) + query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] response = s3.head_object(Bucket=bucket, Key=key) - run_query = response["Metadata"]["run-query"] + required_query = response["Metadata"]["run-query"] if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: - if run_query == "all": - query_list = tags["analysis"] - elif not run_query: + if required_query == "all": + query_list = tags["energy"] + elif not required_query: break else: - query_list = run_query.split("\', \'") + query_list = required_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/result/auto/', query_date, partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 96fc86497..2e137946d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,13 +2,12 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], - "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, - "time", + time_step, speed, acceleration, road_grade, @@ -19,7 +18,7 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, source_id FROM {} - ORDER BY id, "time" + 
ORDER BY id, time_step """ @@ -29,26 +28,28 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * FROM trajectory_table - WHERE partition_name=\'{partition}\' + WHERE date = \'{date}\' + AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ ALTER TABLE trajectory_table - ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ POWER_DEMAND_MODEL = """ WITH regular_cte AS ( SELECT id, - "time", + time_step, speed, acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -57,13 +58,14 @@ class QueryStrings(Enum): WITH denoised_accel_cte AS ( SELECT id, - "time", + time_step, speed, accel_without_noise AS acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -72,21 +74,22 @@ class QueryStrings(Enum): WITH lagged_timestep AS ( SELECT id, - "time", + time_step, accel_without_noise, road_grade, source_id, - "time" - LAG("time", 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, - "time", + time_step, prev_speed + accel_without_noise * sim_step AS speed, accel_without_noise AS acceleration, road_grade, diff --git 
a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index ac927c749..1eb802205 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,6 +1,6 @@ """runner script for invoking query manually.""" import argparse -from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.data_pipeline import AthenaQuery from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -21,7 +21,7 @@ if args.run: execution_ids = [] for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + execution_ids.append(queryEngine.run_query(query_name, args.result_location, partition=args.partition)) print(execution_ids) if args.list_partitions: print(queryEngine.existing_partitions) @@ -33,5 +33,3 @@ if args.list_queries: for q in QueryStrings: print(q) - if args.test_query: - test_sql_query(args.test_query[0]) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 5fd3142ad..8e62bb0d8 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -334,9 +334,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = date.today().isoformat() + " " + source_id[0:3] - upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From efa60f79f096f44f5b12fcbf70fa57c344d0a587 
Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:32:39 -0700 Subject: [PATCH 083/335] style fix --- .../non_rl/Highway_Downstream_Congestion.py | 112 ++++++++++++ flow/data_pipeline/lambda_function.py | 1 + flow/networks/SpeedChange.py | 173 ++++++++++++++++++ 3 files changed, 286 insertions(+) create mode 100644 examples/exp_configs/non_rl/Highway_Downstream_Congestion.py create mode 100644 flow/networks/SpeedChange.py diff --git a/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py b/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py new file mode 100644 index 000000000..ddf3542f1 --- /dev/null +++ b/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py @@ -0,0 +1,112 @@ +"""Example of an open multi-lane network with human-driven vehicles.""" + +from flow.controllers import IDMController,LinearOVM,BandoFTLController +from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, SumoLaneChangeParams +from flow.core.params import VehicleParams, InFlows +from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS +from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS +from flow.networks.SpeedChange import HighwayNetwork_Modified, ADDITIONAL_NET_PARAMS +from flow.envs import LaneChangeAccelEnv + +# accel_data = (BandoFTL_Controller,{'alpha':.5,'beta':20.0,'h_st':12.0,'h_go':50.0,'v_max':30.0,'noise':0.0}) +# traffic_speed = 28.6 +# traffic_flow = 2172 + +accel_data = (IDMController,{'a':1.3,'b':2.0,'noise':0.3}) +traffic_speed = 24.1 +traffic_flow = 2215 + + + +vehicles = VehicleParams() +vehicles.add( + veh_id="human", + acceleration_controller=accel_data, + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), +) + +# Does this break the sim? 
+# vehicles.add( +# veh_id="human2", +# acceleration_controller=(LinearOVM,{'v_max':traffic_speed}), +# lane_change_params=SumoLaneChangeParams( +# model="SL2015", +# lc_sublane=2.0, +# ), +# num_vehicles=1) + +env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) + +inflow = InFlows() +inflow.add( + veh_type="human", + edge="highway_0", + vehs_per_hour=traffic_flow, + departLane="free", + departSpeed=traffic_speed) + +# inflow.add( +# veh_type="human2", +# edge="highway_0", +# probability=0.25, +# departLane="free", +# departSpeed=20) + + +additional_net_params = ADDITIONAL_NET_PARAMS.copy() +additional_net_params['lanes'] = 1 +additional_net_params['length'] = 1000 +additional_net_params['end_speed_limit'] = 6.0 +additional_net_params['boundary_cell_length'] = 300 + + + + +flow_params = dict( + # name of the experiment + exp_tag='highway', + + # name of the flow environment the experiment is running on + env_name=LaneChangeAccelEnv, + + # name of the network class the experiment is running on + network=HighwayNetwork_Modified, + + # simulator that is used by the experiment + simulator='traci', + + # sumo-related parameters (see flow.core.params.SumoParams) + sim=SumoParams( + sim_step=0.4, + render=False, + color_by_speed=True, + use_ballistic=True + ), + + # environment related parameters (see flow.core.params.EnvParams) + env=EnvParams( + horizon=3000, + additional_params=ADDITIONAL_ENV_PARAMS.copy(), + ), + + # network-related parameters (see flow.core.params.NetParams and the + # network's documentation or ADDITIONAL_NET_PARAMS component) + net=NetParams( + inflows=inflow, + additional_params=additional_net_params, + ), + + # vehicles to be placed in the network at the start of a rollout (see + # flow.core.params.VehicleParams) + veh=vehicles, + + # parameters specifying the positioning of vehicles upon initialization/ + # reset (see flow.core.params.InitialConfig) + initial=InitialConfig( + spacing="uniform", + shuffle=True, + ), +) diff --git 
a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 35dcbfba8..fe8efe3c0 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -9,6 +9,7 @@ def lambda_handler(event, context): + """Run on AWS Lambda to start query automatically.""" for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/flow/networks/SpeedChange.py b/flow/networks/SpeedChange.py new file mode 100644 index 000000000..c24fb767b --- /dev/null +++ b/flow/networks/SpeedChange.py @@ -0,0 +1,173 @@ +"""Contains the highway network class.""" + +from flow.networks.base import Network +from flow.core.params import InitialConfig +from flow.core.params import TrafficLightParams +import numpy as np + +ADDITIONAL_NET_PARAMS = { + # length of the highway + "length": 1000, + # number of lanes + "lanes": 4, + # speed limit for all edges + "speed_limit": 30, + # end speed limit + "end_speed_limit": 25, + # number of edges to divide the highway into + "num_edges": 1, + # Length of the cell imposing a boundary + "boundary_cell_length": 500 +} + + +class HighwayNetwork_Modified(Network): + """Highway network class. + + This network consists of `num_edges` different straight highway sections + with a total characteristic length and number of lanes. 
+ + Requires from net_params: + + * **length** : length of the highway + * **lanes** : number of lanes in the highway + * **speed_limit** : max speed limit of the highway + * **num_edges** : number of edges to divide the highway into + + Usage + ----- + >>> from flow.core.params import NetParams + >>> from flow.core.params import VehicleParams + >>> from flow.core.params import InitialConfig + >>> from flow.networks import HighwayNetwork + >>> + >>> network = HighwayNetwork( + >>> name='highway', + >>> vehicles=VehicleParams(), + >>> net_params=NetParams( + >>> additional_params={ + >>> 'length': 230, + >>> 'lanes': 1, + >>> 'speed_limit': 30, + >>> 'num_edges': 1 + >>> }, + >>> ) + >>> ) + """ + + def __init__(self, + name, + vehicles, + net_params, + initial_config=InitialConfig(), + traffic_lights=TrafficLightParams()): + """Initialize a highway network.""" + for p in ADDITIONAL_NET_PARAMS.keys(): + if p not in net_params.additional_params: + raise KeyError('Network parameter "{}" not supplied'.format(p)) + + self.length = net_params.additional_params["length"] + self.lanes = net_params.additional_params["lanes"] + self.num_edges = net_params.additional_params.get("num_edges", 1) + self.end_length = net_params.additional_params["boundary_cell_length"] + + super().__init__(name, vehicles, net_params, initial_config, + traffic_lights) + + def specify_nodes(self, net_params): + """See parent class.""" + length = net_params.additional_params["length"] + num_edges = net_params.additional_params.get("num_edges", 1) + segment_lengths = np.linspace(0, length, num_edges+1) + + nodes = [] + for i in range(num_edges+1): + nodes += [{ + "id": "edge_{}".format(i), + "x": segment_lengths[i], + "y": 0 + }] + + nodes +=[{"id": "edge_{}".format(num_edges+1), + "x": length+self.end_length, + "y": 0}] + + return nodes + + def specify_edges(self, net_params): + """See parent class.""" + length = net_params.additional_params["length"] + num_edges = 
net_params.additional_params.get("num_edges", 1) + segment_length = length/float(num_edges) + + edges = [] + for i in range(num_edges): + edges += [{ + "id": "highway_{}".format(i), + "type": "highwayType", + "from": "edge_{}".format(i), + "to": "edge_{}".format(i+1), + "length": segment_length + }] + + edges += [{ + "id": "highway_end", + "type": "highway_end", + "from": "edge_{}".format(num_edges), + "to": "edge_{}".format(num_edges+1), + "length": self.end_length + }] + + return edges + + def specify_types(self, net_params): + """See parent class.""" + lanes = net_params.additional_params["lanes"] + speed_limit = net_params.additional_params["speed_limit"] + end_speed_limt = net_params.additional_params["end_speed_limit"] + + types = [{ + "id": "highwayType", + "numLanes": lanes, + "speed": speed_limit + }] + + types +=[{ + "id":"highway_end", + "numLanes":lanes, + "speed":end_speed_limt}] + + return types + + def specify_routes(self, net_params): + """See parent class.""" + num_edges = net_params.additional_params.get("num_edges", 1) + rts = {} + for i in range(num_edges): + rts["highway_{}".format(i)] = ["highway_{}".format(j) for + j in range(i, num_edges)] + rts["highway_{}".format(i)].append("highway_end") + + return rts + + def specify_edge_starts(self): + """See parent class.""" + num_edges = self.num_edges + edgestarts = [("highway_{}".format(i), 0) + for i in range(num_edges)] + + + # Adding this line fixes the problem that the simulation breaks when it reaches + # the slow down segment, but then the simulation doesn't advance past the first step. + + edgestarts += [("highway_end",self.length)] + return edgestarts + + @staticmethod + def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): + """Generate a user defined set of starting positions. + + This method is just used for testing. 
+ """ + return initial_config.additional_params["start_positions"], \ + initial_config.additional_params["start_lanes"] From d4923f6a76f3d3f8756c41021c24d50e72bf3094 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:37:12 -0700 Subject: [PATCH 084/335] remove the IDM config file from another campus --- .../non_rl/Highway_Downstream_Congestion.py | 112 ------------ flow/networks/SpeedChange.py | 173 ------------------ 2 files changed, 285 deletions(-) delete mode 100644 examples/exp_configs/non_rl/Highway_Downstream_Congestion.py delete mode 100644 flow/networks/SpeedChange.py diff --git a/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py b/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py deleted file mode 100644 index ddf3542f1..000000000 --- a/examples/exp_configs/non_rl/Highway_Downstream_Congestion.py +++ /dev/null @@ -1,112 +0,0 @@ -"""Example of an open multi-lane network with human-driven vehicles.""" - -from flow.controllers import IDMController,LinearOVM,BandoFTLController -from flow.core.params import SumoParams, EnvParams, NetParams, InitialConfig, SumoLaneChangeParams -from flow.core.params import VehicleParams, InFlows -from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS -from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.networks.SpeedChange import HighwayNetwork_Modified, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv - -# accel_data = (BandoFTL_Controller,{'alpha':.5,'beta':20.0,'h_st':12.0,'h_go':50.0,'v_max':30.0,'noise':0.0}) -# traffic_speed = 28.6 -# traffic_flow = 2172 - -accel_data = (IDMController,{'a':1.3,'b':2.0,'noise':0.3}) -traffic_speed = 24.1 -traffic_flow = 2215 - - - -vehicles = VehicleParams() -vehicles.add( - veh_id="human", - acceleration_controller=accel_data, - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), -) - -# Does this break the sim? 
-# vehicles.add( -# veh_id="human2", -# acceleration_controller=(LinearOVM,{'v_max':traffic_speed}), -# lane_change_params=SumoLaneChangeParams( -# model="SL2015", -# lc_sublane=2.0, -# ), -# num_vehicles=1) - -env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) - -inflow = InFlows() -inflow.add( - veh_type="human", - edge="highway_0", - vehs_per_hour=traffic_flow, - departLane="free", - departSpeed=traffic_speed) - -# inflow.add( -# veh_type="human2", -# edge="highway_0", -# probability=0.25, -# departLane="free", -# departSpeed=20) - - -additional_net_params = ADDITIONAL_NET_PARAMS.copy() -additional_net_params['lanes'] = 1 -additional_net_params['length'] = 1000 -additional_net_params['end_speed_limit'] = 6.0 -additional_net_params['boundary_cell_length'] = 300 - - - - -flow_params = dict( - # name of the experiment - exp_tag='highway', - - # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, - - # name of the network class the experiment is running on - network=HighwayNetwork_Modified, - - # simulator that is used by the experiment - simulator='traci', - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.4, - render=False, - color_by_speed=True, - use_ballistic=True - ), - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=3000, - additional_params=ADDITIONAL_ENV_PARAMS.copy(), - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflow, - additional_params=additional_net_params, - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig( - spacing="uniform", - shuffle=True, - ), -) diff --git 
a/flow/networks/SpeedChange.py b/flow/networks/SpeedChange.py deleted file mode 100644 index c24fb767b..000000000 --- a/flow/networks/SpeedChange.py +++ /dev/null @@ -1,173 +0,0 @@ -"""Contains the highway network class.""" - -from flow.networks.base import Network -from flow.core.params import InitialConfig -from flow.core.params import TrafficLightParams -import numpy as np - -ADDITIONAL_NET_PARAMS = { - # length of the highway - "length": 1000, - # number of lanes - "lanes": 4, - # speed limit for all edges - "speed_limit": 30, - # end speed limit - "end_speed_limit": 25, - # number of edges to divide the highway into - "num_edges": 1, - # Length of the cell imposing a boundary - "boundary_cell_length": 500 -} - - -class HighwayNetwork_Modified(Network): - """Highway network class. - - This network consists of `num_edges` different straight highway sections - with a total characteristic length and number of lanes. - - Requires from net_params: - - * **length** : length of the highway - * **lanes** : number of lanes in the highway - * **speed_limit** : max speed limit of the highway - * **num_edges** : number of edges to divide the highway into - - Usage - ----- - >>> from flow.core.params import NetParams - >>> from flow.core.params import VehicleParams - >>> from flow.core.params import InitialConfig - >>> from flow.networks import HighwayNetwork - >>> - >>> network = HighwayNetwork( - >>> name='highway', - >>> vehicles=VehicleParams(), - >>> net_params=NetParams( - >>> additional_params={ - >>> 'length': 230, - >>> 'lanes': 1, - >>> 'speed_limit': 30, - >>> 'num_edges': 1 - >>> }, - >>> ) - >>> ) - """ - - def __init__(self, - name, - vehicles, - net_params, - initial_config=InitialConfig(), - traffic_lights=TrafficLightParams()): - """Initialize a highway network.""" - for p in ADDITIONAL_NET_PARAMS.keys(): - if p not in net_params.additional_params: - raise KeyError('Network parameter "{}" not supplied'.format(p)) - - self.length = 
net_params.additional_params["length"] - self.lanes = net_params.additional_params["lanes"] - self.num_edges = net_params.additional_params.get("num_edges", 1) - self.end_length = net_params.additional_params["boundary_cell_length"] - - super().__init__(name, vehicles, net_params, initial_config, - traffic_lights) - - def specify_nodes(self, net_params): - """See parent class.""" - length = net_params.additional_params["length"] - num_edges = net_params.additional_params.get("num_edges", 1) - segment_lengths = np.linspace(0, length, num_edges+1) - - nodes = [] - for i in range(num_edges+1): - nodes += [{ - "id": "edge_{}".format(i), - "x": segment_lengths[i], - "y": 0 - }] - - nodes +=[{"id": "edge_{}".format(num_edges+1), - "x": length+self.end_length, - "y": 0}] - - return nodes - - def specify_edges(self, net_params): - """See parent class.""" - length = net_params.additional_params["length"] - num_edges = net_params.additional_params.get("num_edges", 1) - segment_length = length/float(num_edges) - - edges = [] - for i in range(num_edges): - edges += [{ - "id": "highway_{}".format(i), - "type": "highwayType", - "from": "edge_{}".format(i), - "to": "edge_{}".format(i+1), - "length": segment_length - }] - - edges += [{ - "id": "highway_end", - "type": "highway_end", - "from": "edge_{}".format(num_edges), - "to": "edge_{}".format(num_edges+1), - "length": self.end_length - }] - - return edges - - def specify_types(self, net_params): - """See parent class.""" - lanes = net_params.additional_params["lanes"] - speed_limit = net_params.additional_params["speed_limit"] - end_speed_limt = net_params.additional_params["end_speed_limit"] - - types = [{ - "id": "highwayType", - "numLanes": lanes, - "speed": speed_limit - }] - - types +=[{ - "id":"highway_end", - "numLanes":lanes, - "speed":end_speed_limt}] - - return types - - def specify_routes(self, net_params): - """See parent class.""" - num_edges = net_params.additional_params.get("num_edges", 1) - rts = {} - for i in 
range(num_edges): - rts["highway_{}".format(i)] = ["highway_{}".format(j) for - j in range(i, num_edges)] - rts["highway_{}".format(i)].append("highway_end") - - return rts - - def specify_edge_starts(self): - """See parent class.""" - num_edges = self.num_edges - edgestarts = [("highway_{}".format(i), 0) - for i in range(num_edges)] - - - # Adding this line fixes the problem that the simulation breaks when it reaches - # the slow down segment, but then the simulation doesn't advance past the first step. - - edgestarts += [("highway_end",self.length)] - return edgestarts - - @staticmethod - def gen_custom_start_pos(cls, net_params, initial_config, num_vehicles): - """Generate a user defined set of starting positions. - - This method is just used for testing. - """ - return initial_config.additional_params["start_positions"], \ - initial_config.additional_params["start_lanes"] From a60b023d233335c2a0f4776f404d5a79f47e9b02 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:47:19 -0700 Subject: [PATCH 085/335] style fixed --- flow/data_pipeline/data_pipeline.py | 1 - flow/data_pipeline/query.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 111c41994..a999b6eb1 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -1,6 +1,5 @@ """contains class and helper functions for the data pipeline.""" import pandas as pd -import numpy as np import boto3 from flow.data_pipeline.query import QueryStrings from time import time diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2e137946d..e8c341444 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -79,9 +79,9 @@ class QueryStrings(Enum): road_grade, source_id, time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY id ORDER BY 
time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' From 0ea7ffc571c441e0d6b4c8c42d0edc4df7186fc5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 11:49:38 -0700 Subject: [PATCH 086/335] specify power demand model names --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e8c341444..2ee794507 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,7 +15,7 @@ (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, + \'{}\' AS energy_model_id, source_id FROM {} ORDER BY id, time_step @@ -52,7 +52,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -68,7 +68,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -96,4 +96,4 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) + 
{}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From da243f946c109259e4c75943bd24515dd4d9e516 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:31:20 -0700 Subject: [PATCH 087/335] Add several accelerations (with/without noise, with/without failsafes) to custom output --- flow/controllers/base_controller.py | 18 +++++++++------ flow/core/kernel/vehicle/base.py | 20 ++++++++++++++-- flow/core/kernel/vehicle/traci.py | 36 +++++++++++++++++++++++------ flow/data_pipeline/data_pipeline.py | 19 ++++++++++----- 4 files changed, 71 insertions(+), 22 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7adcdf310..c417bb73a 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,8 +88,10 @@ def get_action(self, env): float the modified form of the acceleration """ - # clear the current stored accel_without_noise of this vehicle None - env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # clear the current stored accel_no_noise_no_failsafe of this vehicle None + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed @@ -110,23 +112,25 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noise = accel + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) if self.fail_safe == 'instantaneous': - accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': - accel_without_noise = 
self.get_safe_velocity_action(env, accel_without_noise) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel) # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) - + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 647ef37fe..ed53773cb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -693,7 +693,15 @@ def get_accel(self, veh_id): """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError @@ -701,7 +709,15 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError - def 
get_accel_without_noise(self, veh_id): + def get_accel_no_noise_no_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_no_noise_with_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a4e06257..4a1916617 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,7 +113,9 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_without_noise"] = None + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1130,15 +1132,35 @@ def get_accel(self, veh_id): self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): """See parent class.""" - self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - def get_accel_without_noise(self, veh_id): + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): """See parent class.""" - if "accel_without_noise" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_without_noise"] = None - return 
self.__vehicles[veh_id]["accel_without_noise"] + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + + def get_accel_no_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + + def get_accel_no_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] + + def get_accel_with_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..11d85cb0d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -89,9 +89,11 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + """Return the dictionary with all the field pre-populated with empty list.""" + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], + 
"target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], + "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], + "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -102,13 +104,18 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) - extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["target_accel_no_noise_no_failsafe"].append( + veh_kernel.get_accel_no_noise_no_failsafe(vid)) + extra_info["target_accel_with_noise_no_failsafe"].append( + veh_kernel.get_accel_with_noise_no_failsafe(vid)) + extra_info["target_accel_no_noise_with_failsafe"].append( + veh_kernel.get_accel_no_noise_with_failsafe(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) From 951c755672a37bffcec0ac723545746b5e1d0e73 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:44:15 -0700 Subject: [PATCH 088/335] update queries with new column names --- flow/data_pipeline/query.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py 
b/flow/data_pipeline/query.py index 2ee794507..43ad45216 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -27,14 +27,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM trajectory_table + FROM fact_vehicle_trace WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE fact_vehicle_trace ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -44,10 +44,10 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -60,10 +60,10 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -75,14 +75,14 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + target_accel_no_noise_with_failsafe, road_grade, source_id, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -90,8 +90,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + prev_speed + target_accel_no_noise_with_failsafe * sim_step AS speed, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id FROM lagged_timestep From df0bb664e80e5fe9819c6246663b8602212da243 Mon Sep 17 
00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:47:44 -0700 Subject: [PATCH 089/335] fix flake8 issues --- flow/controllers/base_controller.py | 2 +- flow/data_pipeline/query.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index c417bb73a..3f6a0f4ae 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -130,7 +130,7 @@ def get_action(self, env): elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) - + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 43ad45216..e403b51f8 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,7 +2,13 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -52,7 +58,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -68,7 +75,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', + 
'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -96,4 +104,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 863f360809eac6fad1ae26eba0b197759a7c666c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:51:46 -0700 Subject: [PATCH 090/335] remove trailing whitespaces --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e403b51f8..8e8196f6f 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -4,8 +4,8 @@ # tags for different queries tags = { "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ] } @@ -58,7 +58,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ @@ -104,5 +104,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From 3b7364b9ee26642d8c9700874541899de447de9a Mon Sep 17 00:00:00 2001 From: Kathy Jang Date: Thu, 21 May 2020 12:41:32 -0700 Subject: [PATCH 091/335] Updated ray_autoscale and requirements.txt --- requirements.txt | 2 +- scripts/ray_autoscale.yaml | 11 ++++++++--- 2 files changed, 9 
insertions(+), 4 deletions(-) diff --git a/requirements.txt b/requirements.txt index c069a6cb6..f06c3c69f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ gym==0.14.0 -numpy==1.16.0 +numpy==1.18.4 scipy==1.1.0 lxml==4.4.1 pyprind==2.11.2 diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index d0c9cccbb..5cf0eca96 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -68,15 +68,20 @@ worker_nodes: setup_commands: - cd flow && git fetch && git checkout origin/i210_dev - -head_setup_commands: - pip install ray==0.8.0 - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions - pip install awscli==1.16.309 - pip install stable-baselines - - pip install torch==1.4.0 - pip install pytz + - pip install torch==1.3.1 + - pip install tensorflow==2.0.0 + - pip install lz4 + - pip install dm-tree + - pip install numpy==1.18.4 + - ./flow/scripts/setup_sumo_ubuntu1604.sh + +head_setup_commands: [] # Custom commands that will be run on worker nodes after common setup. worker_setup_commands: [] From 7493d9b6d56b5c06c2a8b3b000f938288b506314 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 21 May 2020 16:28:52 -0700 Subject: [PATCH 092/335] more quieres added --- examples/simulate.py | 2 +- flow/core/experiment.py | 12 +- flow/data_pipeline/data_pipeline.py | 4 +- flow/data_pipeline/query.py | 367 +++++++++++++++++++++++++++- flow/visualize/i210_replay.py | 3 +- 5 files changed, 374 insertions(+), 14 deletions(-) diff --git a/examples/simulate.py b/examples/simulate.py index 86d14aa14..0b183649b 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -97,5 +97,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, to_aws=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index f46f802a5..eb9beeca9 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -88,7 +88,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query=""): """Run the given network for a set number of runs. Parameters @@ -101,7 +101,7 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file - partition_name: str + to_aws: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. 
@@ -213,14 +213,12 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) - if partition_name: - if partition_name == "default": - partition_name = source_id[-3:] + if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..c83734f4c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -91,7 +91,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): """Return the dictionary with all the feild pre-populated with empty list.""" extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + "accel_without_noise": [], "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -108,7 +108,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) 
extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2ee794507..7459a9903 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,7 +2,8 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -96,4 +97,366 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) + + FACT_NETWORK_THROUGHPUT_AGG = """ + WITH agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(time_step) - MIN(time_step) AS total_time_seconds + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1 + ) + SELECT + source_id, + n_vehicles * 3600 / total_time_seconds AS throughput_per_hour + FROM agg + ;""" + + FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ + WITH distance AS ( + SELECT + id, + source_id, + MAX(x) AS distance_meters + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND source_id = + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + id, + source_id, + energy_model_id, + (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, + SUM(power) AS power_watts + FROM fact_energy_trace + WHERE 1 = 1 + AND 
date = \'{{date}}\' + AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ) + SELECT + d.id, + d.source_id, + e.energy_model_id, + distance_meters, + power_watts * time_step_size_seconds AS energy_joules, + distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, + 74564 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + FROM distance d + JOIN energy e ON 1=1 + AND d.id = e.id + AND d.source_id = e.source_id + ; + """ + + FACT_NETWORK_FUEL_EFFICIENCY_AGG = """ + SELECT + source_id, + energy_model_id, + SUM(distance_meters) AS distance_meters, + SUM(energy_joules) AS energy_joules, + SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, + 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + FROM fact_vehicle_fuel_efficiency_agg + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + GROUP BY 1, 2 + ;""" + + LEADERBOARD_CHART = """ + SELECT + t.source_id, + e.energy_model_id, + e.efficiency_meters_per_joules, + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, + t.throughput_per_hour + FROM fact_network_throughput_agg t + JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + AND t.date = \'{{date}}\' + AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{{date}}\' + AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.source_id = e.source_id + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ;""" + + FACT_NETWORK_INFLOWS_OUTFLOWS = """ + WITH min_max_time_step AS ( + SELECT + id, + source_id, + MIN(time_step) AS min_time_step, + 
MAX(time_step) AS max_time_step + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), inflows AS ( + SELECT + INT(min_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS inflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ), outflows AS ( + SELECT + INT(max_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS outflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ) + SELECT + COALESCE(i.time_step, o.time_step) AS time_step, + COALESCE(i.source_id, o.source_id) AS source_id, + COALESCE(i.inflow_rate, 0) AS inflow_rate, + COALESCE(o.outflow_rate, 0) AS outflow_rate + FROM inflows i + FULL OUTER JOIN outflows o ON 1 = 1 + AND i.time_step = o.time_step + AND i.source_id = o.source_id + ;""" + + FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 
500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(x/10) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + distance_meters_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.distance_meters_bin, bsa.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + bce.cumulative_energy_avg, + 
bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.distance_meters_bin = bsa.distance_meters_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + ;""" + + FACT_NETWORK_METRICS_BY_TIME_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND date = + AND partition_name = + AND source_id = + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + 
SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(time_step/60) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + time_seconds_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.time_seconds_bin, bsa.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + bce.cumulative_energy_avg, + bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + 
bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.time_seconds_bin = bsa.time_seconds_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 8e62bb0d8..23a7de1d8 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -334,10 +334,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = source_id[-3:] cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From 40ace2578737333db4c8ddf3e4311d93f56528ad Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 21 May 2020 19:54:59 -0700 Subject: [PATCH 093/335] update the architecture and fix some bugs --- flow/core/experiment.py | 2 +- flow/data_pipeline/data_pipeline.py | 31 +++++++--- flow/data_pipeline/query.py | 96 ++++++++++++++++------------- flow/visualize/i210_replay.py | 2 +- 4 files changed, 75 insertions(+), 56 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index eb9beeca9..1652da1ad 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -217,7 +217,7 @@ def rl_actions(*_): if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 
'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index c83734f4c..290ac70e9 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -142,9 +142,9 @@ def __init__(self): """ self.MAX_WAIT = 60 self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() + self.existing_partitions = {} - def get_existing_partitions(self): + def get_existing_partitions(self, table): """Return the existing partitions in the S3 bucket. Returns @@ -152,7 +152,7 @@ def get_existing_partitions(self): partitions: a list of existing partitions on S3 bucket """ response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', + QueryString='SHOW PARTITIONS {}'.format(table), QueryExecutionContext={ 'Database': 'circles' }, @@ -207,18 +207,21 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, query_date, partition): + def update_partition(self, table, query_date, partition): """Load the given partition to the trajectory_table on Athena. 
Parameters ---------- + table : str + the name of the table to update query_date : str the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=query_date, + partition=partition), QueryExecutionContext={ 'Database': 'circles' }, @@ -226,11 +229,11 @@ def update_partition(self, query_date, partition): ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) + self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) return def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default"): + query_date="today", partition="default", primary_table=""): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -243,6 +246,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the partition date to run this query on partition: str, optional name of the partition to run this query on + primary_table: str + the table whose partition that may need update Returns ------- execution_id: str @@ -257,11 +262,17 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu if query_date == "today": query_date = date.today().isoformat() - if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: - self.update_partition(query_date, partition) + source_id = "flow_{}".format(partition.split('_')[1]) + + if primary_table: + if primary_table not in self.existing_partitions.keys(): + self.existing_partitions[primary_table] = self.get_existing_partitions(primary_table) + if "date={}/partition_name={}".format(query_date, partition) not in \ + self.existing_partitions[primary_table]: + self.update_partition(primary_table, query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), QueryExecutionContext={ 'Database': 'circles' }, diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 7459a9903..152eefc52 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,8 +2,16 @@ from enum import Enum # tags for different queries -tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = {"fact_vehicle_trace": {"fact_energy_trace": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], + "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, + 
"fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], + "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -35,7 +43,7 @@ class QueryStrings(Enum): """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE {table} ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -48,7 +56,7 @@ class QueryStrings(Enum): acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -64,7 +72,7 @@ class QueryStrings(Enum): accel_without_noise AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -83,7 +91,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -108,8 +116,8 @@ class QueryStrings(Enum): MAX(time_step) - MIN(time_step) AS total_time_seconds FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1 @@ -128,8 +136,8 @@ class QueryStrings(Enum): MAX(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND 
partition_name = \'{partition}\' AND source_id = AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -143,8 +151,8 @@ class QueryStrings(Enum): SUM(power) AS power_watts FROM fact_energy_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND date = \'{date}\' + AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -176,8 +184,8 @@ class QueryStrings(Enum): 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 - AND date = \'{{date}}\' - AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND date = \'{date}\' + AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -191,10 +199,10 @@ class QueryStrings(Enum): t.throughput_per_hour FROM fact_network_throughput_agg t JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 - AND t.date = \'{{date}}\' - AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' - AND e.date = \'{{date}}\' - AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.date = \'{date}\' + AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 @@ -209,21 +217,21 @@ class QueryStrings(Enum): MAX(time_step) AS max_time_step FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 ), inflows AS ( SELECT - INT(min_time_step / 60) * 60 AS time_step, + 
CAST(min_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step GROUP BY 1, 2 ), outflows AS ( SELECT - INT(max_time_step / 60) * 60 AS time_step, + CAST(max_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step @@ -255,10 +263,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -284,7 +292,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -293,7 +301,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -302,8 +310,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -311,9 +319,9 @@ class QueryStrings(Enum): SELECT 
DISTINCT source_id, id, - INT(x/10) * 10 AS distance_meters_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -364,10 +372,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -396,7 +404,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -405,7 +413,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -414,8 +422,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM 
fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -423,9 +431,9 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - INT(time_step/60) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 23a7de1d8..a28dadec4 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -335,7 +335,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) From ba2c3da129826f42171efde224c09b8b7f093bc9 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 22 May 2020 00:18:53 -0700 Subject: [PATCH 094/335] fix inflow issue --- flow/data_pipeline/query.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 152eefc52..40698e39c 100644 --- a/flow/data_pipeline/query.py +++ 
b/flow/data_pipeline/query.py @@ -109,17 +109,25 @@ class QueryStrings(Enum): 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ - WITH agg AS ( - SELECT + WITH min_time AS ( + SELECT source_id, - COUNT(DISTINCT id) AS n_vehicles, - MAX(time_step) - MIN(time_step) AS total_time_seconds + id, + MIN(time_step) AS enter_time FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + GROUP BY 1, 2 + ), agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(enter_time) - MIN(enter_time) AS total_time_seconds + FROM min_time + WHERE 1 = 1 + AND enter_time >= 600 GROUP BY 1 ) SELECT @@ -133,7 +141,7 @@ class QueryStrings(Enum): SELECT id, source_id, - MAX(x) AS distance_meters + MAX(x)-MIN(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' From b4f844fe870220e13e0ced5f7c1709678ad7ee7f Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sat, 23 May 2020 13:12:40 -0700 Subject: [PATCH 095/335] Bug fixes for stochastic policies --- .../imitation_learning/imitating_network.py | 43 +++++++++++++------ flow/controllers/imitation_learning/run.py | 4 +- .../controllers/imitation_learning/trainer.py | 5 ++- flow/controllers/imitation_learning/utils.py | 15 ++++--- .../imitation_learning/utils_tensorflow.py | 2 +- 5 files changed, 44 insertions(+), 25 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 3b1e826da..04a0a4ce4 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -46,8 +46,9 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.load_network(load_path) else: - with tf.variable_scope(policy_scope, reuse=tf.AUTO_REUSE): - self.build_network() + print("HERE") + self.build_network() + # init replay buffer if self.training: @@ -57,9 
+58,14 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r # set up policy variables, and saver to save model. Save only non-training variables (weights/biases) if not load_existing: - self.policy_vars = [v for v in tf.all_variables() if policy_scope in v.name and 'train' not in v.name] + self.policy_vars = [v for v in tf.all_variables() if 'network_scope' in v.name and 'train' not in v.name] self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) + # tensorboard + self.writer = tf.summary.FileWriter('/Users/akashvelu/Documents/Random/tensorboard/', tf.get_default_graph()) + # track number of training steps + self.train_steps = 0 + def build_network(self): """ Defines neural network for choosing actions. Defines placeholders and forward pass @@ -69,10 +75,11 @@ def build_network(self): self.define_forward_pass() # set up training operation (e.g. Adam optimizer) if self.training: - with tf.variable_scope('train', reuse=tf.AUTO_REUSE): + with tf.variable_scope('train'): self.define_train_op() + def load_network(self, path): """ Load tensorflow model from the path specified, set action prediction to proper placeholder @@ -91,11 +98,12 @@ def load_network(self, path): if self.stochastic: # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution means = network_output[:, :self.action_dim] - cov_diags = network_output[:, self.action_dim:] + log_vars = network_output[:, self.action_dim:] + vars = tf.math.exp(log_vars) # set up action distribution (parameterized by network output) # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars, name='Prediction Distribution') # action is a sample from this distribution; one sample output per Gaussian contained 
in self.dist self.action_predictions = self.dist.sample() else: @@ -109,7 +117,7 @@ def define_placeholders(self): # placeholder for observations (input into network) self.obs_placeholder = tf.placeholder(shape=[None, self.obs_dim], name="observation", dtype=tf.float32) - # if training, define placeholder for labels (supervised leearning) + # if training, define placeholder for labels (supervised learning) if self.training: self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) @@ -130,12 +138,14 @@ def define_forward_pass(self): # parse the mean and covariance from output if stochastic, and set up distribution if self.stochastic: # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution - means = network_output[:, :self.action_dim] - cov_diags = network_output[:, self.action_dim:] + + means, log_vars = tf.split(network_output, num_or_size_splits=2, axis=1) + vars = tf.math.exp(log_vars) # set up action distribution (parameterized by network output) # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=cov_diags) + with tf.variable_scope('Action_Distribution'): + self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars) # action is a sample from this distribution; one sample output per Gaussian contained in self.dist self.action_predictions = self.dist.sample() @@ -154,12 +164,17 @@ def define_train_op(self): if self.stochastic: # negative log likelihood loss for stochastic policy - log_likelihood = self.dist.log_prob(true_actions) - self.loss = -tf.reduce_mean(log_likelihood) + self.loss = self.dist.log_prob(true_actions) + self.loss = tf.negative(self.loss) + self.loss = tf.reduce_mean(self.loss) + summary_name = 'Loss_tracking_NLL' else: # MSE loss for deterministic policy self.loss = 
tf.losses.mean_squared_error(true_actions, predicted_actions) + summary_name = 'Loss_tracking_MSE' + + self.loss_summary = tf.summary.scalar(name=summary_name, tensor=self.loss) # Adam optimizer self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) @@ -169,7 +184,9 @@ def train(self, observation_batch, action_batch): """ # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - self.sess.run([self.train_op, self.loss], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + _, loss, summary = self.sess.run([self.train_op, self.loss, self.loss_summary], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) + self.writer.add_summary(summary, global_step=self.train_steps) + self.train_steps += 1 def get_accel_from_observation(self, observation): """ diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 17434d63e..265991e20 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -41,7 +41,7 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument('--ep_len', type=int, default=5000) - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy (per iter in n_iter) + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy parser.add_argument('--n_iter', type=int, default=5) parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration @@ -50,7 +50,7 @@ def main(): parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step - parser.add_argument('--num_layers', type=int, default=3) # depth, of 
policy to be learned + parser.add_argument('--num_layers', type=int, default=3) # number of hidden layers, of policy to be learned parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning parser.add_argument('--replay_buffer_size', type=int, default=1000000) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 801c7517f..b6d04ed25 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,7 +5,7 @@ import gym import os from flow.utils.registry import make_create_env -from env_configs.singleagent_straight_road import flow_params +from examples.exp_configs.rl.multiagent.multiagent_straight_road import flow_params from imitating_controller import ImitatingController from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController @@ -111,7 +111,8 @@ def collect_training_trajectories(self, itr, batch_size): """ print("\nCollecting data to be used for training...") - trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des']) + max_decel = flow_params['env'].additional_params['max_decel'] + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des'], max_decel=max_decel) return trajectories, envsteps_this_batch diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 198a2a4ad..a55f32c97 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -11,7 +11,7 @@ """ 
Class agnostic helper functions """ -def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): +def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel): """ Samples a trajectory for a given vehicle using the actions prescribed by specified controller. Args: @@ -68,8 +68,9 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto for i in range(action_dim): # if max number of RL vehicles is not reached, insert dummy values if i >= len(vehicle_ids): - rl_actions.append(0.0) - actions_expert.append(0.0) + ignore_accel = -2 * max_decel + rl_actions.append(ignore_accel) + actions_expert.append(ignore_accel) else: imitator = controllers[vehicle_ids[i]][0] expert = controllers[vehicle_ids[i]][1] @@ -224,7 +225,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector return traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals), traj_length -def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15): +def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ Samples trajectories to collect at least min_batch_timesteps steps in the environment @@ -248,7 +249,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m if multiagent: trajectory, traj_length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) + trajectory, traj_length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel) trajectories.append(trajectory) @@ 
-256,7 +257,7 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m return trajectories, total_envsteps -def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15): +def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ Collects a fixed number of trajectories. @@ -280,7 +281,7 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le if multiagent: trajectory, length = sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) else: - trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des) + trajectory, length = sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel) trajectories.append((trajectory, length)) diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 1636da035..70df79693 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -23,7 +23,7 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, acti output_placeholder: the result of pass through Neural Network """ output_placeholder = input_placeholder - with tf.variable_scope(scope): + with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): for _ in range(n_layers): output_placeholder = tf.layers.dense(output_placeholder, size, activation=activation) output_placeholder = tf.layers.dense(output_placeholder, output_size, activation=output_activation,name='Output_Layer') From 9dbf4fc3964d83a202e90708bc84d6cacea43d55 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 24 May 2020 20:07:30 -0700 Subject: [PATCH 096/335] the extended new pipeline constructed and 
works --- flow/core/experiment.py | 8 +- flow/data_pipeline/data_pipeline.py | 6 +- flow/data_pipeline/query.py | 152 +++++++++++++++------------- 3 files changed, 89 insertions(+), 77 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 1652da1ad..6c6a5fbfb 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -213,15 +213,15 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + generate_trajectory_from_flow(trajectory_table_path, extra_info) if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path, str(only_query)[2:-2]) + cur_date, source_id, source_id), + trajectory_table_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file - os.remove(upload_file_path) + # os.remove(upload_file_path) return info_dict diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 290ac70e9..d74725590 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -60,9 +60,9 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) - upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - extra_info.to_csv(upload_only_file_path, index=False, header=False) - return upload_only_file_path + # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + # extra_info.to_csv(upload_only_file_path, index=False, header=False) + return def upload_to_s3(bucket_name, bucket_key, file_path, only_query): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 
40698e39c..595379367 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -6,13 +6,19 @@ "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, - "fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], - "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], - "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, - "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], - "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + "fact_energy_trace": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": + ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, + "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]} } +tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", + "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", + "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] + VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, @@ -53,7 +59,7 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + COALESCE (acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -69,7 +75,7 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -84,9 +90,10 @@ class QueryStrings(Enum): 
SELECT id, time_step, - accel_without_noise, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id, + speed AS cur_speed, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) @@ -99,8 +106,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + COALESCE (prev_speed + acceleration * sim_step, cur_speed) AS speed, + acceleration, road_grade, source_id FROM lagged_timestep @@ -137,35 +144,46 @@ class QueryStrings(Enum): ;""" FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ - WITH distance AS ( - SELECT + WITH sub_fact_vehicle_trace AS ( + SELECT id, - source_id, - MAX(x)-MIN(x) AS distance_meters + time_step, + x, + source_id FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND source_id = - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 - GROUP BY 1, 2 - ), energy AS ( + ), distance AS ( SELECT id, source_id, - energy_model_id, - (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, - SUM(power) AS power_watts - FROM fact_energy_trace + MAX(x)-MIN(x) AS distance_meters + FROM sub_fact_vehicle_trace WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + e.id, + e.source_id, + e.energy_model_id, + (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, + SUM(e.power) AS power_watts + FROM fact_energy_trace AS e + JOIN sub_fact_vehicle_trace AS v ON 1 = 1 + AND e.id = v.id + AND e.time_step = v.time_step + AND e.source_id = v.source_id + WHERE 1 = 1 + AND e.date = \'{date}\' + AND e.partition_name = 
\'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND v.x BETWEEN 500 AND 2300 + AND e.time_step >= 600 GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 + HAVING COUNT(DISTINCT e.time_step) > 1 ) SELECT d.id, @@ -193,7 +211,7 @@ class QueryStrings(Enum): FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' - AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -203,10 +221,10 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour - FROM fact_network_throughput_agg t - JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + FROM fact_network_throughput_agg AS t + JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 AND t.date = \'{date}\' AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' AND e.date = \'{date}\' @@ -259,15 +277,15 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 @@ 
-295,27 +313,26 @@ class QueryStrings(Enum): AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -336,8 +353,8 @@ class QueryStrings(Enum): source_id, distance_meters_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) 
AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) @@ -368,22 +385,22 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_TIME_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' - AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -401,33 +418,28 @@ class QueryStrings(Enum): cumulative_power * sim_step AS energy_joules FROM joined_trace WHERE 1 = 1 - AND date = - AND partition_name = - AND source_id = - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS 
cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -448,8 +460,8 @@ class QueryStrings(Enum): source_id, time_seconds_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) From 57b42ca5544b72a5d6641ac38c50f9677117d940 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 21:29:11 -0700 Subject: [PATCH 097/335] fix bug in vehicle power demand --- flow/data_pipeline/query.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2ee794507..8dde9474d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -11,9 +11,12 @@ speed, acceleration, road_grade, - 1200 * speed * ( - (CASE WHEN acceleration > 0 
THEN 1 ELSE 0 END * (1-0.8) * acceleration) - + 0.8 + 9.81 * SIN(road_grade) + 1200 * speed * MAX(0, ( + CASE + WHEN acceleration > 0 THEN 1 + WHEN acceleration < 0 THEN 0 + ELSE 0.5 + END * (1 - 0.8) + 0.8) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, \'{}\' AS energy_model_id, source_id From 87dcff271ac1aa5450516f2592940eddeb149fe4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:31:20 -0700 Subject: [PATCH 098/335] Add several accelerations (with/without noise, with/without failsafes) to custom output --- flow/controllers/base_controller.py | 18 +++++++++------ flow/core/kernel/vehicle/base.py | 20 ++++++++++++++-- flow/core/kernel/vehicle/traci.py | 36 +++++++++++++++++++++++------ flow/data_pipeline/data_pipeline.py | 19 ++++++++++----- 4 files changed, 71 insertions(+), 22 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7adcdf310..c417bb73a 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,8 +88,10 @@ def get_action(self, env): float the modified form of the acceleration """ - # clear the current stored accel_without_noise of this vehicle None - env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # clear the current stored accel_no_noise_no_failsafe of this vehicle None + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed @@ -110,23 +112,25 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noise = accel + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) if self.fail_safe == 
'instantaneous': - accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': - accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel) # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) - + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 647ef37fe..ed53773cb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -693,7 +693,15 @@ def get_accel(self, veh_id): """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): """Update stored 
acceleration without noise of vehicle with veh_id.""" raise NotImplementedError @@ -701,7 +709,15 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError - def get_accel_without_noise(self, veh_id): + def get_accel_no_noise_no_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_no_noise_with_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a4e06257..4a1916617 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,7 +113,9 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_without_noise"] = None + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1130,15 +1132,35 @@ def get_accel(self, veh_id): self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): """See parent class.""" - self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - def get_accel_without_noise(self, veh_id): + def 
update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): """See parent class.""" - if "accel_without_noise" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_without_noise"] = None - return self.__vehicles[veh_id]["accel_without_noise"] + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + + def get_accel_no_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + + def get_accel_no_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] + + def get_accel_with_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..11d85cb0d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -89,9 +89,11 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], 
"realilzed_accel": [], "leader_id": [], "follower_id": [], + """Return the dictionary with all the field pre-populated with empty list.""" + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], + "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], + "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], + "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -102,13 +104,18 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) - extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["target_accel_no_noise_no_failsafe"].append( + veh_kernel.get_accel_no_noise_no_failsafe(vid)) + extra_info["target_accel_with_noise_no_failsafe"].append( + veh_kernel.get_accel_with_noise_no_failsafe(vid)) + extra_info["target_accel_no_noise_with_failsafe"].append( + veh_kernel.get_accel_no_noise_with_failsafe(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) From d192a9f5dc1ca2622875cd26997198c1889d213c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:44:15 -0700 
Subject: [PATCH 099/335] update queries with new column names --- flow/data_pipeline/query.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 8dde9474d..689818e65 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -30,14 +30,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM trajectory_table + FROM fact_vehicle_trace WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE fact_vehicle_trace ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -47,10 +47,10 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -63,10 +63,10 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -78,14 +78,14 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + target_accel_no_noise_with_failsafe, road_grade, source_id, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -93,8 +93,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + prev_speed + 
target_accel_no_noise_with_failsafe * sim_step AS speed, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id FROM lagged_timestep From 92a745dd3d517135f3bef6b69782c212ffbfd336 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:47:44 -0700 Subject: [PATCH 100/335] fix flake8 issues --- flow/controllers/base_controller.py | 2 +- flow/data_pipeline/query.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index c417bb73a..3f6a0f4ae 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -130,7 +130,7 @@ def get_action(self, env): elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) - + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 689818e65..928bb5d47 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,7 +2,13 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -55,7 +61,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -71,7 +78,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - 
{}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -99,4 +107,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 215d4abb938e7b7032f227b9a2e6997092164bc6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:51:46 -0700 Subject: [PATCH 101/335] remove trailing whitespaces --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 928bb5d47..e8ac34abc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -4,8 +4,8 @@ # tags for different queries tags = { "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ] } @@ -61,7 +61,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ @@ -107,5 +107,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From 97f3ccdf34d4fb1323a25d479abc4ccab616c8f1 Mon Sep 17 00:00:00 2001 From: 
liljonnystyle Date: Sun, 24 May 2020 23:20:29 -0700 Subject: [PATCH 102/335] fix accel with noise with failsafe output --- flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/base.py | 20 ++++++++++++++------ flow/core/kernel/vehicle/traci.py | 10 ++++++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 3f6a0f4ae..1169ce5b8 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -113,6 +113,7 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) + accel_no_noise_with_failsafe = accel if self.fail_safe == 'instantaneous': accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index ed53773cb..f6f8ee382 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -694,15 +694,19 @@ def get_accel(self, veh_id): raise NotImplementedError def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise without failsafe of vehicle with veh_id.""" raise NotImplementedError def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration with noise without failsafe of vehicle with veh_id.""" + raise 
NotImplementedError + + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """Update stored acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_2d_position(self, veh_id, error=-1001): @@ -710,15 +714,19 @@ def get_2d_position(self, veh_id, error=-1001): raise NotImplementedError def get_accel_no_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise without failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_no_noise_with_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_with_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_with_failsafe(self, veh_id): + """Return the acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_realized_accel(self, veh_id): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 4a1916617..1c0b5f19b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1144,6 +1144,10 @@ def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsa """See parent class.""" self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = accel_with_noise_with_failsafe + def get_accel_no_noise_no_failsafe(self, veh_id): """See parent class.""" if "accel_no_noise_no_failsafe" 
not in self.__vehicles[veh_id]: @@ -1162,6 +1166,12 @@ def get_accel_with_noise_no_failsafe(self, veh_id): self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] + def get_accel_with_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] + def get_realized_accel(self, veh_id): """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step From e9e66a77e5ea38e2deda69ae46191d3aeee72723 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 14:27:27 -0700 Subject: [PATCH 103/335] remove extra_init() in favor of collections.defaultdict() --- flow/core/experiment.py | 5 +++-- flow/data_pipeline/data_pipeline.py | 8 -------- flow/visualize/i210_replay.py | 4 ++-- 3 files changed, 5 insertions(+), 12 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index f46f802a5..779fdb0f4 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,8 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from collections import defaultdict import datetime import logging import time @@ -147,7 +148,7 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] - extra_info = extra_init() + extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) for i in range(num_runs): diff --git a/flow/data_pipeline/data_pipeline.py 
b/flow/data_pipeline/data_pipeline.py index a999b6eb1..8cd00358c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,14 +88,6 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return -def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "road_grade": [], "source_id": []} - return extra_info - - def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 8e62bb0d8..57e72586a 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -32,7 +32,7 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info import uuid EXAMPLE_USAGE = """ @@ -208,7 +208,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) - extra_info = extra_init() + extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) i = 0 From d0df0a3d9271584c80c26fe691d7d2cb4a70f5f4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 14:27:50 -0700 Subject: [PATCH 104/335] revert temporary change --- flow/networks/highway.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/networks/highway.py 
b/flow/networks/highway.py index 871e7f415..6f10d3279 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 1, + "lanes": 4, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From 3b93994d4be805e9b12d79906e29857eeca76312 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 14:31:43 -0700 Subject: [PATCH 105/335] update energy query to MVP params --- flow/data_pipeline/query.py | 28 ++++++++++++++++++++-------- 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 8dde9474d..078bdd129 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,22 +2,28 @@ from enum import Enum # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] +} -VEHICLE_POWER_DEMAND_FINAL_SELECT = """ +VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT id, time_step, speed, acceleration, road_grade, - 1200 * speed * MAX(0, ( + MAX(0, 1200 * speed * ( CASE WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - 0.8) + 0.8) * acceleration + 9.81 * SIN(road_grade) - ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + END * (1 - {}) + {}) * acceleration + 9.81 * SIN(road_grade) + ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, \'{}\' AS energy_model_id, source_id FROM {} @@ -55,7 +61,9 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + 
{}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -71,7 +79,9 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -99,4 +109,6 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From f773ff342e28daeef3df2d20413dbe5ccbdb60a1 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 21 May 2020 16:28:52 -0700 Subject: [PATCH 106/335] more quieres added --- examples/simulate.py | 2 +- flow/core/experiment.py | 12 +- flow/data_pipeline/data_pipeline.py | 2 +- flow/data_pipeline/query.py | 375 +++++++++++++++++++++++++++- flow/visualize/i210_replay.py | 3 +- 5 files changed, 376 insertions(+), 18 deletions(-) diff --git a/examples/simulate.py b/examples/simulate.py index 86d14aa14..0b183649b 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -97,5 +97,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, to_aws=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 779fdb0f4..9984692f6 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -89,7 +89,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query=""): """Run the given network for a set number of runs. Parameters @@ -102,7 +102,7 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file - partition_name: str + to_aws: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. 
@@ -214,14 +214,12 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) - if partition_name: - if partition_name == "default": - partition_name = source_id[-3:] + if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 8cd00358c..0d8f91c39 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -100,7 +100,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 078bdd129..f22c90114 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,13 +2,8 @@ from enum import Enum # tags for different queries -tags = { - "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" - ] -} +tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", 
"POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -109,6 +104,372 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) +<<<<<<< HEAD {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) +======= + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) + + FACT_NETWORK_THROUGHPUT_AGG = """ + WITH agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(time_step) - MIN(time_step) AS total_time_seconds + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1 + ) + SELECT + source_id, + n_vehicles * 3600 / total_time_seconds AS throughput_per_hour + FROM agg + ;""" + + FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ + WITH distance AS ( + SELECT + id, + source_id, + MAX(x) AS distance_meters + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND source_id = + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + id, + source_id, + energy_model_id, + (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, + SUM(power) AS power_watts + FROM fact_energy_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ) + SELECT + d.id, + d.source_id, + e.energy_model_id, + distance_meters, + power_watts * time_step_size_seconds AS energy_joules, + distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, + 74564 * 
distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + FROM distance d + JOIN energy e ON 1=1 + AND d.id = e.id + AND d.source_id = e.source_id + ; + """ + + FACT_NETWORK_FUEL_EFFICIENCY_AGG = """ + SELECT + source_id, + energy_model_id, + SUM(distance_meters) AS distance_meters, + SUM(energy_joules) AS energy_joules, + SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, + 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + FROM fact_vehicle_fuel_efficiency_agg + WHERE 1 = 1 + AND date = \'{{date}}\' + AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + GROUP BY 1, 2 + ;""" + + LEADERBOARD_CHART = """ + SELECT + t.source_id, + e.energy_model_id, + e.efficiency_meters_per_joules, + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + t.throughput_per_hour + FROM fact_network_throughput_agg t + JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + AND t.date = \'{{date}}\' + AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{{date}}\' + AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.source_id = e.source_id + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ;""" + + FACT_NETWORK_INFLOWS_OUTFLOWS = """ + WITH min_max_time_step AS ( + SELECT + id, + source_id, + MIN(time_step) AS min_time_step, + MAX(time_step) AS max_time_step + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), inflows AS ( + SELECT + INT(min_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS inflow_rate + FROM min_max_time_step + GROUP BY 1, 2 + ), outflows AS ( + SELECT + INT(max_time_step / 60) * 60 AS time_step, + source_id, + 60 * COUNT(DISTINCT id) AS outflow_rate + 
FROM min_max_time_step + GROUP BY 1, 2 + ) + SELECT + COALESCE(i.time_step, o.time_step) AS time_step, + COALESCE(i.source_id, o.source_id) AS source_id, + COALESCE(i.inflow_rate, 0) AS inflow_rate, + COALESCE(o.outflow_rate, 0) AS outflow_rate + FROM inflows i + FULL OUTER JOIN outflows o ON 1 = 1 + AND i.time_step = o.time_step + AND i.source_id = o.source_id + ;""" + + FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(x/10) * 10 AS distance_meters_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(x/10) * 10 AS 
distance_meters_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(x/10) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + distance_meters_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.distance_meters_bin, bsa.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + bce.cumulative_energy_avg, + bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.distance_meters_bin = bsa.distance_meters_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + 
AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + ;""" + + FACT_NETWORK_METRICS_BY_TIME_AGG = """ + WITH joined_trace AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + SUM(power) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + cumulative_power + FROM fact_vehicle_trace vt + JOIN fact_energy_trace et ON 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + AND et.date = \'{{date}}\' + AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.id = et.id + AND vt.source_id = et.source_id + AND vt.time_step = et.time_step + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 + AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + WHERE 1 = 1 + ), cumulative_energy AS ( + SELECT + id, + source_id, + time_step, + x, + energy_model_id, + cumulative_power * sim_step AS energy_joules + FROM joined_trace + WHERE 1 = 1 + AND date = + AND partition_name = + AND source_id = + AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2, 3 + HAVING COUNT(DISTINCT time_step) > 1 + ), binned_cumulative_energy AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy + GROUP BY 1, 2 + ), binned_speed_accel AS ( + SELECT + source_id, + INT(time_step/60) * 60 AS time_seconds_bin, + AVG(speed) AS speed_avg, + AVG(speed) + STDEV(speed) AS speed_upper_bound, + AVG(speed) - STDEV(speed) AS speed_lower_bound, + 
AVG(accel_without_noise) AS accel_avg, + AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + FROM fact_vehicle_trace + WHERE 1 = 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + AND x BETWEEN 500 AND 2300 + AND time_step >= 600 + GROUP BY 1, 2 + ), binned_energy_start_end AS ( + SELECT DISTINCT + source_id, + id, + INT(time_step/60) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + FROM cumulative_energy + ), binned_energy AS ( + SELECT + source_id, + time_seconds_bin, + AVG(energy_end - energy_start) AS instantaneous_energy_avg, + AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + FROM binned_energy_start_end + GROUP BY 1, 2 + ) + SELECT + COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, + COALESCE(bce.time_seconds_bin, bsa.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + bce.cumulative_energy_avg, + bce.cumulative_energy_lower_bound, + bce.cumulative_energy_upper_bound, + bsa.speed_avg, + bsa.speed_upper_bound, + bsa.speed_lower_bound, + bsa.accel_avg, + bsa.accel_upper_bound, + bsa.accel_lower_bound, + be.instantaneous_energy_avg, + be.instantaneous_energy_upper_bound, + be.instantaneous_energy_lower_bound + FROM binned_cumulative_energy bce + FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 + AND bce.source_id = bsa.source_id + AND bce.time_seconds_bin = bsa.time_seconds_bin + FULL OUTER JOIN binned_energy be ON 1 = 1 + AND COALESCE(bce.source_id, bsa.source_id) = be.source_id + AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + ;""" +>>>>>>> more 
quieres added diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 57e72586a..f7b4ff358 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -334,10 +334,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = source_id[-3:] cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( - cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), + cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From 7731187279b8c41e421cf6641157fbd6e2de2e40 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 21 May 2020 19:54:59 -0700 Subject: [PATCH 107/335] update the architecture and fix some bugs --- flow/core/experiment.py | 2 +- flow/data_pipeline/data_pipeline.py | 31 +++++++--- flow/data_pipeline/query.py | 96 ++++++++++++++++------------- flow/visualize/i210_replay.py | 2 +- 4 files changed, 75 insertions(+), 56 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 9984692f6..6b0e32589 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -218,7 +218,7 @@ def rl_actions(*_): if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(only_query)[2:-2]) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 0d8f91c39..8f57a29d8 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -134,9 +134,9 @@ def __init__(self): """ 
self.MAX_WAIT = 60 self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() + self.existing_partitions = {} - def get_existing_partitions(self): + def get_existing_partitions(self, table): """Return the existing partitions in the S3 bucket. Returns @@ -144,7 +144,7 @@ def get_existing_partitions(self): partitions: a list of existing partitions on S3 bucket """ response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', + QueryString='SHOW PARTITIONS {}'.format(table), QueryExecutionContext={ 'Database': 'circles' }, @@ -199,18 +199,21 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, query_date, partition): + def update_partition(self, table, query_date, partition): """Load the given partition to the trajectory_table on Athena. Parameters ---------- + table : str + the name of the table to update query_date : str the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=query_date, + partition=partition), QueryExecutionContext={ 'Database': 'circles' }, @@ -218,11 +221,11 @@ def update_partition(self, query_date, partition): ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) + self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) return def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default"): + query_date="today", partition="default", primary_table=""): """Start the execution of a query, does not wait for it 
to finish. Parameters @@ -235,6 +238,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the partition date to run this query on partition: str, optional name of the partition to run this query on + primary_table: str + the table whose partition that may need update Returns ------- execution_id: str @@ -249,11 +254,17 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu if query_date == "today": query_date = date.today().isoformat() - if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: - self.update_partition(query_date, partition) + source_id = "flow_{}".format(partition.split('_')[1]) + + if primary_table: + if primary_table not in self.existing_partitions.keys(): + self.existing_partitions[primary_table] = self.get_existing_partitions(primary_table) + if "date={}/partition_name={}".format(query_date, partition) not in \ + self.existing_partitions[primary_table]: + self.update_partition(primary_table, query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), QueryExecutionContext={ 'Database': 'circles' }, diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index f22c90114..b010ac2ba 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,8 +2,16 @@ from enum import Enum # tags for different queries -tags = {"vehicle_energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = {"fact_vehicle_trace": {"fact_energy_trace": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], + "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, + 
"fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], + "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + } VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -38,7 +46,7 @@ class QueryStrings(Enum): """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE {table} ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -51,7 +59,7 @@ class QueryStrings(Enum): acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -69,7 +77,7 @@ class QueryStrings(Enum): accel_without_noise AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -90,7 +98,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -120,8 +128,8 @@ class QueryStrings(Enum): MAX(time_step) - MIN(time_step) AS total_time_seconds FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1 @@ -140,8 +148,8 @@ class QueryStrings(Enum): MAX(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' 
+ AND partition_name = \'{partition}\' AND source_id = AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -155,8 +163,8 @@ class QueryStrings(Enum): SUM(power) AS power_watts FROM fact_energy_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND date = \'{date}\' + AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 @@ -188,8 +196,8 @@ class QueryStrings(Enum): 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 - AND date = \'{{date}}\' - AND parititon_name = \'{{partition}}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND date = \'{date}\' + AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -203,10 +211,10 @@ class QueryStrings(Enum): t.throughput_per_hour FROM fact_network_throughput_agg t JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 - AND t.date = \'{{date}}\' - AND t.partition_name = \'{{partition}}_FACT_NETWORK_THROUGHPUT_AGG\' - AND e.date = \'{{date}}\' - AND e.partition_name = \'{{partition}}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' + AND t.date = \'{date}\' + AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 @@ -221,21 +229,21 @@ class QueryStrings(Enum): MAX(time_step) AS max_time_step FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 ), inflows AS ( SELECT - INT(min_time_step / 60) * 60 AS time_step, + 
CAST(min_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step GROUP BY 1, 2 ), outflows AS ( SELECT - INT(max_time_step / 60) * 60 AS time_step, + CAST(max_time_step / 60 AS INTEGER) * 60 AS time_step, source_id, 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step @@ -267,10 +275,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -296,7 +304,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -305,7 +313,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(x/10) * 10 AS distance_meters_bin, + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -314,8 +322,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -323,9 +331,9 @@ class QueryStrings(Enum): SELECT 
DISTINCT source_id, id, - INT(x/10) * 10 AS distance_meters_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -376,10 +384,10 @@ class QueryStrings(Enum): cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' - AND et.date = \'{{date}}\' - AND et.partition_name = \'{{partition}}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND et.date = \'{date}\' + AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -408,7 +416,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound @@ -417,7 +425,7 @@ class QueryStrings(Enum): ), binned_speed_accel AS ( SELECT source_id, - INT(time_step/60) * 60 AS time_seconds_bin, + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDEV(speed) AS speed_upper_bound, AVG(speed) - STDEV(speed) AS speed_lower_bound, @@ -426,8 +434,8 @@ class QueryStrings(Enum): AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound FROM 
fact_vehicle_trace WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 AND time_step >= 600 GROUP BY 1, 2 @@ -435,9 +443,9 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - INT(time_step/60) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, INT(x/10) * 10 ORDER BY x ASC) AS energy_end + CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index f7b4ff358..4faf54385 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -335,7 +335,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) From 71cb4ee9c5f2a2d32e6172620cd3cfb410fad5eb Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 22 May 2020 00:18:53 -0700 Subject: [PATCH 108/335] fix inflow issue --- flow/data_pipeline/query.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index b010ac2ba..47bd9dd81 100644 --- a/flow/data_pipeline/query.py +++ 
b/flow/data_pipeline/query.py @@ -121,17 +121,25 @@ class QueryStrings(Enum): 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ - WITH agg AS ( - SELECT + WITH min_time AS ( + SELECT source_id, - COUNT(DISTINCT id) AS n_vehicles, - MAX(time_step) - MIN(time_step) AS total_time_seconds + id, + MIN(time_step) AS enter_time FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + GROUP BY 1, 2 + ), agg AS ( + SELECT + source_id, + COUNT(DISTINCT id) AS n_vehicles, + MAX(enter_time) - MIN(enter_time) AS total_time_seconds + FROM min_time + WHERE 1 = 1 + AND enter_time >= 600 GROUP BY 1 ) SELECT @@ -145,7 +153,7 @@ class QueryStrings(Enum): SELECT id, source_id, - MAX(x) AS distance_meters + MAX(x)-MIN(x) AS distance_meters FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' From a90e8d0510a28904177f0591ae2f1b086287742d Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 24 May 2020 20:07:30 -0700 Subject: [PATCH 109/335] the extended new pipeline constructed and works --- flow/core/experiment.py | 8 +- flow/data_pipeline/data_pipeline.py | 6 +- flow/data_pipeline/query.py | 160 +++++++++++++++------------- 3 files changed, 90 insertions(+), 84 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 6b0e32589..c50648746 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -214,15 +214,15 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + generate_trajectory_from_flow(trajectory_table_path, extra_info) if to_aws: cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path, str(only_query)[2:-2]) + cur_date, source_id, 
source_id), + trajectory_table_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file - os.remove(upload_file_path) + # os.remove(upload_file_path) return info_dict diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 8f57a29d8..72caa5218 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -60,9 +60,9 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) - upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - extra_info.to_csv(upload_only_file_path, index=False, header=False) - return upload_only_file_path + # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + # extra_info.to_csv(upload_only_file_path, index=False, header=False) + return def upload_to_s3(bucket_name, bucket_key, file_path, only_query): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 47bd9dd81..abc3bcd53 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -6,14 +6,20 @@ "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, - "fact_energy_trace": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], - "fact_network_metrics_by_distance_agg": ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], - "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, - "fact_vehicle_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"], - "fact_network_fuel_efficiency_agg": ["LEADERBOARD_CHART"] + "fact_energy_trace": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], + "fact_network_metrics_by_distance_agg": + ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], + 
"fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, + "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]} } -VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ +tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", + "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", + "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] + +VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, time_step, @@ -56,7 +62,7 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + COALESCE (acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -74,7 +80,7 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id FROM fact_vehicle_trace @@ -91,9 +97,10 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + COALESCE (accel_without_noise, acceleration, realized_accel) AS acceleration, road_grade, source_id, + speed AS cur_speed, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) @@ -106,17 +113,12 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + COALESCE (prev_speed + acceleration * sim_step, cur_speed) AS speed, + acceleration, road_grade, source_id FROM lagged_timestep ) -<<<<<<< HEAD - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', - 'denoised_speed_cte')) -======= 
{}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) @@ -149,35 +151,46 @@ class QueryStrings(Enum): ;""" FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ - WITH distance AS ( - SELECT + WITH sub_fact_vehicle_trace AS ( + SELECT id, - source_id, - MAX(x)-MIN(x) AS distance_meters + time_step, + x, + source_id FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND source_id = - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 - GROUP BY 1, 2 - ), energy AS ( + ), distance AS ( SELECT id, source_id, - energy_model_id, - (MAX(time_step) - MIN(time_step)) / (COUNT(DISTINCT time_step) - 1) AS time_step_size_seconds, - SUM(power) AS power_watts - FROM fact_energy_trace + MAX(x)-MIN(x) AS distance_meters + FROM sub_fact_vehicle_trace WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 + GROUP BY 1, 2 + ), energy AS ( + SELECT + e.id, + e.source_id, + e.energy_model_id, + (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, + SUM(e.power) AS power_watts + FROM fact_energy_trace AS e + JOIN sub_fact_vehicle_trace AS v ON 1 = 1 + AND e.id = v.id + AND e.time_step = v.time_step + AND e.source_id = v.source_id + WHERE 1 = 1 + AND e.date = \'{date}\' + AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND v.x BETWEEN 500 AND 2300 + AND e.time_step >= 600 GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 + HAVING COUNT(DISTINCT e.time_step) > 1 ) SELECT d.id, @@ -205,7 +218,7 @@ class QueryStrings(Enum): FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' - AND parititon_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND partition_name = 
\'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 ;""" @@ -215,10 +228,10 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon + 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour - FROM fact_network_throughput_agg t - JOIN fact_network_fuel_efficiency_agg e ON 1 = 1 + FROM fact_network_throughput_agg AS t + JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 AND t.date = \'{date}\' AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' AND e.date = \'{date}\' @@ -271,15 +284,15 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 @@ -307,27 +320,26 @@ class QueryStrings(Enum): AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS 
cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -348,8 +360,8 @@ class QueryStrings(Enum): source_id, distance_meters_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) @@ -380,22 +392,22 @@ class QueryStrings(Enum): FACT_NETWORK_METRICS_BY_TIME_AGG = """ WITH joined_trace AS ( SELECT - id, - source_id, - time_step, - x, + vt.id, + vt.source_id, + vt.time_step, + vt.x, energy_model_id, - time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + vt.time_step - LAG(vt.time_step, 1) + OVER 
(PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, SUM(power) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' - AND et.partition_name = \'{partitio}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' + AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step @@ -413,33 +425,28 @@ class QueryStrings(Enum): cumulative_power * sim_step AS energy_joules FROM joined_trace WHERE 1 = 1 - AND date = - AND partition_name = - AND source_id = - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AND x BETWEEN 500 AND 2300 AND time_step >= 600 - GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT time_step) > 1 ), binned_cumulative_energy AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDEV(energy_joules) AS cumulative_energy_lower_bound + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound FROM cumulative_energy GROUP BY 1, 2 + HAVING COUNT(DISTINCT time_step) > 1 ), binned_speed_accel AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, AVG(speed) AS speed_avg, - AVG(speed) + STDEV(speed) AS speed_upper_bound, - AVG(speed) - STDEV(speed) AS speed_lower_bound, + AVG(speed) + STDDEV(speed) AS speed_upper_bound, + AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + 
STDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDEV(accel_without_noise) AS accel_lower_bound + AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, + AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -460,8 +467,8 @@ class QueryStrings(Enum): source_id, time_seconds_bin, AVG(energy_end - energy_start) AS instantaneous_energy_avg, - AVG(energy_end - energy_start) + STDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, - AVG(energy_end - energy_start) - STDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound + AVG(energy_end - energy_start) + STDDEV(energy_end - energy_start) AS instantaneous_energy_upper_bound, + AVG(energy_end - energy_start) - STDDEV(energy_end - energy_start) AS instantaneous_energy_lower_bound FROM binned_energy_start_end GROUP BY 1, 2 ) @@ -488,4 +495,3 @@ class QueryStrings(Enum): AND COALESCE(bce.source_id, bsa.source_id) = be.source_id AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin ;""" ->>>>>>> more quieres added From 94337c20d80cb38e25257b86d06dc9684c6baf4e Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 25 May 2020 15:03:26 -0700 Subject: [PATCH 110/335] updated the lambda function --- flow/data_pipeline/lambda_function.py | 43 +++++++++++++++++++-------- 1 file changed, 31 insertions(+), 12 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index fe8efe3c0..22145af9c 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,28 +2,47 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery -from flow.data_pipeline.query import tags +from flow.data_pipeline.query import tags, tables s3 = boto3.client('s3') queryEngine = AthenaQuery() def lambda_handler(event, context): - """Run on AWS 
Lambda to start query automatically.""" + """Handles S3 put event on AWS Lambda.""" + records = [] + # delete all unwanted metadata for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) + table = key.split('/')[0] + if table not in tables: + continue + if key[-9:] == '.metadata': + s3.delete_object(Bucket=bucket, Key=key) + continue + if table in tags.keys(): + records.append((bucket, key, table)) + # initialize the queries + for bucket, key, table in records: query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] - response = s3.head_object(Bucket=bucket, Key=key) - required_query = response["Metadata"]["run-query"] + source_id = "flow_{}".format(partition.split('_')[1]) + # response = s3.head_object(Bucket=bucket, Key=key) + # required_query = response["Metadata"]["run-query"] - if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: - if required_query == "all": - query_list = tags["energy"] - elif not required_query: - break - else: - query_list = required_query.split("\', \'") + query_dict = tags[table] + + # handle different energy models + if table == "fact_energy_trace": + energy_model_id = partition.replace(source_id, "")[1:] + query_dict = tags[energy_model_id] + + # initialize queries and store them at appropriate locations + for table_name, query_list in query_dict.items(): for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data.pipeline/result/auto/', query_date, partition) + result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name, + query_date, + source_id, + query_name) + queryEngine.run_query(query_name, result_location, query_date, partition, table) From 3a501dab148900d2d5f3a17f7af7232acf1eb059 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 25 May 2020 15:59:26 -0700 Subject: [PATCH 111/335] fix minor string formatting issue in the query --- 
flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 078bdd129..7091997cc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -22,11 +22,11 @@ WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - {}) + {}) * acceleration + 9.81 * SIN(road_grade) + END * (1 - {0}) + {0}) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, - \'{}\' AS energy_model_id, + \'{1}\' AS energy_model_id, source_id - FROM {} + FROM {2} ORDER BY id, time_step """ From 36086521fff4c3e3d0728423e36af15bfa242403 Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 112/335] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/vehicle/traci.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b89e981be..58eddfd1c 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -220,6 +220,10 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -543,6 +547,10 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * 
ml_to_gallons + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From 0a83576b80c2955ac8e09f88e0b159c836a887c4 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 113/335] Add an on ramp option --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/envs/base.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..194da1099 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), diff --git a/flow/envs/base.py b/flow/envs/base.py index baf8270b5..f2067d947 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -405,8 +405,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. 
\n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} From 9b649efbda97a80f3c926ec1fc9838b76f27aa60 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 114/335] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From c18ec58b8a1e0a036b111649dbd2b0f05bd28c55 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 115/335] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++ examples/simulate.py | 10 +- flow/controllers/base_controller.py | 12 ++ flow/core/experiment.py | 27 ++++- flow/core/kernel/vehicle/base.py | 16 +++ flow/core/kernel/vehicle/traci.py | 15 +++ 8 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time 
+ + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + self.client = boto3.client("athena") + self.existing_partitions = self.get_existing_partitions() + + def get_existing_partitions(self): + """prints the existing partitions in the S3 bucket""" + + response = self.client.start_query_execution( + QueryString='SHOW PARTITIONS trajectory_table', + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise 
RuntimeError("get current partitions timed out") + response = self.client.get_query_results( + QueryExecutionId=response['QueryExecutionId'], + MaxResults=1000 + ) + return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + + def check_status(self, execution_id): + """ Return the status of the execution with given id + + Parameters + ---------- + execution_id : string + id of the execution that is checked for + Returns + ------- + status: str + QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED + """ + + response = self.client.get_query_execution( + QueryExecutionId=execution_id + ) + return response['QueryExecution']['Status']['State'] + + def wait_for_execution(self, execution_id): + """ wait for the execution to finish or time-out + + Parameters + ---------- + execution_id : str + id of the execution this is watiing for + Returns + ------- + time_out: bool + True if time-out, False if success + Raises + ------ + RuntimeError: if execution failed or get canceled + """ + start = time() + while time() - start < self.MAX_WAIT: + state = self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ start the execution of a 
query, does not wait for it to finish + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if tries to run a query not existed in QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query not existed: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" + "a S3 location") +parser.add_argument("--run", type=str, nargs="+") 
+parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q) diff --git a/examples/simulate.py b/examples/simulate.py index 848f030a4..f54bb38d9 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -48,6 +48,12 @@ def parse_args(args): action='store_true', help='Specifies whether to generate an emission file from the ' 'simulation.') + parser.add_argument( + '--to_aws', + type=str, nargs='?', default=None, const="default", + help='Specifies the name of the partition to store the output' + 'file on S3. Putting not None value for this argument' + 'automatically set gen_emission to True.') return parser.parse_known_args(args)[0] @@ -55,6 +61,8 @@ def parse_args(args): if __name__ == "__main__": flags = parse_args(sys.argv[1:]) + flags.gen_emission = flags.gen_emission or flags.to_aws + # Get the flow_params object. module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config]) flow_params = getattr(module, flags.exp_config).flow_params @@ -83,4 +91,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4004b1c4d..6e6734764 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,6 +88,9 @@ def get_action(self, env): float the modified form of the acceleration """ + # clear the current stored accel_without_noise of this vehicle None + env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed if len(env.k.vehicle.get_edge(self.veh_id)) == 0: @@ -105,6 +108,15 @@ def get_action(self, env): if accel is None: return None + # store the acceleration without noise to each vehicle + # run fail safe if requested + accel_without_noice = accel + if self.fail_safe == 'instantaneous': + accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + elif self.fail_safe == 'safe_velocity': + accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a0497b595..1f0cce355 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_table, upload_to_s3 import datetime import logging import time @@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, 
num_runs, rl_actions=None, convert_to_csv=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters @@ -98,6 +99,10 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False): convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. 
for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 706504027..0c992503c 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -684,3 +684,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 58eddfd1c..b06ab112b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,6 +113,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1128,3 +1129,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" 
self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): + self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From 5878eae7cc27e766085362b478beb2abe1f51933 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 116/335] get up to date with i210_dev --- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 2 files changed, 22 insertions(+), 58 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 194da1099..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From 34cecff822badd955a79ee8c773640875e6bea2b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 117/335] remove dupe imports --- examples/train.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/examples/train.py b/examples/train.py index e34b2935c..7cb84d361 100644 --- 
a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,8 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray - from ray.tune import run_experiments flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -413,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From fc9983631ec172b624ae6dfef65eeed1eb8dce4c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 118/335] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train 
policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 6c11a70281ba4673edad160ffdcf68fc4372c13a Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 119/335] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From d6ffaa6bb0783fe0aaf0feb09a7b2b1f9591d0b5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 120/335] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 151e3b2195de3d6f9079593d0acf684489633e81 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 16:57:52 -0700 Subject: [PATCH 121/335] fix rebase errors --- examples/data_pipeline.py | 179 
---------------------------- examples/query.py | 8 -- examples/run_query.py | 34 ------ flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/traci.py | 8 -- flow/data_pipeline/data_pipeline.py | 10 -- 6 files changed, 1 insertion(+), 239 deletions(-) delete mode 100644 examples/data_pipeline.py delete mode 100644 examples/query.py delete mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py deleted file mode 100644 index 5fdc30cf2..000000000 --- a/examples/data_pipeline.py +++ /dev/null @@ -1,179 +0,0 @@ -import pandas as pd -import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings -from time import time - - -def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission - - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ - raw_output = pd.read_csv(data_path, index_col=["time", "id"]) - required_cols = {"time", "id", "speed", "x", "y"} - raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) - - extra_info = pd.DataFrame.from_dict(extra_info) - extra_info.set_index(["time", "id"]) - raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) - - # add the partition column - raw_output['partition'] = partition_name - - output_file_path = data_path[:-4]+"_trajectory.csv" - raw_output.to_csv(output_file_path, index=False) - return output_file_path - - -def upload_to_s3(bucket_name, bucket_key, file_path): - """ upload a file to S3 bucket - - Parameters - ---------- - bucket_name : str - the bucket to upload to - bucket_key: str 
- the key within the bucket for the file - file_path: str - the path of the file to be uploaded - """ - s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) - return - - -class AthenaQuery: - - def __init__(self): - self.MAX_WAIT = 60 - self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() - - def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" - - response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("get current partitions timed out") - response = self.client.get_query_results( - QueryExecutionId=response['QueryExecutionId'], - MaxResults=1000 - ) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] - - def check_status(self, execution_id): - """ Return the status of the execution with given id - - Parameters - ---------- - execution_id : string - id of the execution that is checked for - Returns - ------- - status: str - QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED - """ - - response = self.client.get_query_execution( - QueryExecutionId=execution_id - ) - return response['QueryExecution']['Status']['State'] - - def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out - - Parameters - ---------- - execution_id : str - id of the execution this is watiing for - Returns - ------- - time_out: bool - True if time-out, False if success - Raises - ------ - RuntimeError: if execution failed or get canceled - """ - start = time() - while time() - start < self.MAX_WAIT: - state = self.check_status(execution_id) - if state == 'FAILED' or state == 'CANCELLED': - raise RuntimeError("update partition failed") - elif state == 'SUCCEEDED': - return False - return True - - def 
update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena - - Parameters - ---------- - partition : str - the new partition that needs to be loaded - """ - response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) - return - - def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish - - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be run - result_location: str, optional - location on the S3 bucket where the result will be stored - partition: str, optional - name of the partition to run this query on - Returns - ------- - execution_id: str - the execution id of the execution started by this method - Raises - ------ - ValueError: if tries to run a query not existed in QueryStrings enum - """ - if query_name not in QueryStrings.__members__: - raise ValueError("query not existed: please add it to query.py") - - if partition not in self.existing_partitions: - self.update_partition(partition) - - response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), - QueryExecutionContext={ - 'Database': 'simulation' - }, - ResultConfiguration={ - 'OutputLocation': result_location, - }, - WorkGroup='primary' - ) - return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py deleted file mode 100644 index 3fbbe69e1..000000000 --- a/examples/query.py +++ /dev/null @@ -1,8 +0,0 @@ -from enum import Enum - -tags = {} - - -class QueryStrings(Enum): - SAMPLE = 
"SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py deleted file mode 100644 index 7b4a5af7d..000000000 --- a/examples/run_query.py +++ /dev/null @@ -1,34 +0,0 @@ -import argparse -import sys -from examples.data_pipeline import AthenaQuery -from examples.query import QueryStrings - -parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") -parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") -parser.add_argument("--partition", type=str, nargs='?', default="default") -parser.add_argument("--list_partitions", action="store_true") -parser.add_argument("--check_status", type=str, nargs='+') -parser.add_argument("--list_queries", action="store_true") - - -if __name__ == "__main__": - args = parser.parse_args() - queryEngine = AthenaQuery() - - if args.run: - execution_ids = [] - for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) - print(execution_ids) - if args.list_partitions: - print(queryEngine.existing_partitions) - if args.check_status: - status = dict() - for execution_id in args.check_status: - status[execution_id] = queryEngine.check_status(execution_id) - print(status) - if args.list_queries: - for q in QueryStrings: - print(q) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 1169ce5b8..ac29bca2e 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -92,6 +92,7 @@ def get_action(self, env): env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) 
env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 5f3821a01..c1e614fe5 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -224,10 +224,6 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the number of not departed vehicles - self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ - sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -551,10 +547,6 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons - def get_num_not_departed(self): - """See parent class.""" - return self.num_not_departed - def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 11d85cb0d..aea9b349c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,16 +88,6 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return -def extra_init(): - """Return the dictionary with all the field pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], - "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], - "target_accel_with_noise_no_failsafe": [], 
"target_accel_no_noise_with_failsafe": [], - "realized_accel": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "road_grade": [], "source_id": []} - return extra_info - - def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: From 9d2026e6a3635f756417f632abd30bb8891310a9 Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 122/335] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/vehicle/traci.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b89e981be..58eddfd1c 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -220,6 +220,10 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -543,6 +547,10 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From 43eeee0193d92e10ef76c9436dac903a52060157 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 
16:43:22 -0700 Subject: [PATCH 123/335] Add an on ramp option --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- flow/envs/base.py | 14 ++++++++++++-- 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..194da1099 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), diff --git a/flow/envs/base.py b/flow/envs/base.py index baf8270b5..f2067d947 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -405,8 +405,18 @@ def step(self, rl_actions): # test if the environment should terminate due to a collision or the # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * - (self.env_params.warmup_steps + self.env_params.horizon) - or crash) + (self.env_params.warmup_steps + self.env_params.horizon)) + if crash: + print( + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************\n" + "WARNING: There was a crash. 
\n" + "**********************************************************\n" + "**********************************************************\n" + "**********************************************************" + ) + # compute the info for each agent infos = {} From 8eed7e16ef8793914761a48cc6c0af30756b89d0 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 124/335] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From c373e94388e8fa4399a95e377c1ba95bbdb282c3 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 125/335] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++ examples/simulate.py | 10 +- flow/controllers/base_controller.py | 12 ++ flow/core/experiment.py | 27 ++++- flow/core/kernel/vehicle/base.py | 16 +++ flow/core/kernel/vehicle/traci.py | 15 +++ 8 files changed, 299 insertions(+), 2 deletions(-) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time 
+ + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + self.client = boto3.client("athena") + self.existing_partitions = self.get_existing_partitions() + + def get_existing_partitions(self): + """prints the existing partitions in the S3 bucket""" + + response = self.client.start_query_execution( + QueryString='SHOW PARTITIONS trajectory_table', + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise 
RuntimeError("get current partitions timed out") + response = self.client.get_query_results( + QueryExecutionId=response['QueryExecutionId'], + MaxResults=1000 + ) + return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + + def check_status(self, execution_id): + """ Return the status of the execution with given id + + Parameters + ---------- + execution_id : string + id of the execution that is checked for + Returns + ------- + status: str + QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED + """ + + response = self.client.get_query_execution( + QueryExecutionId=execution_id + ) + return response['QueryExecution']['Status']['State'] + + def wait_for_execution(self, execution_id): + """ wait for the execution to finish or time-out + + Parameters + ---------- + execution_id : str + id of the execution this is watiing for + Returns + ------- + time_out: bool + True if time-out, False if success + Raises + ------ + RuntimeError: if execution failed or get canceled + """ + start = time() + while time() - start < self.MAX_WAIT: + state = self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ start the execution of a 
query, does not wait for it to finish + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if tries to run a query not existed in QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query not existed: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" + "a S3 location") +parser.add_argument("--run", type=str, nargs="+") 
+parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q) diff --git a/examples/simulate.py b/examples/simulate.py index 848f030a4..f54bb38d9 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -48,6 +48,12 @@ def parse_args(args): action='store_true', help='Specifies whether to generate an emission file from the ' 'simulation.') + parser.add_argument( + '--to_aws', + type=str, nargs='?', default=None, const="default", + help='Specifies the name of the partition to store the output' + 'file on S3. Putting not None value for this argument' + 'automatically set gen_emission to True.') return parser.parse_known_args(args)[0] @@ -55,6 +61,8 @@ def parse_args(args): if __name__ == "__main__": flags = parse_args(sys.argv[1:]) + flags.gen_emission = flags.gen_emission or flags.to_aws + # Get the flow_params object. module = __import__("exp_configs.non_rl", fromlist=[flags.exp_config]) flow_params = getattr(module, flags.exp_config).flow_params @@ -83,4 +91,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4004b1c4d..6e6734764 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,6 +88,9 @@ def get_action(self, env): float the modified form of the acceleration """ + # clear the current stored accel_without_noise of this vehicle None + env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed if len(env.k.vehicle.get_edge(self.veh_id)) == 0: @@ -105,6 +108,15 @@ def get_action(self, env): if accel is None: return None + # store the acceleration without noise to each vehicle + # run fail safe if requested + accel_without_noice = accel + if self.fail_safe == 'instantaneous': + accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + elif self.fail_safe == 'safe_velocity': + accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a0497b595..1f0cce355 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_table, upload_to_s3 import datetime import logging import time @@ -85,7 +86,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, 
num_runs, rl_actions=None, convert_to_csv=False): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters @@ -98,6 +99,10 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False): convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. Returns ------- @@ -136,6 +141,8 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} for i in range(num_runs): ret = 0 @@ -153,6 +160,18 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + # Compute the results for the custom callables. 
for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -195,4 +214,10 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) + output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + output_file.split('/')[-1], output_file) + return info_dict diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 706504027..0c992503c 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -684,3 +684,19 @@ def get_max_speed(self, veh_id, error): float """ raise NotImplementedError + + ########################################################################### + # Methods for Datapipeline # + ########################################################################### + + def get_accel(self, veh_id): + """ see traci class """ + raise NotImplementedError + + def update_accel_without_noise(self, veh_id, accel_without_noise): + """ see traci class """ + raise NotImplementedError + + def get_accel_without_noise(self, veh_id): + """ see traci class """ + raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 58eddfd1c..b06ab112b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,6 +113,7 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] + self.__vehicles[veh_id]["accel_without_noise"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1128,3 +1129,17 @@ def get_max_speed(self, veh_id, error=-1001): def set_max_speed(self, veh_id, max_speed): """See parent class.""" 
self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) + + # add for data pipeline + def get_accel(self, veh_id): + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + + def update_accel_without_noise(self, veh_id, accel_without_noise): + self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + + def get_accel_without_noise(self, veh_id): + return self.__vehicles[veh_id]["accel_without_noise"] + + def get_road_grade(self, veh_id): + # TODO + return 0 From a88c209f5fa6eb057c978c6583ab040cd11a8aa0 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 126/335] get up to date with i210_dev --- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 2 files changed, 22 insertions(+), 58 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 194da1099..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From 89f8d1d504a4e4c98bc564967c1490f0718774cd Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 127/335] remove dupe imports --- examples/train.py | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/examples/train.py b/examples/train.py index e34b2935c..7cb84d361 100644 --- 
a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,8 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray - from ray.tune import run_experiments flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -413,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From 306a01fe55f3e756931098e306d03872602b88b2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 128/335] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train 
policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 0d5fa6bda67aca96014b8be335cde547b47d7f7b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 129/335] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 0ade197b74f7ec0a5a4890e419d605ff3933f824 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 130/335] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 1111e9aa34a4ce46058ec282255c43d03b117123 Mon Sep 17 00:00:00 2001 From: chendiw <31671291+chendiw@users.noreply.github.com> Date: Tue, 21 Apr 2020 15:14:31 -0700 Subject: [PATCH 131/335] moved imports under 
functions in train.py (#903) * deleting unworking params from SumoChangeLaneParams * deleted unworking params, sublane working in highway : * moved imports inside functions * Apply suggestions from code review * bug fixes * bug fix Co-authored-by: Aboudy Kreidieh --- examples/train.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/examples/train.py b/examples/train.py index 1689d846f..d9e7dde07 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,6 +124,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -172,7 +175,12 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ + from ray import tune from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -404,6 +412,9 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" + from stable_baselines.common.vec_env import DummyVecEnv + from stable_baselines import PPO2 + flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From a4c7d67758bd4187f176e1b5f1f63bc12a10af81 Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Thu, 7 May 2020 23:51:53 -0700 Subject: [PATCH 132/335] get not departed vehicles (#922) * added function to kernel/vehicle to get number of not departed vehiles * fixed over indentation of the docstring * indentation edit * pep8 Co-authored-by: AboudyKreidieh --- flow/core/kernel/vehicle/traci.py | 8 ++++++++ 1 
file changed, 8 insertions(+) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b06ab112b..1c814b7b4 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -225,6 +225,10 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the number of not departed vehicles + self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ + sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] + # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: @@ -552,6 +556,10 @@ def get_num_not_departed(self): """See parent class.""" return self.num_not_departed + def get_num_not_departed(self): + """See parent class.""" + return self.num_not_departed + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From 36e8851f7f7ae71a25b2d5ca5a927396b9e1e41a Mon Sep 17 00:00:00 2001 From: Yashar Zeinali Farid <34227133+Yasharzf@users.noreply.github.com> Date: Sat, 9 May 2020 15:31:44 -0700 Subject: [PATCH 133/335] changed _departed_ids, and _arrived_ids in the update function (#926) * changed _departed_ids, and _arrived_ids in the update function * fixed bug in get_departed_ids and get_arrived_ids --- flow/core/kernel/vehicle/traci.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 1c814b7b4..bdf94579a 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -552,14 +552,6 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons - def get_num_not_departed(self): - """See parent class.""" - return self.num_not_departed - - def get_num_not_departed(self): - """See parent 
class.""" - return self.num_not_departed - def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): From ebb29215ad82c0b2a6b89625ea1b899b5587420a Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 134/335] Add an on ramp option --- .../exp_configs/non_rl/i210_subnetwork.py | 64 ++++++++++++++++++- 1 file changed, 63 insertions(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..474d7335e 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -2,8 +2,13 @@ import os import numpy as np +<<<<<<< HEAD from flow.controllers import IDMController from flow.controllers import I210Router +======= +from flow.controllers.car_following_models import IDMController +from flow.controllers.routing_controllers import I210Router +>>>>>>> Add an on ramp option from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -15,6 +20,7 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION +<<<<<<< HEAD # =========================================================================== # # Specify some configurable constants. 
# # =========================================================================== # @@ -72,6 +78,37 @@ }), routing_controller=(I210Router, {}) if ON_RAMP else None, ) +======= +ON_RAMP = True + +if ON_RAMP: + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + routing_controller=(I210Router, {}) + ) + +else: + # create the base vehicle type that will be used for inflows + vehicles = VehicleParams() + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) +>>>>>>> Add an on ramp option inflow = InFlows() # main highway @@ -86,6 +123,7 @@ inflow.add( veh_type="human", edge="27414345", +<<<<<<< HEAD vehs_per_hour=500, departLane="random", departSpeed=10) @@ -99,6 +137,21 @@ # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. 
# # =========================================================================== # +======= + vehs_per_hour=321, + departLane="random", + departSpeed=20) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=421, + departLane="random", + departSpeed=20) + +NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") +>>>>>>> Add an on ramp option flow_params = dict( # name of the experiment @@ -117,24 +170,33 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( +<<<<<<< HEAD horizon=10000, +======= + horizon=7200, +>>>>>>> Add an on ramp option ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, +<<<<<<< HEAD template=net_template, additional_params={ "on_ramp": ON_RAMP, "ghost_edge": WANT_GHOST_CELL, } +======= + template=NET_TEMPLATE, + additional_params={"use_on_ramp": ON_RAMP} +>>>>>>> Add an on ramp option ), # vehicles to be placed in the network at the start of a rollout (see From e4c02bb1f5513e905f2ea0c5e635d3946fe4d38a Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 11:32:12 -0700 Subject: [PATCH 135/335] Increased inflows to 10800 to match density in Bennis ring --- .../exp_configs/non_rl/i210_subnetwork.py | 66 +------------------ 1 file changed, 2 insertions(+), 64 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 474d7335e..3704a7a1c 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -2,13 +2,8 @@ import os import numpy as np -<<<<<<< HEAD from flow.controllers import IDMController from flow.controllers import I210Router -======= -from flow.controllers.car_following_models 
import IDMController -from flow.controllers.routing_controllers import I210Router ->>>>>>> Add an on ramp option from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -20,7 +15,6 @@ from flow.envs import TestEnv from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -<<<<<<< HEAD # =========================================================================== # # Specify some configurable constants. # # =========================================================================== # @@ -78,37 +72,6 @@ }), routing_controller=(I210Router, {}) if ON_RAMP else None, ) -======= -ON_RAMP = True - -if ON_RAMP: - vehicles = VehicleParams() - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - routing_controller=(I210Router, {}) - ) - -else: - # create the base vehicle type that will be used for inflows - vehicles = VehicleParams() - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - ) ->>>>>>> Add an on ramp option inflow = InFlows() # main highway @@ -123,7 +86,6 @@ inflow.add( veh_type="human", edge="27414345", -<<<<<<< HEAD vehs_per_hour=500, departLane="random", departSpeed=10) @@ -137,21 +99,6 @@ # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. 
# # =========================================================================== # -======= - vehs_per_hour=321, - departLane="random", - departSpeed=20) - inflow.add( - veh_type="human", - edge="27414342#0", - vehs_per_hour=421, - departLane="random", - departSpeed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") ->>>>>>> Add an on ramp option flow_params = dict( # name of the experiment @@ -170,33 +117,24 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( -<<<<<<< HEAD horizon=10000, -======= - horizon=7200, ->>>>>>> Add an on ramp option ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, -<<<<<<< HEAD template=net_template, additional_params={ "on_ramp": ON_RAMP, "ghost_edge": WANT_GHOST_CELL, } -======= - template=NET_TEMPLATE, - additional_params={"use_on_ramp": ON_RAMP} ->>>>>>> Add an on ramp option ), # vehicles to be placed in the network at the start of a rollout (see @@ -225,4 +163,4 @@ "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( edge_id)) / (env.k.network.edge_length(edge_id) * env.k.network.num_lanes(edge_id)), -} +} \ No newline at end of file From 505d646beb9814daaa527f417740f8309a9f1c85 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 19 Mar 2020 12:10:07 -0700 Subject: [PATCH 136/335] Upgrade the network to not have keepclear value on the junctions --- .../exp_configs/templates/sumo/test2.net.xml | 78 ++++++++++++++----- 1 file changed, 57 insertions(+), 21 deletions(-) diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 00e3edcd5..16170b917 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ 
b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,5 +1,41 @@ + + @@ -4680,24 +4716,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4801,10 +4837,10 @@ - + - - + + From 7d52445fdaa2f6ef358bad6cd58f6b26775a4f36 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 24 Mar 2020 22:49:17 -0700 Subject: [PATCH 137/335] Add 1 lane highway network for Benni --- examples/exp_configs/non_rl/highway.py | 40 +++++++++----------------- flow/networks/highway.py | 2 +- 2 files changed, 15 insertions(+), 27 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index e7505f2d7..1905e2f7f 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,25 +5,19 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import LaneChangeAccelEnv +from flow.envs import TestEnv vehicles = VehicleParams() vehicles.add( - veh_id="human", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) -vehicles.add( - veh_id="human2", - acceleration_controller=(IDMController, {}), - lane_change_params=SumoLaneChangeParams( - model="SL2015", - lc_sublane=2.0, - ), - num_vehicles=20) + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=(IDMController, { + "a": 0.3, "b": 2.0, "noise": 0.5 + }), + ) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -31,13 +25,7 @@ inflow.add( veh_type="human", edge="highway_0", - probability=0.25, - departLane="free", - departSpeed=20) -inflow.add( - veh_type="human2", - edge="highway_0", - probability=0.25, + vehs_per_hour=10800 / 5.0, departLane="free", departSpeed=20) @@ -47,7 +35,7 @@ 
exp_tag='highway', # name of the flow environment the experiment is running on - env_name=LaneChangeAccelEnv, + env_name=TestEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -58,12 +46,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - lateral_resolution=1.0, + sim_step=0.5 ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=1500, + horizon=4000, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/flow/networks/highway.py b/flow/networks/highway.py index 6f10d3279..871e7f415 100644 --- a/flow/networks/highway.py +++ b/flow/networks/highway.py @@ -9,7 +9,7 @@ # length of the highway "length": 1000, # number of lanes - "lanes": 4, + "lanes": 1, # speed limit for all edges "speed_limit": 30, # number of edges to divide the highway into From c3b2a51aa3fcf2c60c0678e7e3c385febf11d867 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 138/335] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 224 +----------------------------- flow/core/kernel/vehicle/base.py | 4 + flow/core/kernel/vehicle/traci.py | 3 + 7 files changed, 107 insertions(+), 231 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, 
how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + 
+ # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + 
+testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 1f0cce355..63c92e798 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,223 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from 
flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_table, upload_to_s3 -import datetime -import logging -import time -import os -import numpy as np - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. - - This class acts as a runner for a network and environment. In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) # see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. 
- - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. - env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. - - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. 
- - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = {"time": [], "id": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], - "leader_rel_speed": [], "accel_without_noise": [], "road_grade": []} - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. 
- veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(veh_ids[0]) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed(self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if type(done) is dict and done['__all__'] or type(done) is not dict and done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - output_file = generate_trajectory_table(emission_path[:-4] + ".csv", extra_info, partition_name) - - if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + output_file.split('/')[-1], output_file) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 0c992503c..3c285697f 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -697,6 +697,10 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """ see traci class """ raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """ see traci class """ raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index bdf94579a..889528b36 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1140,6 +1140,9 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): return self.__vehicles[veh_id]["accel_without_noise"] + def get_2D_position(self, veh_id, error=-1001): + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_road_grade(self, veh_id): # TODO return 0 From dc881e06442f642538320c1792dec529abad6086 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 139/335] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ examples/simulate.py | 8 +++++++- flow/core/experiment.py | 2 +- 4 files changed, 60 insertions(+), 4 deletions(-) 
create mode 100644 examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path): the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which query should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis query + if a string of list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) 
return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file diff --git a/examples/simulate.py b/examples/simulate.py index f54bb38d9..69e11b2fb 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -54,6 +54,12 @@ def parse_args(args): help='Specifies the name of the partition to store the output' 'file on S3. Putting not None value for this argument' 'automatically set gen_emission to True.') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser.parse_known_args(args)[0] @@ -91,4 +97,4 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. 
- exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 63c92e798..12391f9ae 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. 
>>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None): """Run the given network for a set number of runs. 
Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = self.env.network.name for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = emission_path[:-4] + "_trajectory.csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1], upload_file_path) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. 
>>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. 
Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file From ee1188ec7b5796aeb96bc7de89c5d9bfd10168de Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 140/335] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 240 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 239 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 12391f9ae..80d607e7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,239 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 import datetime import logging import time import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) 
# see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. 
The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. 
If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], "road_grade": [], "source_id": []} source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline for vid in veh_ids: extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) position = self.env.k.vehicle.get_2D_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) 
extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +import datetime +import logging +import time +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. In order to use + it to run an network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. 
+ + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. + create_env, _ = make_create_env(flow_params) + + # Create the environment. + self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. 
+ + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. + only_query: str + Specifies whether queries should be automatically run the + simulation data when it gets uploaded to s3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. 
If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], + "road_grade": [], "source_id": []} + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the velocity speeds and cumulative returns. 
+ veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + for vid in veh_ids: + extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) + extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) + extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) + extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) + extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( + self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) + extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) + extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) + position = self.env.k.vehicle.get_2D_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict. + outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. 
+ for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. + os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From 65c9ee061541b4e9660bf54d241a603dabf77e95 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 141/335] fix style issue --- examples/data_pipeline.py | 113 +++++++++++++++++++----------- examples/datapipeline_test.py | 4 ++ examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/core/kernel/vehicle/base.py | 12 ++-- flow/core/kernel/vehicle/traci.py | 5 ++ 7 files changed, 110 insertions(+), 46 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data 
pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. 
A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """ upload a file to S3 bucket + """Upload a file to S3 bucket. Parameters ---------- @@ -89,15 +90,40 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): class AthenaQuery: + """ + Class used to run query. + + Act as a query engine, maintains an open session with AWS Athena. + + Attributes + ---------- + MAX_WAIT: int + maximum number of seconds to wait before declares time-out + client: boto3.client + the athena client that is used to run the query + existing_partitions: list + a list of partitions that is already recorded in Athena's datalog, + this is obtained through query at the initialization of this class + instance. + """ def __init__(self): + """Initialize AthenaQuery instance. + + initialize a client session with AWS Athena, + query Athena to obtain extisting_partition. 
+ """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. + Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. 
Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be tested + Raises + ------ + RuntimeError: if timeout + """ if query_name not in testing_functions: raise ValueError("no tests supported for this query") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py index 564060d3b..ae0ea382f 100644 --- a/examples/datapipeline_test.py +++ b/examples/datapipeline_test.py @@ -1,3 +1,4 @@ +"""functions that calculates the expected result for testing.""" import math # Vehicle Mass @@ -17,10 +18,12 @@ def heavyside(inp): + """Return 1 if input is positive.""" return 0 if inp <= 0 else 1 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + """Calculate the expected power for POWER_DEMAND_MODEL query.""" acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) rolling_friction = M * g * C_r * mu @@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): + """Apply the power calculation to a row of the dataframe.""" return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/lambda_function.py b/examples/lambda_function.py index 01ce1512a..4f7937c85 100644 --- a/examples/lambda_function.py +++ b/examples/lambda_function.py @@ -1,3 +1,4 @@ +"""lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus from examples.data_pipeline import AthenaQuery @@ -8,6 +9,15 @@ def lambda_handler(event, context): + """Invoke by AWS Lambda upon triggered by an event. 
+ + Parameters + ---------- + event : dic < str: dic > + an S3 event + context: + not used + """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/examples/query.py b/examples/query.py index 6354cec3b..0f0ee13b4 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,15 +1,20 @@ +"""stores all the pre-defined query strings.""" from enum import Enum from examples.datapipeline_test import apply_energy_one -tags = {"energy": ["ENERGY_ONE"]} +# tags for different queries +tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} -testing_functions = {"ENERGY_ONE": apply_energy_one} +# specify the function to calculate the expected result of each query +testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} class QueryStrings(Enum): + """An enumeration of all the pre-defined query strings.""" + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ diff --git a/examples/run_query.py b/examples/run_query.py index ea8839b09..64baa6656 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,3 +1,4 @@ +"""runner script for invoking query manually.""" import argparse from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 3c285697f..080162c7b 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ 
-690,17 +690,21 @@ def get_max_speed(self, veh_id, error): ########################################################################### def get_accel(self, veh_id): - """ see traci class """ + """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError def update_accel_without_noise(self, veh_id, accel_without_noise): - """ see traci class """ + """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): - """ see traci class """ + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_road_grade(self, veh_id): + """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 889528b36..b473a1fa7 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1132,17 +1132,22 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): + """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def update_accel_without_noise(self, veh_id, accel_without_noise): + """See parent class.""" self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise def get_accel_without_noise(self, veh_id): + """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] def get_2D_position(self, veh_id, error=-1001): + """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) def get_road_grade(self, veh_id): + """See parent class.""" # TODO return 0 From 5a3ff57fb2d70f2736a9f1ba091aa5730d7006d4 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 142/335] reorganized 
file locations --- {examples => flow/data_pipeline}/data_pipeline.py | 0 {examples => flow/data_pipeline}/datapipeline_test.py | 0 {examples => flow/data_pipeline}/lambda_function.py | 0 {examples => flow/data_pipeline}/query.py | 0 {examples => flow/data_pipeline}/run_query.py | 0 5 files changed, 0 insertions(+), 0 deletions(-) rename {examples => flow/data_pipeline}/data_pipeline.py (100%) rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) rename {examples => flow/data_pipeline}/lambda_function.py (100%) rename {examples => flow/data_pipeline}/query.py (100%) rename {examples => flow/data_pipeline}/run_query.py (100%) diff --git a/examples/data_pipeline.py b/flow/data_pipeline/data_pipeline.py similarity index 100% rename from examples/data_pipeline.py rename to flow/data_pipeline/data_pipeline.py diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py similarity index 100% rename from examples/datapipeline_test.py rename to flow/data_pipeline/datapipeline_test.py diff --git a/examples/lambda_function.py b/flow/data_pipeline/lambda_function.py similarity index 100% rename from examples/lambda_function.py rename to flow/data_pipeline/lambda_function.py diff --git a/examples/query.py b/flow/data_pipeline/query.py similarity index 100% rename from examples/query.py rename to flow/data_pipeline/query.py diff --git a/examples/run_query.py b/flow/data_pipeline/run_query.py similarity index 100% rename from examples/run_query.py rename to flow/data_pipeline/run_query.py From ddc53fb03ae5474c6c2faf2627feb11a6bdac7da Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: [PATCH 143/335] fix some more style issues --- examples/simulate.py | 3 ++- flow/controllers/base_controller.py | 10 +++++----- flow/core/experiment.py | 3 ++- flow/core/kernel/vehicle/base.py | 2 +- flow/core/kernel/vehicle/traci.py | 2 +- flow/data_pipeline/__init__.py | 1 + flow/data_pipeline/data_pipeline.py | 3 +-- 
flow/data_pipeline/datapipeline_test.py | 2 +- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/query.py | 12 ++++++------ flow/data_pipeline/run_query.py | 8 ++++---- flow/envs/base.py | 1 - 12 files changed, 25 insertions(+), 24 deletions(-) create mode 100644 flow/data_pipeline/__init__.py diff --git a/examples/simulate.py b/examples/simulate.py index 69e11b2fb..86d14aa14 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -97,4 +97,5 @@ def parse_args(args): exp = Experiment(flow_params, callables) # Run for the specified number of rollouts. - exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, only_query=flags.only_query) + exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, partition_name=flags.to_aws, + only_query=flags.only_query) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 6e6734764..7adcdf310 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -110,18 +110,18 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noice = accel + accel_without_noise = accel if self.fail_safe == 'instantaneous': - accel_without_noice = self.get_safe_action_instantaneous(env, accel_without_noice) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': - accel_without_noice = self.get_safe_velocity_action(env, accel_without_noice) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noice) + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - # run the failsafes, if requested + # run the fail-safes, if requested if 
self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80d607e7d..aa5028836 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -231,7 +231,8 @@ def rl_actions(*_): if partition_name: upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 080162c7b..1b729d159 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -697,7 +697,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index b473a1fa7..81d759988 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1143,7 +1143,7 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] - def get_2D_position(self, veh_id, error=-1001): + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py new file mode 100644 index 000000000..622e09b06 --- /dev/null +++ b/flow/data_pipeline/__init__.py @@ -0,0 
+1 @@ +"""Empty init file to ensure that data_pipeline is recognized as a package""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 03b0f87e5..afbc09f92 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,7 @@ import pandas as pd import numpy as np import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings, testing_functions from time import time diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 4f7937c85..afef55a4b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0f0ee13b4..af1b51ce7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,6 +1,6 @@ """stores all the pre-defined query strings.""" from enum import Enum -from examples.datapipeline_test import apply_energy_one +from 
flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} @@ -15,8 +15,8 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ No newline at end of file + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index 64baa6656..f065a726e 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,10 +1,10 @@ """runner script for invoking query manually.""" import argparse -from examples.data_pipeline import AthenaQuery, test_sql_query -from examples.query import QueryStrings +from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") + "a S3 location") parser.add_argument("--run", type=str, nargs="+") parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") parser.add_argument("--partition", type=str, nargs='?', 
default="default") @@ -34,4 +34,4 @@ for q in QueryStrings: print(q) if args.test_query: - test_sql_query(args.test_query[0]) \ No newline at end of file + test_sql_query(args.test_query[0]) diff --git a/flow/envs/base.py b/flow/envs/base.py index f2067d947..cf1674355 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -417,7 +417,6 @@ def step(self, rl_actions): "**********************************************************" ) - # compute the info for each agent infos = {} From e7ac1a9afa6513f0cb425a2e37c3db26b259f6f0 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 13:02:33 -0700 Subject: [PATCH 144/335] fix one more style issue --- flow/data_pipeline/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/__init__.py b/flow/data_pipeline/__init__.py index 622e09b06..d9d6a6573 100644 --- a/flow/data_pipeline/__init__.py +++ b/flow/data_pipeline/__init__.py @@ -1 +1 @@ -"""Empty init file to ensure that data_pipeline is recognized as a package""" +"""Empty init file to ensure that data_pipeline is recognized as a package.""" From c97021992460a6d628ad769c289975f83bdf9628 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sat, 9 May 2020 22:06:30 -0700 Subject: [PATCH 145/335] added new two new quries --- flow/core/experiment.py | 4 ++-- flow/core/kernel/vehicle/base.py | 4 ++++ flow/core/kernel/vehicle/traci.py | 4 ++++ flow/data_pipeline/query.py | 38 ++++++++++++++++++++++++++++++- 4 files changed, 47 insertions(+), 3 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index aa5028836..37fcb03af 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from examples.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, 
upload_to_s3 import datetime import logging import time @@ -178,7 +178,7 @@ def rl_actions(*_): self.env.k.vehicle.get_leader(vid)) - self.env.k.vehicle.get_speed(vid)) extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2D_position(vid) + position = self.env.k.vehicle.get_2d_position(vid) extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 1b729d159..7609cf252 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -705,6 +705,10 @@ def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError + def get_velocity_without_noise(self, veh_id): + """Return the velocity without noise of vehicle with veh_id.""" + raise NotImplementedError + def get_road_grade(self, veh_id): """Return the road-grade of the vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 81d759988..1f697f046 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1143,6 +1143,10 @@ def get_accel_without_noise(self, veh_id): """See parent class.""" return self.__vehicles[veh_id]["accel_without_noise"] + def get_velocity_without_noise(self, veh_id): + """See parent class.""" + return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0]) + def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index af1b51ce7..0c87b3dcc 100644 --- a/flow/data_pipeline/query.py +++ 
b/flow/data_pipeline/query.py @@ -3,7 +3,8 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], + "analysis": ["POWER_DEMAND_MODEL"]} # specify the function to calculate the expected result of each query testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} @@ -20,3 +21,38 @@ class QueryStrings(Enum): "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL = \ + "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ + "WITH sub1 AS ( " \ + "SELECT" \ + "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "time - LAG(time, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "LAG(speed, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed," \ + "LAG(acceleration, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "LAG(accel_without_noise, 1) " \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ + "FROM trajectory_table" \ + "WHERE partition_name=\'{partition}\'" \ + ")," \ + "sub2 AS (" \ + "SELECT time, id, speed, acceleration, accel_without_noise, " \ + "road_grade, source_id, " \ + 
"speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "FROM sub1" \ + ") " \ + "SELECT id, time, speed_denoised, accel_without_noise," \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ + "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ + "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ + "FROM sub2 " From 3b10524a6830986f3ec446907a9655a08c3f85dd Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 10 May 2020 23:03:35 -0700 Subject: [PATCH 146/335] including next_V for testing only --- flow/core/experiment.py | 1 + flow/core/kernel/vehicle/traci.py | 15 ++++++++++- flow/data_pipeline/query.py | 41 ++++++++++++++++--------------- 3 files changed, 36 insertions(+), 21 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 37fcb03af..8b5cbac02 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -182,6 +182,7 @@ def rl_actions(*_): extra_info["x"].append(position[0]) extra_info["y"].append(position[1]) extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) + #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. 
diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 1f697f046..13ca8efa6 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -967,8 +967,10 @@ def apply_acceleration(self, veh_ids, acc): for i, vid in enumerate(veh_ids): if acc[i] is not None and vid in self.get_ids(): + self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) + #self.__vehicles[vid]["next_v"] = next_vel self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1131,9 +1133,18 @@ def set_max_speed(self, veh_id, max_speed): self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) # add for data pipeline + def get_next_v(self, veh_id): + """See parent class.""" + if not "next_v" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["next_v"] = None + return self.__vehicles[veh_id]["next_v"] + #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + def get_accel(self, veh_id): """See parent class.""" - return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step + if not "accel" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel"] = None + return self.__vehicles[veh_id]["accel"] def update_accel_without_noise(self, veh_id, accel_without_noise): """See parent class.""" @@ -1141,6 +1152,8 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" + if not "accel_without_noise" in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] def get_velocity_without_noise(self, veh_id): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0c87b3dcc..9054364e6 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,44 +15,45 @@ class QueryStrings(Enum): SAMPLE = 
"SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, 1200 * speed * " \ + "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" + "WHERE partition_name=\'{partition}\' " \ + "ORDER BY id, time " POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ "WITH sub1 AS ( " \ - "SELECT" \ - "time, id, speed, acceleration, accel_without_noise, road_grade, source_id," \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS 
prev_speed," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ "LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel," \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised" \ - "FROM trajectory_table" \ + "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ + "FROM trajectory_table " \ "WHERE partition_name=\'{partition}\'" \ ")," \ "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, " \ - "road_grade, source_id, " \ - "speed-prev_accel*sim_step+prev_accel_denoised*sim_step AS speed_denoised" \ + "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ + "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ "FROM sub1" \ ") " \ - "SELECT id, time, speed_denoised, accel_without_noise," \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ + "SELECT id, time, speed_denoised, accel_without_noise, " \ + "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id" \ - "FROM sub2 " + "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ + "FROM sub2 " \ + "ORDER BY id, time " From 638f9b4ff1a7baec698264f2f2cdbb35d507b669 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:25:00 -0700 Subject: [PATCH 147/335] change the bucket to a common bucket --- flow/core/experiment.py | 29 +++++++---------------- 
flow/core/kernel/vehicle/base.py | 4 ++-- flow/core/kernel/vehicle/traci.py | 13 +++------- flow/data_pipeline/data_pipeline.py | 34 ++++++++++++++++++++++++--- flow/data_pipeline/lambda_function.py | 4 ++-- flow/visualize/i210_replay.py | 14 ++++++++--- 6 files changed, 57 insertions(+), 41 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8b5cbac02..2296ef635 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,10 +1,11 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3 +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time +from datetime import date import os import numpy as np import uuid @@ -145,9 +146,7 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "leader_id": [], "follower_id": [], "leader_rel_speed": [], "accel_without_noise": [], - "road_grade": [], "source_id": []} + extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): @@ -167,22 +166,7 @@ def rl_actions(*_): ret += reward # collect additional information for the data pipeline - for vid in veh_ids: - extra_info["time"].append(self.env.k.vehicle.get_timestep(vid) / 1000) - extra_info["id"].append(vid) - extra_info["headway"].append(self.env.k.vehicle.get_headway(vid)) - extra_info["acceleration"].append(self.env.k.vehicle.get_accel(vid)) - extra_info["leader_id"].append(self.env.k.vehicle.get_leader(vid)) - extra_info["follower_id"].append(self.env.k.vehicle.get_follower(vid)) - extra_info["leader_rel_speed"].append(self.env.k.vehicle.get_speed( - self.env.k.vehicle.get_leader(vid)) - 
self.env.k.vehicle.get_speed(vid)) - extra_info["accel_without_noise"].append(self.env.k.vehicle.get_accel_without_noise(vid)) - extra_info["road_grade"].append(self.env.k.vehicle.get_road_grade(vid)) - position = self.env.k.vehicle.get_2d_position(vid) - extra_info["x"].append(position[0]) - extra_info["y"].append(position[1]) - extra_info["speed"].append(self.env.k.vehicle.get_speed(vid)) - #extra_info["next_v"].append(self.env.k.vehicle.get_next_v(vid)) + get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. @@ -231,7 +215,10 @@ def rl_actions(*_): upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: - upload_to_s3('brent.experiments', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + if partition_name == "default": + partition_name = source_id[0:3] + partition_name = date.today().isoformat() + " " + partition_name + upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 7609cf252..647ef37fe 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -705,8 +705,8 @@ def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError - def get_velocity_without_noise(self, veh_id): - """Return the velocity without noise of vehicle with veh_id.""" + def get_realized_accel(self, veh_id): + """Return the acceleration that the vehicle actually makes.""" raise NotImplementedError def get_road_grade(self, veh_id): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 13ca8efa6..f40eed99c 100644 --- 
a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1133,13 +1133,6 @@ def set_max_speed(self, veh_id, max_speed): self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) # add for data pipeline - def get_next_v(self, veh_id): - """See parent class.""" - if not "next_v" in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["next_v"] = None - return self.__vehicles[veh_id]["next_v"] - #return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step - def get_accel(self, veh_id): """See parent class.""" if not "accel" in self.__vehicles[veh_id]: @@ -1156,9 +1149,9 @@ def get_accel_without_noise(self, veh_id): self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] - def get_velocity_without_noise(self, veh_id): + def get_realized_accel(self, veh_id): """See parent class.""" - return max([self.get_speed(veh_id) + self.get_accel_without_noise(veh_id) * self.sim_step, 0]) + return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def get_2d_position(self, veh_id, error=-1001): """See parent class.""" @@ -1166,5 +1159,5 @@ def get_2d_position(self, veh_id, error=-1001): def get_road_grade(self, veh_id): """See parent class.""" - # TODO + # TODO : Brent return 0 diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index afbc09f92..0cd0cbc79 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,6 +88,34 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return +def extra_init(): + """Return the dictionary with all the feild pre-populated with empty list.""" + extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + "leader_rel_speed": [], "road_grade": [], "source_id": []} + return extra_info + + +def 
get_extra_info(veh_kernel, extra_info, veh_ids): + """Get all the necessary information for the trajectory output from flow.""" + for vid in veh_ids: + extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000) + extra_info["id"].append(vid) + extra_info["headway"].append(veh_kernel.get_headway(vid)) + extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["leader_id"].append(veh_kernel.get_leader(vid)) + extra_info["follower_id"].append(veh_kernel.get_follower(vid)) + extra_info["leader_rel_speed"].append(veh_kernel.get_speed( + veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) + extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) + extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) + position = veh_kernel.get_2d_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + extra_info["speed"].append(veh_kernel.get_speed(vid)) + + class AthenaQuery: """ Class used to run query. @@ -199,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -257,14 +285,14 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index afef55a4b..3f0abb8a1 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if bucket == 'circles.data' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index a37bac95b..c50f12a05 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -32,6 +32,9 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import 
flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +import uuid + EXAMPLE_USAGE = """ example usage: python i210_replay.py -r /ray_results/experiment_dir/result_dir -c 1 @@ -205,9 +208,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) - i = 0 - while i < args.num_rollouts: - print("Rollout iter", i) + extra_info = extra_init() + source_id = uuid.uuid4().hex + + for i in range(args.num_rollouts): vel = [] per_vehicle_energy_trace = defaultdict(lambda: []) completed_veh_types = {} @@ -243,6 +247,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= veh_ids = env.k.vehicle.get_ids() vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) + # Collect information from flow for the trajectory output + get_extra_info(env.k.vehicle, extra_info, veh_ids) + extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids)) + # Compute the results for the custom callables. 
for (key, lambda_func) in custom_callables.items(): custom_vals[key].append(lambda_func(env)) From bc8584a30d3736169d9c0f985ddc677d34144dfd Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 18 May 2020 12:28:17 -0700 Subject: [PATCH 148/335] removed the old tests --- flow/data_pipeline/datapipeline_test.py | 37 ------------------------- 1 file changed, 37 deletions(-) delete mode 100644 flow/data_pipeline/datapipeline_test.py diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py deleted file mode 100644 index 0e1a50518..000000000 --- a/flow/data_pipeline/datapipeline_test.py +++ /dev/null @@ -1,37 +0,0 @@ -"""functions that calculates the expected result for testing.""" -import math - -# Vehicle Mass -M = 1200 -# Gravity -g = 9.81 -# Density of Air -ro_air = 1.225 -# Rolling resistance coefficient -C_r = .005 -# Aerodynamic drag coefficient -C_a = 0.3 -# Vehicle Cross sectional Area -A = 2.6 -# Road grade -theta = 0 - - -def heavyside(inp): - """Return 1 if input is positive.""" - return 0 if inp <= 0 else 1 - - -def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): - """Calculate the expected power for POWER_DEMAND_MODEL query.""" - acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) - accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) - rolling_friction = M * g * C_r * mu - air_drag = .5 * ro_air * A * C_a * mu**3 - power = accel_and_slope + rolling_friction + air_drag - return power - - -def apply_energy_one(row): - """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] From 0ee66469dcb5f21d542a57b464b3ad5fe7b11008 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 18 Mar 2020 16:43:22 -0700 Subject: [PATCH 149/335] Add an on ramp option --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 3704a7a1c..8970e6165 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), From 3af559503e36d69c4f1481ee405778aab01c6840 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 6 Apr 2020 15:28:57 -0700 Subject: [PATCH 150/335] datapip pipeline implemented --- examples/data_pipeline.py | 179 ++++++++++++++++++++++++++++++++++++++ examples/query.py | 8 ++ examples/run_query.py | 34 ++++++++ 3 files changed, 221 insertions(+) create mode 100644 examples/data_pipeline.py create mode 100644 examples/query.py create mode 100644 examples/run_query.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py new file mode 100644 index 000000000..5fdc30cf2 --- /dev/null +++ b/examples/data_pipeline.py @@ -0,0 +1,179 @@ +import pandas as pd +import boto3 +from botocore.exceptions import ClientError +from examples.query import QueryStrings +from time import time + + +def generate_trajectory_table(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based on standard SUMO emission + + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ + raw_output = pd.read_csv(data_path, index_col=["time", "id"]) + required_cols = {"time", "id", "speed", "x", "y"} + raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) + + extra_info = pd.DataFrame.from_dict(extra_info) + extra_info.set_index(["time", "id"]) + raw_output = raw_output.merge(extra_info, 
how="left", left_on=["time", "id"], right_on=["time", "id"]) + + # add the partition column + raw_output['partition'] = partition_name + + output_file_path = data_path[:-4]+"_trajectory.csv" + raw_output.to_csv(output_file_path, index=False) + return output_file_path + + +def upload_to_s3(bucket_name, bucket_key, file_path): + """ upload a file to S3 bucket + + Parameters + ---------- + bucket_name : str + the bucket to upload to + bucket_key: str + the key within the bucket for the file + file_path: str + the path of the file to be uploaded + """ + s3 = boto3.resource("s3") + s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + return + + +class AthenaQuery: + + def __init__(self): + self.MAX_WAIT = 60 + self.client = boto3.client("athena") + self.existing_partitions = self.get_existing_partitions() + + def get_existing_partitions(self): + """prints the existing partitions in the S3 bucket""" + + response = self.client.start_query_execution( + QueryString='SHOW PARTITIONS trajectory_table', + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("get current partitions timed out") + response = self.client.get_query_results( + QueryExecutionId=response['QueryExecutionId'], + MaxResults=1000 + ) + return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + + def check_status(self, execution_id): + """ Return the status of the execution with given id + + Parameters + ---------- + execution_id : string + id of the execution that is checked for + Returns + ------- + status: str + QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED + """ + + response = self.client.get_query_execution( + QueryExecutionId=execution_id + ) + return response['QueryExecution']['Status']['State'] + + def wait_for_execution(self, execution_id): + """ wait for the execution to finish or time-out + + Parameters + ---------- + execution_id : str + id of 
the execution this is waiting for + Returns + ------- + time_out: bool + True if time-out, False if success + Raises + ------ + RuntimeError: if execution failed or get canceled + """ + start = time() + while time() - start < self.MAX_WAIT: + state = self.check_status(execution_id) + if state == 'FAILED' or state == 'CANCELLED': + raise RuntimeError("update partition failed") + elif state == 'SUCCEEDED': + return False + return True + + def update_partition(self, partition): + """ load the given partition to the trajectory_table on Athena + + Parameters + ---------- + partition : str + the new partition that needs to be loaded + """ + response = self.client.start_query_execution( + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryExecutionContext={ + 'Database': 'simulation' + }, + WorkGroup='primary' + ) + if self.wait_for_execution(response['QueryExecutionId']): + raise RuntimeError("update partition timed out") + self.existing_partitions.append(partition) + return + + def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): + """ start the execution of a query, does not wait for it to finish + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be run + result_location: str, optional + location on the S3 bucket where the result will be stored + partition: str, optional + name of the partition to run this query on + Returns + ------- + execution_id: str + the execution id of the execution started by this method + Raises + ------ + ValueError: if tries to run a query not existed in QueryStrings enum + """ + if query_name not in QueryStrings.__members__: + raise ValueError("query not existed: please add it to query.py") + + if partition not in self.existing_partitions: + self.update_partition(partition) + + response = self.client.start_query_execution( + QueryString=QueryStrings[query_name].value.format(partition=partition), +
QueryExecutionContext={ + 'Database': 'simulation' + }, + ResultConfiguration={ + 'OutputLocation': result_location, + }, + WorkGroup='primary' + ) + return response['QueryExecutionId'] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py new file mode 100644 index 000000000..3fbbe69e1 --- /dev/null +++ b/examples/query.py @@ -0,0 +1,8 @@ +from enum import Enum + +tags = {} + + +class QueryStrings(Enum): + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py new file mode 100644 index 000000000..7b4a5af7d --- /dev/null +++ b/examples/run_query.py @@ -0,0 +1,34 @@ +import argparse +import sys +from examples.data_pipeline import AthenaQuery +from examples.query import QueryStrings + +parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" + "a S3 location") +parser.add_argument("--run", type=str, nargs="+") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--partition", type=str, nargs='?', default="default") +parser.add_argument("--list_partitions", action="store_true") +parser.add_argument("--check_status", type=str, nargs='+') +parser.add_argument("--list_queries", action="store_true") + + +if __name__ == "__main__": + args = parser.parse_args() + queryEngine = AthenaQuery() + + if args.run: + execution_ids = [] + for query_name in args.run: + execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + print(execution_ids) + if args.list_partitions: + print(queryEngine.existing_partitions) + if args.check_status: + status = dict() + for execution_id in args.check_status: + status[execution_id] = 
queryEngine.check_status(execution_id) + print(status) + if args.list_queries: + for q in QueryStrings: + print(q) From 8d4ad2904bb76afeb6c03cd8d90d8ea1e038df15 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 10 Apr 2020 19:54:30 -0700 Subject: [PATCH 151/335] multiple runs issue solved, testing added --- examples/data_pipeline.py | 55 +++++++- examples/datapipeline_test.py | 33 +++++ examples/query.py | 13 +- examples/run_query.py | 6 +- flow/core/experiment.py | 229 +------------------------------ flow/core/kernel/vehicle/base.py | 4 + 6 files changed, 104 insertions(+), 236 deletions(-) create mode 100644 examples/datapipeline_test.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 5fdc30cf2..9d56548c2 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -1,7 +1,8 @@ import pandas as pd +import numpy as np import boto3 from botocore.exceptions import ClientError -from examples.query import QueryStrings +from examples.query import QueryStrings, testing_functions from time import time @@ -30,13 +31,22 @@ def generate_trajectory_table(data_path, extra_info, partition_name): raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) # add the partition column - raw_output['partition'] = partition_name - + # raw_output['partition'] = partition_name + raw_output = raw_output.sort_values(by=["time", "id"]) output_file_path = data_path[:-4]+"_trajectory.csv" raw_output.to_csv(output_file_path, index=False) return output_file_path +def generate_trajectory_from_flow(data_path, extra_info, partition_name): + extra_info = pd.DataFrame.from_dict(extra_info) + # extra_info["partition"] = partition_name + extra_info.to_csv(data_path, index=False) + upload_only_file_path = data_path[:-4] + "_upload" + ".csv" + extra_info.to_csv(upload_only_file_path, index=False, header=False) + return upload_only_file_path + + def upload_to_s3(bucket_name, bucket_key, file_path): """ upload a file to 
S3 bucket @@ -176,4 +186,41 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re }, WorkGroup='primary' ) - return response['QueryExecutionId'] \ No newline at end of file + return response['QueryExecutionId'] + +########################################################################### +# Helpers for testing the SQL Queries # +########################################################################### + + +def test_sql_query(query_name): + if query_name not in testing_functions: + raise ValueError("no tests supported for this query") + + # Run the respective sql query + queryEngine = AthenaQuery() + execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", + partition="test") + if queryEngine.wait_for_execution(execution_id): + raise RuntimeError("execution timed out") + + # get the Athena query result from S3 + s3 = boto3.resource("s3") + s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") + athena_result = pd.read_csv("data/athena_result.csv") + athena_result = athena_result.sort_values(by=["time", "id"]) + + # get the python expected result + expected_result = pd.read_csv("data/test_data.csv") + expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") + expected_result.columns = ["time", "id", "power"] + expected_result = expected_result.sort_values(by=["time", "id"]) + + difference = athena_result["power"] - expected_result["power"] + print("average difference is: " + str(np.mean(difference))) + print("std of difference is: " + str(np.std(difference))) + print("average ratio of difference to expected is: " + + str(np.mean(np.divide(difference, expected_result["power"])))) + difference = pd.DataFrame(difference) + difference.to_csv("./difference.csv") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py new file mode 100644 index 
000000000..564060d3b --- /dev/null +++ b/examples/datapipeline_test.py @@ -0,0 +1,33 @@ +import math + +# Vehicle Mass +M = 1200 +# Gravity +g = 9.81 +# Density of Air +ro_air = 1.225 +# Rolling resistance coefficient +C_r = .005 +# Aerodynamic drag coefficient +C_a = 0.3 +# Vehicle Cross sectional Area +A = 2.6 +# Road grade +theta = 0 + + +def heavyside(inp): + return 0 if inp <= 0 else 1 + + +def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) + accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) + rolling_friction = M * g * C_r * mu + air_drag = .5 * ro_air * A * C_a * mu**3 + power = accel_and_slope + rolling_friction + air_drag + return power + + +def apply_energy_one(row): + return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/query.py b/examples/query.py index 3fbbe69e1..6354cec3b 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,8 +1,17 @@ from enum import Enum +from examples.datapipeline_test import apply_energy_one -tags = {} +tags = {"energy": ["ENERGY_ONE"]} + +testing_functions = {"ENERGY_ONE": apply_energy_one} class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" \ No newline at end of file + UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" + ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ + "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ + "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ + "FROM trajectory_table " \ + "WHERE partition_name=\'{partition}\'" \ No newline at end of 
file diff --git a/examples/run_query.py b/examples/run_query.py index 7b4a5af7d..ea8839b09 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,6 +1,5 @@ import argparse -import sys -from examples.data_pipeline import AthenaQuery +from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -11,6 +10,7 @@ parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') parser.add_argument("--list_queries", action="store_true") +parser.add_argument("--test_query", nargs=1) if __name__ == "__main__": @@ -32,3 +32,5 @@ if args.list_queries: for q in QueryStrings: print(q) + if args.test_query: + test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 2296ef635..97467adb5 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,228 +1 @@ -"""Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv -from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info -import datetime -import logging -import time -from datetime import date -import os -import numpy as np -import uuid - - -class Experiment: - """ - Class for systematically running simulations in any supported simulator. - - This class acts as a runner for a network and environment. In order to use - it to run an network and environment in the absence of a method specifying - the actions of RL agents in the network, type the following: - - >>> from flow.envs import Env - >>> flow_params = dict(...) 
# see the examples in exp_config - >>> exp = Experiment(flow_params) # for some experiment configuration - >>> exp.run(num_runs=1) - - If you wish to specify the actions of RL agents in the network, this may be - done as follows: - - >>> rl_actions = lambda state: 0 # replace with something appropriate - >>> exp.run(num_runs=1, rl_actions=rl_actions) - - Finally, if you would like to like to plot and visualize your results, this - class can generate csv files from emission files produced by sumo. These - files will contain the speeds, positions, edges, etc... of every vehicle - in the network at every time step. - - In order to ensure that the simulator constructs an emission file, set the - ``emission_path`` attribute in ``SimParams`` to some path. - - >>> from flow.core.params import SimParams - >>> flow_params['sim'] = SimParams(emission_path="./data") - - Once you have included this in your environment, run your Experiment object - as follows: - - >>> exp.run(num_runs=1, convert_to_csv=True) - - After the experiment is complete, look at the "./data" directory. There - will be two files, one with the suffix .xml and another with the suffix - .csv. The latter should be easily interpretable from any csv reader (e.g. - Excel), and can be parsed using tools such as numpy and pandas. - - Attributes - ---------- - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we want - to extract from the environment. The lambda will be called at each step - to extract information from the env and it will be stored in a dict - keyed by the str. - env : flow.envs.Env - the environment object the simulator will run - """ - - def __init__(self, flow_params, custom_callables=None): - """Instantiate the Experiment class. 
- - Parameters - ---------- - flow_params : dict - flow-specific parameters - custom_callables : dict < str, lambda > - strings and lambda functions corresponding to some information we - want to extract from the environment. The lambda will be called at - each step to extract information from the env and it will be stored - in a dict keyed by the str. - """ - self.custom_callables = custom_callables or {} - - # Get the env name and a creator for the environment. - create_env, _ = make_create_env(flow_params) - - # Create the environment. - self.env = create_env() - - logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) - - logging.info("Initializing environment.") - - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): - """Run the given network for a set number of runs. - - Parameters - ---------- - num_runs : int - number of runs the experiment should perform - rl_actions : method, optional - maps states to actions to be performed by the RL agents (if - there are any) - convert_to_csv : bool - Specifies whether to convert the emission file created by sumo - into a csv file - partition_name: str - Specifies the S3 partition you want to store the output file, - will be used to later for query. If NONE, won't upload output - to S3. - only_query: str - Specifies whether queries should be automatically run the - simulation data when it gets uploaded to s3 - - Returns - ------- - info_dict : dict < str, Any > - contains returns, average speed per step - """ - num_steps = self.env.env_params.horizon - - # raise an error if convert_to_csv is set to True but no emission - # file will be generated, to avoid getting an error at the end of the - # simulation - if convert_to_csv and self.env.sim_params.emission_path is None: - raise ValueError( - 'The experiment was run with convert_to_csv set ' - 'to True, but no emission file will be generated. 
If you wish ' - 'to generate an emission file, you should set the parameter ' - 'emission_path in the simulation parameters (SumoParams or ' - 'AimsunParams) to the path of the folder where emissions ' - 'output should be generated. If you do not wish to generate ' - 'emissions, set the convert_to_csv parameter to False.') - - # used to store - info_dict = { - "returns": [], - "velocities": [], - "outflows": [], - } - info_dict.update({ - key: [] for key in self.custom_callables.keys() - }) - - if rl_actions is None: - def rl_actions(*_): - return None - - # time profiling information - t = time.time() - times = [] - extra_info = extra_init() - source_id = uuid.uuid4().hex - - for i in range(num_runs): - ret = 0 - vel = [] - custom_vals = {key: [] for key in self.custom_callables.keys()} - state = self.env.reset() - for j in range(num_steps): - t0 = time.time() - state, reward, done, _ = self.env.step(rl_actions(state)) - t1 = time.time() - times.append(1 / (t1 - t0)) - - # Compute the velocity speeds and cumulative returns. - veh_ids = self.env.k.vehicle.get_ids() - vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) - ret += reward - - # collect additional information for the data pipeline - get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) - - # Compute the results for the custom callables. - for (key, lambda_func) in self.custom_callables.items(): - custom_vals[key].append(lambda_func(self.env)) - - if done: - break - - # Store the information from the run in info_dict. - outflow = self.env.k.vehicle.get_outflow_rate(int(500)) - info_dict["returns"].append(ret) - info_dict["velocities"].append(np.mean(vel)) - info_dict["outflows"].append(outflow) - for key in custom_vals.keys(): - info_dict[key].append(np.mean(custom_vals[key])) - - print("Round {0}, return: {1}".format(i, ret)) - - # Print the averages/std for all variables in the info_dict. 
- for key in info_dict.keys(): - print("Average, std {}: {}, {}".format( - key, np.mean(info_dict[key]), np.std(info_dict[key]))) - - print("Total time:", time.time() - t) - print("steps/second:", np.mean(times)) - self.env.terminate() - - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - # collect the location of the emission file - dir_path = self.env.sim_params.emission_path - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - - trajectory_table_path = './data/' + source_id + ".csv" - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) - - if partition_name: - if partition_name == "default": - partition_name = source_id[0:3] - partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', - upload_file_path, str(only_query)[2:-2]) - - # delete the S3-only version of the trajectory file - os.remove(upload_file_path) - - return info_dict +"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time from datetime import date import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. 
In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. 
Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. 
If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. 
for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 647ef37fe..16331ad08 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -701,6 +701,10 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError + def get_2D_position(self, veh_id, error=-1001): + """ see traci class """ + raise NotImplementedError + def get_accel_without_noise(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError From aa14dbf247bbe5610d4f3741ed81581152596293 Mon Sep 17 00:00:00 2001 From: 
Brent Zhao Date: Wed, 22 Apr 2020 05:22:01 -0700 Subject: [PATCH 152/335] added more support for lambda function --- examples/data_pipeline.py | 28 ++++++++++++++++++++++++++-- examples/lambda_function.py | 26 ++++++++++++++++++++++++++ 2 files changed, 52 insertions(+), 2 deletions(-) create mode 100644 examples/lambda_function.py diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 9d56548c2..28d3b5e73 100644 --- a/examples/data_pipeline.py +++ b/examples/data_pipeline.py @@ -39,6 +39,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): + """ generate desired output for the trajectory_table based only on flow output + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. 
A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -47,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): return upload_only_file_path -def upload_to_s3(bucket_name, bucket_key, file_path): +def upload_to_s3(bucket_name, bucket_key, file_path, only_query): """ upload a file to S3 bucket Parameters @@ -58,9 +76,15 @@ def upload_to_s3(bucket_name, bucket_key, file_path): the key within the bucket for the file file_path: str the path of the file to be uploaded + only_query: str + specify which query should be run on this file by lambda: + if empty: run none of them + if "all": run all available analysis query + if a string of list of queries: run only those mentioned in the list """ s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key) + s3.Bucket(bucket_name).upload_file(file_path, bucket_key, + ExtraArgs={"Metadata": {"run-query": only_query}}) return diff --git a/examples/lambda_function.py b/examples/lambda_function.py new file mode 100644 index 000000000..01ce1512a --- /dev/null +++ b/examples/lambda_function.py @@ -0,0 +1,26 @@ +import boto3 +from urllib.parse import unquote_plus +from examples.data_pipeline import AthenaQuery +from examples.query import tags + +s3 = boto3.client('s3') +queryEngine = AthenaQuery() + + +def lambda_handler(event, context): + for record in event['Records']: + bucket = record['s3']['bucket']['name'] + key = unquote_plus(record['s3']['object']['key']) + partition = key.split('/')[-2].split('=')[-1] + response = s3.head_object(Bucket=bucket, Key=key) + run_query = response["Metadata"]["run-query"] + + if bucket == 'brent.experiments' and 'trajectory-output/' in key: + if run_query == "all": + query_list = tags["analysis"] + elif not run_query: + break + else: + query_list = run_query.split("\', \'") + 
for query_name in query_list: + queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file From 00a526b43f8ee069c768b27629233b074ca60260 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 02:54:33 -0700 Subject: [PATCH 153/335] fix windoes line ending issue with experiment.py --- flow/core/experiment.py | 229 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 228 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 97467adb5..2296ef635 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1 +1,228 @@ -"""Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info import datetime import logging import time from datetime import date import os import numpy as np import uuid class Experiment: """ Class for systematically running simulations in any supported simulator. This class acts as a runner for a network and environment. In order to use it to run an network and environment in the absence of a method specifying the actions of RL agents in the network, type the following: >>> from flow.envs import Env >>> flow_params = dict(...) # see the examples in exp_config >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be done as follows: >>> rl_actions = lambda state: 0 # replace with something appropriate >>> exp.run(num_runs=1, rl_actions=rl_actions) Finally, if you would like to like to plot and visualize your results, this class can generate csv files from emission files produced by sumo. These files will contain the speeds, positions, edges, etc... of every vehicle in the network at every time step. 
In order to ensure that the simulator constructs an emission file, set the ``emission_path`` attribute in ``SimParams`` to some path. >>> from flow.core.params import SimParams >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: >>> exp.run(num_runs=1, convert_to_csv=True) After the experiment is complete, look at the "./data" directory. There will be two files, one with the suffix .xml and another with the suffix .csv. The latter should be easily interpretable from any csv reader (e.g. Excel), and can be parsed using tools such as numpy and pandas. Attributes ---------- custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. env : flow.envs.Env the environment object the simulator will run """ def __init__(self, flow_params, custom_callables=None): """Instantiate the Experiment class. Parameters ---------- flow_params : dict flow-specific parameters custom_callables : dict < str, lambda > strings and lambda functions corresponding to some information we want to extract from the environment. The lambda will be called at each step to extract information from the env and it will be stored in a dict keyed by the str. """ self.custom_callables = custom_callables or {} # Get the env name and a creator for the environment. create_env, _ = make_create_env(flow_params) # Create the environment. self.env = create_env() logging.info(" Starting experiment {} at {}".format( self.env.network.name, str(datetime.datetime.utcnow()))) logging.info("Initializing environment.") def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): """Run the given network for a set number of runs. 
Parameters ---------- num_runs : int number of runs the experiment should perform rl_actions : method, optional maps states to actions to be performed by the RL agents (if there are any) convert_to_csv : bool Specifies whether to convert the emission file created by sumo into a csv file partition_name: str Specifies the S3 partition you want to store the output file, will be used to later for query. If NONE, won't upload output to S3. only_query: str Specifies whether queries should be automatically run the simulation data when it gets uploaded to s3 Returns ------- info_dict : dict < str, Any > contains returns, average speed per step """ num_steps = self.env.env_params.horizon # raise an error if convert_to_csv is set to True but no emission # file will be generated, to avoid getting an error at the end of the # simulation if convert_to_csv and self.env.sim_params.emission_path is None: raise ValueError( 'The experiment was run with convert_to_csv set ' 'to True, but no emission file will be generated. If you wish ' 'to generate an emission file, you should set the parameter ' 'emission_path in the simulation parameters (SumoParams or ' 'AimsunParams) to the path of the folder where emissions ' 'output should be generated. If you do not wish to generate ' 'emissions, set the convert_to_csv parameter to False.') # used to store info_dict = { "returns": [], "velocities": [], "outflows": [], } info_dict.update({ key: [] for key in self.custom_callables.keys() }) if rl_actions is None: def rl_actions(*_): return None # time profiling information t = time.time() times = [] extra_info = extra_init() source_id = uuid.uuid4().hex for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} state = self.env.reset() for j in range(num_steps): t0 = time.time() state, reward, done, _ = self.env.step(rl_actions(state)) t1 = time.time() times.append(1 / (t1 - t0)) # Compute the velocity speeds and cumulative returns. 
veh_ids = self.env.k.vehicle.get_ids() vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) if done: break # Store the information from the run in info_dict. outflow = self.env.k.vehicle.get_outflow_rate(int(500)) info_dict["returns"].append(ret) info_dict["velocities"].append(np.mean(vel)) info_dict["outflows"].append(outflow) for key in custom_vals.keys(): info_dict[key].append(np.mean(custom_vals[key])) print("Round {0}, return: {1}".format(i, ret)) # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( key, np.mean(info_dict[key]), np.std(info_dict[key]))) print("Total time:", time.time() - t) print("steps/second:", np.mean(times)) self.env.terminate() if convert_to_csv and self.env.simulator == "traci": # wait a short period of time to ensure the xml file is readable time.sleep(0.1) # collect the location of the emission file dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) trajectory_table_path = './data/' + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + upload_file_path.split('/')[-1].split('_')[0] + '.csv', upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file os.remove(upload_file_path) return info_dict \ No newline at end of file +"""Contains an experiment class for running simulations.""" +from flow.core.util import emission_to_csv +from flow.utils.registry import make_create_env +from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, extra_init, get_extra_info +import datetime +import logging +import time +from datetime import date +import os +import numpy as np +import uuid + + +class Experiment: + """ + Class for systematically running simulations in any supported simulator. + + This class acts as a runner for a network and environment. In order to use + it to run an network and environment in the absence of a method specifying + the actions of RL agents in the network, type the following: + + >>> from flow.envs import Env + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration + >>> exp.run(num_runs=1) + + If you wish to specify the actions of RL agents in the network, this may be + done as follows: + + >>> rl_actions = lambda state: 0 # replace with something appropriate + >>> exp.run(num_runs=1, rl_actions=rl_actions) + + Finally, if you would like to like to plot and visualize your results, this + class can generate csv files from emission files produced by sumo. These + files will contain the speeds, positions, edges, etc... 
of every vehicle + in the network at every time step. + + In order to ensure that the simulator constructs an emission file, set the + ``emission_path`` attribute in ``SimParams`` to some path. + + >>> from flow.core.params import SimParams + >>> flow_params['sim'] = SimParams(emission_path="./data") + + Once you have included this in your environment, run your Experiment object + as follows: + + >>> exp.run(num_runs=1, convert_to_csv=True) + + After the experiment is complete, look at the "./data" directory. There + will be two files, one with the suffix .xml and another with the suffix + .csv. The latter should be easily interpretable from any csv reader (e.g. + Excel), and can be parsed using tools such as numpy and pandas. + + Attributes + ---------- + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we want + to extract from the environment. The lambda will be called at each step + to extract information from the env and it will be stored in a dict + keyed by the str. + env : flow.envs.Env + the environment object the simulator will run + """ + + def __init__(self, flow_params, custom_callables=None): + """Instantiate the Experiment class. + + Parameters + ---------- + flow_params : dict + flow-specific parameters + custom_callables : dict < str, lambda > + strings and lambda functions corresponding to some information we + want to extract from the environment. The lambda will be called at + each step to extract information from the env and it will be stored + in a dict keyed by the str. + """ + self.custom_callables = custom_callables or {} + + # Get the env name and a creator for the environment. + create_env, _ = make_create_env(flow_params) + + # Create the environment. 
+ self.env = create_env() + + logging.info(" Starting experiment {} at {}".format( + self.env.network.name, str(datetime.datetime.utcnow()))) + + logging.info("Initializing environment.") + + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + """Run the given network for a set number of runs. + + Parameters + ---------- + num_runs : int + number of runs the experiment should perform + rl_actions : method, optional + maps states to actions to be performed by the RL agents (if + there are any) + convert_to_csv : bool + Specifies whether to convert the emission file created by sumo + into a csv file + partition_name: str + Specifies the S3 partition you want to store the output file, + will be used to later for query. If NONE, won't upload output + to S3. + only_query: str + Specifies whether queries should be automatically run the + simulation data when it gets uploaded to s3 + + Returns + ------- + info_dict : dict < str, Any > + contains returns, average speed per step + """ + num_steps = self.env.env_params.horizon + + # raise an error if convert_to_csv is set to True but no emission + # file will be generated, to avoid getting an error at the end of the + # simulation + if convert_to_csv and self.env.sim_params.emission_path is None: + raise ValueError( + 'The experiment was run with convert_to_csv set ' + 'to True, but no emission file will be generated. If you wish ' + 'to generate an emission file, you should set the parameter ' + 'emission_path in the simulation parameters (SumoParams or ' + 'AimsunParams) to the path of the folder where emissions ' + 'output should be generated. 
If you do not wish to generate ' + 'emissions, set the convert_to_csv parameter to False.') + + # used to store + info_dict = { + "returns": [], + "velocities": [], + "outflows": [], + } + info_dict.update({ + key: [] for key in self.custom_callables.keys() + }) + + if rl_actions is None: + def rl_actions(*_): + return None + + # time profiling information + t = time.time() + times = [] + extra_info = extra_init() + source_id = uuid.uuid4().hex + + for i in range(num_runs): + ret = 0 + vel = [] + custom_vals = {key: [] for key in self.custom_callables.keys()} + state = self.env.reset() + for j in range(num_steps): + t0 = time.time() + state, reward, done, _ = self.env.step(rl_actions(state)) + t1 = time.time() + times.append(1 / (t1 - t0)) + + # Compute the velocity speeds and cumulative returns. + veh_ids = self.env.k.vehicle.get_ids() + vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) + ret += reward + + # collect additional information for the data pipeline + get_extra_info(self.env.k.vehicle, extra_info, veh_ids) + extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + + # Compute the results for the custom callables. + for (key, lambda_func) in self.custom_callables.items(): + custom_vals[key].append(lambda_func(self.env)) + + if done: + break + + # Store the information from the run in info_dict. + outflow = self.env.k.vehicle.get_outflow_rate(int(500)) + info_dict["returns"].append(ret) + info_dict["velocities"].append(np.mean(vel)) + info_dict["outflows"].append(outflow) + for key in custom_vals.keys(): + info_dict[key].append(np.mean(custom_vals[key])) + + print("Round {0}, return: {1}".format(i, ret)) + + # Print the averages/std for all variables in the info_dict. 
+ for key in info_dict.keys(): + print("Average, std {}: {}, {}".format( + key, np.mean(info_dict[key]), np.std(info_dict[key]))) + + print("Total time:", time.time() - t) + print("steps/second:", np.mean(times)) + self.env.terminate() + + if convert_to_csv and self.env.simulator == "traci": + # wait a short period of time to ensure the xml file is readable + time.sleep(0.1) + + # collect the location of the emission file + dir_path = self.env.sim_params.emission_path + emission_filename = \ + "{0}-emission.xml".format(self.env.network.name) + emission_path = os.path.join(dir_path, emission_filename) + + # convert the emission file into a csv + emission_to_csv(emission_path) + + # Delete the .xml version of the emission file. + os.remove(emission_path) + + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + + if partition_name: + if partition_name == "default": + partition_name = source_id[0:3] + partition_name = date.today().isoformat() + " " + partition_name + upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(only_query)[2:-2]) + + # delete the S3-only version of the trajectory file + os.remove(upload_file_path) + + return info_dict From de35f9009e9de0c75de7ba4c1eccdccac794e877 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:35:54 -0700 Subject: [PATCH 154/335] fix style issue --- examples/data_pipeline.py | 113 ++++++++++++++++++++----------- examples/datapipeline_test.py | 4 ++ examples/lambda_function.py | 10 +++ examples/query.py | 11 ++- examples/run_query.py | 1 + flow/core/kernel/vehicle/base.py | 2 +- 6 files changed, 98 insertions(+), 43 deletions(-) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py index 28d3b5e73..03b0f87e5 100644 --- a/examples/data_pipeline.py +++ 
b/examples/data_pipeline.py @@ -1,3 +1,4 @@ +"""contains class and helper functions for the data pipeline.""" import pandas as pd import numpy as np import boto3 @@ -7,21 +8,21 @@ def generate_trajectory_table(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based on standard SUMO emission + """Generate desired output for the trajectory_table based on standard SUMO emission. - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ + Parameters + ---------- + data_path : str + path to the standard SUMO emission + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file + """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) required_cols = {"time", "id", "speed", "x", "y"} raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) @@ -39,24 +40,24 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """ generate desired output for the trajectory_table based only on flow output - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. 
A copy of this file with all - the column name will remain in the ./data folder - """ + """Generate desired output for the trajectory_table based only on flow output. + + Parameters + ---------- + data_path : str + output file path + extra_info: dict + extra information needed in the trajectory table, collected from flow + partition_name: str + the name of the partition to put this output to + Returns + ------- + output_file_path: str + the local path of the outputted csv file that should be used for + upload to s3 only, it does not the human readable column names and + will be deleted after uploading to s3. A copy of this file with all + the column name will remain in the ./data folder + """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) @@ -66,7 +67,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name): def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """ upload a file to S3 bucket + """Upload a file to S3 bucket. Parameters ---------- @@ -89,15 +90,40 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): class AthenaQuery: + """ + Class used to run query. + + Act as a query engine, maintains an open session with AWS Athena. + + Attributes + ---------- + MAX_WAIT: int + maximum number of seconds to wait before declares time-out + client: boto3.client + the athena client that is used to run the query + existing_partitions: list + a list of partitions that is already recorded in Athena's datalog, + this is obtained through query at the initialization of this class + instance. + """ def __init__(self): + """Initialize AthenaQuery instance. + + initialize a client session with AWS Athena, + query Athena to obtain extisting_partition. 
+ """ self.MAX_WAIT = 60 self.client = boto3.client("athena") self.existing_partitions = self.get_existing_partitions() def get_existing_partitions(self): - """prints the existing partitions in the S3 bucket""" + """Return the existing partitions in the S3 bucket. + Returns + ------- + partitions: a list of existing partitions on S3 bucket + """ response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ @@ -114,7 +140,7 @@ def get_existing_partitions(self): return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): - """ Return the status of the execution with given id + """Return the status of the execution with given id. Parameters ---------- @@ -125,14 +151,13 @@ def check_status(self, execution_id): status: str QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED """ - response = self.client.get_query_execution( QueryExecutionId=execution_id ) return response['QueryExecution']['Status']['State'] def wait_for_execution(self, execution_id): - """ wait for the execution to finish or time-out + """Wait for the execution to finish or time-out. Parameters ---------- @@ -156,7 +181,7 @@ def wait_for_execution(self, execution_id): return True def update_partition(self, partition): - """ load the given partition to the trajectory_table on Athena + """Load the given partition to the trajectory_table on Athena. Parameters ---------- @@ -176,7 +201,7 @@ def update_partition(self, partition): return def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """ start the execution of a query, does not wait for it to finish + """Start the execution of a query, does not wait for it to finish. 
Parameters ---------- @@ -218,6 +243,16 @@ def run_query(self, query_name, result_location="s3://brent.experiments/query-re def test_sql_query(query_name): + """Start the execution of a query, does not wait for it to finish. + + Parameters + ---------- + query_name : str + name of the query in QueryStrings enum that will be tested + Raises + ------ + RuntimeError: if timeout + """ if query_name not in testing_functions: raise ValueError("no tests supported for this query") diff --git a/examples/datapipeline_test.py b/examples/datapipeline_test.py index 564060d3b..ae0ea382f 100644 --- a/examples/datapipeline_test.py +++ b/examples/datapipeline_test.py @@ -1,3 +1,4 @@ +"""functions that calculates the expected result for testing.""" import math # Vehicle Mass @@ -17,10 +18,12 @@ def heavyside(inp): + """Return 1 if input is positive.""" return 0 if inp <= 0 else 1 def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): + """Calculate the expected power for POWER_DEMAND_MODEL query.""" acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) rolling_friction = M * g * C_r * mu @@ -30,4 +33,5 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): + """Apply the power calculation to a row of the dataframe.""" return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file diff --git a/examples/lambda_function.py b/examples/lambda_function.py index 01ce1512a..4f7937c85 100644 --- a/examples/lambda_function.py +++ b/examples/lambda_function.py @@ -1,3 +1,4 @@ +"""lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus from examples.data_pipeline import AthenaQuery @@ -8,6 +9,15 @@ def lambda_handler(event, context): + """Invoke by AWS Lambda upon triggered by an event. 
+ + Parameters + ---------- + event : dic < str: dic > + an S3 event + context: + not used + """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) diff --git a/examples/query.py b/examples/query.py index 6354cec3b..0f0ee13b4 100644 --- a/examples/query.py +++ b/examples/query.py @@ -1,15 +1,20 @@ +"""stores all the pre-defined query strings.""" from enum import Enum from examples.datapipeline_test import apply_energy_one -tags = {"energy": ["ENERGY_ONE"]} +# tags for different queries +tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} -testing_functions = {"ENERGY_ONE": apply_energy_one} +# specify the function to calculate the expected result of each query +testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} class QueryStrings(Enum): + """An enumeration of all the pre-defined query strings.""" + SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - ENERGY_ONE = "SELECT id, time, 1200 * speed * " \ + POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ diff --git a/examples/run_query.py b/examples/run_query.py index ea8839b09..64baa6656 100644 --- a/examples/run_query.py +++ b/examples/run_query.py @@ -1,3 +1,4 @@ +"""runner script for invoking query manually.""" import argparse from examples.data_pipeline import AthenaQuery, test_sql_query from examples.query import QueryStrings diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 16331ad08..20a11cf99 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -702,7 
+702,7 @@ def get_2d_position(self, veh_id, error=-1001): raise NotImplementedError def get_2D_position(self, veh_id, error=-1001): - """ see traci class """ + """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError def get_accel_without_noise(self, veh_id): From 979d0476fbd2e3308d4bc75f0fc3576306ae6ad5 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:38:47 -0700 Subject: [PATCH 155/335] reorganized file locations --- examples/data_pipeline.py | 285 ------------------ examples/lambda_function.py | 36 --- examples/query.py | 22 -- examples/run_query.py | 37 --- .../data_pipeline}/datapipeline_test.py | 0 5 files changed, 380 deletions(-) delete mode 100644 examples/data_pipeline.py delete mode 100644 examples/lambda_function.py delete mode 100644 examples/query.py delete mode 100644 examples/run_query.py rename {examples => flow/data_pipeline}/datapipeline_test.py (100%) diff --git a/examples/data_pipeline.py b/examples/data_pipeline.py deleted file mode 100644 index 03b0f87e5..000000000 --- a/examples/data_pipeline.py +++ /dev/null @@ -1,285 +0,0 @@ -"""contains class and helper functions for the data pipeline.""" -import pandas as pd -import numpy as np -import boto3 -from botocore.exceptions import ClientError -from examples.query import QueryStrings, testing_functions -from time import time - - -def generate_trajectory_table(data_path, extra_info, partition_name): - """Generate desired output for the trajectory_table based on standard SUMO emission. 
- - Parameters - ---------- - data_path : str - path to the standard SUMO emission - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file - """ - raw_output = pd.read_csv(data_path, index_col=["time", "id"]) - required_cols = {"time", "id", "speed", "x", "y"} - raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) - - extra_info = pd.DataFrame.from_dict(extra_info) - extra_info.set_index(["time", "id"]) - raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) - - # add the partition column - # raw_output['partition'] = partition_name - raw_output = raw_output.sort_values(by=["time", "id"]) - output_file_path = data_path[:-4]+"_trajectory.csv" - raw_output.to_csv(output_file_path, index=False) - return output_file_path - - -def generate_trajectory_from_flow(data_path, extra_info, partition_name): - """Generate desired output for the trajectory_table based only on flow output. - - Parameters - ---------- - data_path : str - output file path - extra_info: dict - extra information needed in the trajectory table, collected from flow - partition_name: str - the name of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. 
A copy of this file with all - the column name will remain in the ./data folder - """ - extra_info = pd.DataFrame.from_dict(extra_info) - # extra_info["partition"] = partition_name - extra_info.to_csv(data_path, index=False) - upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - extra_info.to_csv(upload_only_file_path, index=False, header=False) - return upload_only_file_path - - -def upload_to_s3(bucket_name, bucket_key, file_path, only_query): - """Upload a file to S3 bucket. - - Parameters - ---------- - bucket_name : str - the bucket to upload to - bucket_key: str - the key within the bucket for the file - file_path: str - the path of the file to be uploaded - only_query: str - specify which query should be run on this file by lambda: - if empty: run none of them - if "all": run all available analysis query - if a string of list of queries: run only those mentioned in the list - """ - s3 = boto3.resource("s3") - s3.Bucket(bucket_name).upload_file(file_path, bucket_key, - ExtraArgs={"Metadata": {"run-query": only_query}}) - return - - -class AthenaQuery: - """ - Class used to run query. - - Act as a query engine, maintains an open session with AWS Athena. - - Attributes - ---------- - MAX_WAIT: int - maximum number of seconds to wait before declares time-out - client: boto3.client - the athena client that is used to run the query - existing_partitions: list - a list of partitions that is already recorded in Athena's datalog, - this is obtained through query at the initialization of this class - instance. - """ - - def __init__(self): - """Initialize AthenaQuery instance. - - initialize a client session with AWS Athena, - query Athena to obtain extisting_partition. - """ - self.MAX_WAIT = 60 - self.client = boto3.client("athena") - self.existing_partitions = self.get_existing_partitions() - - def get_existing_partitions(self): - """Return the existing partitions in the S3 bucket. 
- - Returns - ------- - partitions: a list of existing partitions on S3 bucket - """ - response = self.client.start_query_execution( - QueryString='SHOW PARTITIONS trajectory_table', - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("get current partitions timed out") - response = self.client.get_query_results( - QueryExecutionId=response['QueryExecutionId'], - MaxResults=1000 - ) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] - - def check_status(self, execution_id): - """Return the status of the execution with given id. - - Parameters - ---------- - execution_id : string - id of the execution that is checked for - Returns - ------- - status: str - QUEUED|RUNNING|SUCCEEDED|FAILED|CANCELLED - """ - response = self.client.get_query_execution( - QueryExecutionId=execution_id - ) - return response['QueryExecution']['Status']['State'] - - def wait_for_execution(self, execution_id): - """Wait for the execution to finish or time-out. - - Parameters - ---------- - execution_id : str - id of the execution this is watiing for - Returns - ------- - time_out: bool - True if time-out, False if success - Raises - ------ - RuntimeError: if execution failed or get canceled - """ - start = time() - while time() - start < self.MAX_WAIT: - state = self.check_status(execution_id) - if state == 'FAILED' or state == 'CANCELLED': - raise RuntimeError("update partition failed") - elif state == 'SUCCEEDED': - return False - return True - - def update_partition(self, partition): - """Load the given partition to the trajectory_table on Athena. 
- - Parameters - ---------- - partition : str - the new partition that needs to be loaded - """ - response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), - QueryExecutionContext={ - 'Database': 'simulation' - }, - WorkGroup='primary' - ) - if self.wait_for_execution(response['QueryExecutionId']): - raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) - return - - def run_query(self, query_name, result_location="s3://brent.experiments/query-result/", partition="default"): - """Start the execution of a query, does not wait for it to finish. - - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be run - result_location: str, optional - location on the S3 bucket where the result will be stored - partition: str, optional - name of the partition to run this query on - Returns - ------- - execution_id: str - the execution id of the execution started by this method - Raises - ------ - ValueError: if tries to run a query not existed in QueryStrings enum - """ - if query_name not in QueryStrings.__members__: - raise ValueError("query not existed: please add it to query.py") - - if partition not in self.existing_partitions: - self.update_partition(partition) - - response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), - QueryExecutionContext={ - 'Database': 'simulation' - }, - ResultConfiguration={ - 'OutputLocation': result_location, - }, - WorkGroup='primary' - ) - return response['QueryExecutionId'] - -########################################################################### -# Helpers for testing the SQL Queries # -########################################################################### - - -def test_sql_query(query_name): - """Start the execution of a query, does not wait for it to finish. 
- - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be tested - Raises - ------ - RuntimeError: if timeout - """ - if query_name not in testing_functions: - raise ValueError("no tests supported for this query") - - # Run the respective sql query - queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://brent.experiments/query-result/query-test", - partition="test") - if queryEngine.wait_for_execution(execution_id): - raise RuntimeError("execution timed out") - - # get the Athena query result from S3 - s3 = boto3.resource("s3") - s3.Bucket("brent.experiments").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") - athena_result = pd.read_csv("data/athena_result.csv") - athena_result = athena_result.sort_values(by=["time", "id"]) - - # get the python expected result - expected_result = pd.read_csv("data/test_data.csv") - expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") - expected_result.columns = ["time", "id", "power"] - expected_result = expected_result.sort_values(by=["time", "id"]) - - difference = athena_result["power"] - expected_result["power"] - print("average difference is: " + str(np.mean(difference))) - print("std of difference is: " + str(np.std(difference))) - print("average ratio of difference to expected is: " + - str(np.mean(np.divide(difference, expected_result["power"])))) - difference = pd.DataFrame(difference) - difference.to_csv("./difference.csv") diff --git a/examples/lambda_function.py b/examples/lambda_function.py deleted file mode 100644 index 4f7937c85..000000000 --- a/examples/lambda_function.py +++ /dev/null @@ -1,36 +0,0 @@ -"""lambda function on AWS Lambda.""" -import boto3 -from urllib.parse import unquote_plus -from examples.data_pipeline import AthenaQuery -from examples.query import tags - -s3 = boto3.client('s3') -queryEngine = AthenaQuery() - - -def 
lambda_handler(event, context): - """Invoke by AWS Lambda upon triggered by an event. - - Parameters - ---------- - event : dic < str: dic > - an S3 event - context: - not used - """ - for record in event['Records']: - bucket = record['s3']['bucket']['name'] - key = unquote_plus(record['s3']['object']['key']) - partition = key.split('/')[-2].split('=')[-1] - response = s3.head_object(Bucket=bucket, Key=key) - run_query = response["Metadata"]["run-query"] - - if bucket == 'brent.experiments' and 'trajectory-output/' in key: - if run_query == "all": - query_list = tags["analysis"] - elif not run_query: - break - else: - query_list = run_query.split("\', \'") - for query_name in query_list: - queryEngine.run_query(query_name, 's3://brent.experiments/query-result/auto/', partition) \ No newline at end of file diff --git a/examples/query.py b/examples/query.py deleted file mode 100644 index 0f0ee13b4..000000000 --- a/examples/query.py +++ /dev/null @@ -1,22 +0,0 @@ -"""stores all the pre-defined query strings.""" -from enum import Enum -from examples.datapipeline_test import apply_energy_one - -# tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL"], "analysis": ["POWER_DEMAND_MODEL"]} - -# specify the function to calculate the expected result of each query -testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} - - -class QueryStrings(Enum): - """An enumeration of all the pre-defined query strings.""" - - SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 1 AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE 
partition_name=\'{partition}\'" \ No newline at end of file diff --git a/examples/run_query.py b/examples/run_query.py deleted file mode 100644 index 64baa6656..000000000 --- a/examples/run_query.py +++ /dev/null @@ -1,37 +0,0 @@ -"""runner script for invoking query manually.""" -import argparse -from examples.data_pipeline import AthenaQuery, test_sql_query -from examples.query import QueryStrings - -parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" - "a S3 location") -parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") -parser.add_argument("--partition", type=str, nargs='?', default="default") -parser.add_argument("--list_partitions", action="store_true") -parser.add_argument("--check_status", type=str, nargs='+') -parser.add_argument("--list_queries", action="store_true") -parser.add_argument("--test_query", nargs=1) - - -if __name__ == "__main__": - args = parser.parse_args() - queryEngine = AthenaQuery() - - if args.run: - execution_ids = [] - for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) - print(execution_ids) - if args.list_partitions: - print(queryEngine.existing_partitions) - if args.check_status: - status = dict() - for execution_id in args.check_status: - status[execution_id] = queryEngine.check_status(execution_id) - print(status) - if args.list_queries: - for q in QueryStrings: - print(q) - if args.test_query: - test_sql_query(args.test_query[0]) \ No newline at end of file diff --git a/examples/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py similarity index 100% rename from examples/datapipeline_test.py rename to flow/data_pipeline/datapipeline_test.py From fdd983eb19b7a4acd75b9101568dfa8441c86294 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 23 Apr 2020 12:58:44 -0700 Subject: 
[PATCH 156/335] fix some more style issues --- flow/data_pipeline/datapipeline_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py index ae0ea382f..0e1a50518 100644 --- a/flow/data_pipeline/datapipeline_test.py +++ b/flow/data_pipeline/datapipeline_test.py @@ -34,4 +34,4 @@ def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_ def apply_energy_one(row): """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] \ No newline at end of file + return [row[0], row[1], calculate_power(row[4], row[6])] From 6af7e02c86ddfbce78851d2c85a2042ae3b9ea6c Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:01:41 -0700 Subject: [PATCH 157/335] added auto upload to s3 feature for the reply scipt and fix some other minor issues --- flow/core/experiment.py | 15 ++--- flow/core/kernel/vehicle/traci.py | 5 +- flow/data_pipeline/data_pipeline.py | 12 ++-- flow/data_pipeline/lambda_function.py | 4 +- flow/data_pipeline/query.py | 86 ++++++++++++++------------- flow/data_pipeline/run_query.py | 2 +- flow/visualize/i210_replay.py | 21 ++++++- 7 files changed, 82 insertions(+), 63 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 2296ef635..84a34d0e3 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -88,7 +88,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=None): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=None, only_query=""): """Run the given network for a set number of runs. Parameters @@ -106,8 +106,9 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, partition_name=No will be used to later for query. 
If NONE, won't upload output to S3. only_query: str - Specifies whether queries should be automatically run the - simulation data when it gets uploaded to s3 + Specifies which queries should be automatically run when the + simulation data gets uploaded to S3. If an empty str is passed in, + then it implies no queries should be run on this. Returns ------- @@ -147,7 +148,7 @@ def rl_actions(*_): t = time.time() times = [] extra_info = extra_init() - source_id = uuid.uuid4().hex + source_id = 'flow_{}'.format(uuid.uuid4().hex) for i in range(num_runs): ret = 0 @@ -167,7 +168,7 @@ def rl_actions(*_): # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id+"run" + str(i)] * len(veh_ids)) + extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): @@ -218,8 +219,8 @@ def rl_actions(*_): if partition_name == "default": partition_name = source_id[0:3] partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( + partition_name, upload_file_path.split('/')[-1].split('_')[0]), upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index f40eed99c..824ec4b0c 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -970,7 +970,6 @@ def apply_acceleration(self, veh_ids, acc): self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) - #self.__vehicles[vid]["next_v"] = next_vel 
self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) def apply_lane_change(self, veh_ids, direction): @@ -1135,7 +1134,7 @@ def set_max_speed(self, veh_id, max_speed): # add for data pipeline def get_accel(self, veh_id): """See parent class.""" - if not "accel" in self.__vehicles[veh_id]: + if "accel" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] @@ -1145,7 +1144,7 @@ def update_accel_without_noise(self, veh_id, accel_without_noise): def get_accel_without_noise(self, veh_id): """See parent class.""" - if not "accel_without_noise" in self.__vehicles[veh_id]: + if "accel_without_noise" not in self.__vehicles[veh_id]: self.__vehicles[veh_id]["accel_without_noise"] = None return self.__vehicles[veh_id]["accel_without_noise"] diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 0cd0cbc79..fbd975c5e 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def generate_trajectory_from_flow(data_path, extra_info, partition_name): +def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -227,7 +227,7 @@ def update_partition(self, partition): self.existing_partitions.append(partition) return - def run_query(self, query_name, result_location="s3://circles.data/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -285,15 +285,15 @@ def test_sql_query(query_name): # Run the respective sql query queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data/query-result/query-test", - partition="test") + execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" + "query-result/query-test", partition="test") if queryEngine.wait_for_execution(execution_id): raise RuntimeError("execution timed out") # get the Athena query result from S3 s3 = boto3.resource("s3") - s3.Bucket("circles.data").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") + s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", + "data/athena_result.csv") athena_result = pd.read_csv("data/athena_result.csv") athena_result = athena_result.sort_values(by=["time", "id"]) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 3f0abb8a1..fd50ba8f5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -25,7 +25,7 @@ def lambda_handler(event, context): response = s3.head_object(Bucket=bucket, Key=key) run_query = response["Metadata"]["run-query"] - if bucket == 'circles.data' and 'trajectory-output/' in key: + if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: if run_query == "all": query_list = tags["analysis"] elif not run_query: @@ -33,4 +33,4 @@ def lambda_handler(event, context): else: query_list = run_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 9054364e6..7b8cf70c8 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -15,45 +15,47 @@ class 
QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = "SELECT id, time, speed, acceleration, 1200 * speed * " \ - "((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL = \ - "SELECT id, time, speed, accel_without_noise, 1200 * speed * " \ - "((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 " \ - "+ 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 " \ - "* POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id " \ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\' " \ - "ORDER BY id, time " - POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = \ - "WITH sub1 AS ( " \ - "SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, " \ - "time - LAG(time, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, " \ - "LAG(speed, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, " \ - "LAG(acceleration, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, " \ - "LAG(accel_without_noise, 1) " \ - "OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised "\ - "FROM trajectory_table " \ - "WHERE partition_name=\'{partition}\'" \ - ")," \ - "sub2 AS (" \ - "SELECT time, id, speed, acceleration, accel_without_noise, 
road_grade, source_id, " \ - "prev_speed+accel_without_noise*sim_step AS speed_denoised " \ - "FROM sub1" \ - ") " \ - "SELECT id, time, speed_denoised, accel_without_noise, " \ - "1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 " \ - "THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 " \ - "* SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 " \ - "* 2.6 * 0.3 * POW(speed_denoised,3) AS power, " \ - "'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id " \ - "FROM sub2 " \ - "ORDER BY id, time " + POWER_DEMAND_MODEL = """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time """ + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER 
(PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table + WHERE partition_name=\'{partition}\'), + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep + ) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time """ diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index f065a726e..ac927c749 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -6,7 +6,7 @@ parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" "a S3 location") parser.add_argument("--run", type=str, nargs="+") -parser.add_argument("--result_location", type=str, nargs='?', default="s3://brent.experiments/query-result/") +parser.add_argument("--result_location", type=str, nargs='?', default="s3://circles.data.pipeline/query-result/") parser.add_argument("--partition", type=str, nargs='?', default="default") parser.add_argument("--list_partitions", action="store_true") parser.add_argument("--check_status", type=str, nargs='+') diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index c50f12a05..0df23942e 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -209,7 +209,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= }) extra_info = extra_init() - source_id = uuid.uuid4().hex + source_id = 'flow_{}'.format(uuid.uuid4().hex) for i in 
range(args.num_rollouts): vel = [] @@ -249,7 +249,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # Collect information from flow for the trajectory output get_extra_info(env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id + "run" + str(i)] * len(veh_ids)) + extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) # Compute the results for the custom callables. for (key, lambda_func) in custom_callables.items(): @@ -326,6 +326,17 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # convert the emission file into a csv file emission_to_csv(emission_path, output_path=output_path) + # generate the trajectory output file + trajectory_table_path = './data/' + source_id + ".csv" + upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + + # upload to s3 if asked + if args.use_s3: + partition_name = date.today().isoformat() + " " + source_id[0:3] + upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' + + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + upload_file_path, str(args.only_query)[2:-2]) + # print the location of the emission csv file print("\nGenerated emission file at " + output_path) @@ -435,6 +446,12 @@ def create_parser(): 'be run in cluster mode') parser.add_argument('--exp_title', type=str, required=False, default=None, help='Informative experiment title to help distinguish results') + parser.add_argument( + '--only_query', + nargs='*', default="[\'all\']", + help='specify which query should be run by lambda' + 'for detail, see upload_to_s3 in data_pipeline.py' + ) return parser From 72d4733f07458a2863bb2c95cb7ef75c89935d33 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:07:29 -0700 Subject: [PATCH 158/335] fix trailing white space style issue --- flow/data_pipeline/query.py | 79 ++++++++++++++++++------------------- 1 file 
changed, 39 insertions(+), 40 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 7b8cf70c8..c915d44bf 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -16,46 +16,45 @@ class QueryStrings(Enum): SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, acceleration, 1200 * speed * + ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 + * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time """ + SELECT id, time, speed, accel_without_noise, + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 
'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id + FROM trajectory_table + WHERE partition_name=\'{partition}\' + ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table + WITH lagged_timestep AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + time - LAG(time, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, + LAG(acceleration, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, + LAG(accel_without_noise, 1) + OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised + FROM trajectory_table WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep - ) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 
'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time """ + speed_denoised_table AS ( + SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, + prev_speed+accel_without_noise*sim_step AS speed_denoised + FROM lagged_timestep) + SELECT id, time, speed_denoised, accel_without_noise, + 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 + THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 + * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 + * 2.6 * 0.3 * POW(speed_denoised,3) AS power, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id + FROM speed_denoised_table + ORDER BY id, time""" From 420ea3f798d00e2a79260b82b79092f304ee9b72 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 04:10:43 -0700 Subject: [PATCH 159/335] some minor issue fixed --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c915d44bf..d40e14c45 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -26,7 +26,7 @@ class QueryStrings(Enum): ORDER BY id, time""" POWER_DEMAND_MODEL_DENOISED_ACCEL = """ SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) + 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id From e45eb92cc420836fa297c0ccceb2d93d88d06359 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:42:29 -0700 Subject: [PATCH 160/335] reformatting energy queries --- flow/data_pipeline/query.py | 94 ++++++++++++++++++++----------------- 1 file changed, 51 insertions(+), 43 deletions(-) diff 
--git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d40e14c45..c6be5efe4 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -13,48 +13,56 @@ class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" - SAMPLE = "SELECT * FROM trajectory_table WHERE partition_name=\'{partition}\' LIMIT 15;" - UPDATE_PARTITION = "ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\');" - POWER_DEMAND_MODEL = """ - SELECT id, time, speed, acceleration, 1200 * speed * - ((CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 - + 9.81 * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 - * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + SAMPLE = """ + SELECT * + FROM trajectory_table + WHERE partition_name=\'{partition}\' + LIMIT 15; + """ + + UPDATE_PARTITION = """ + ALTER TABLE trajectory_table + ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + """ + + POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table') + POWER_DEMAND_MODEL_DENOISED_ACCEL = """ - SELECT id, time, speed, accel_without_noise, - 1200 * speed * ((CASE WHEN accel_without_noise > 0 THEN 1 ELSE 0 END * (1-0.8) - * accel_without_noise)+0.8 + 9.81 * SIN(road_grade)) - + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL' AS energy_model_id, source_id - FROM trajectory_table - WHERE partition_name=\'{partition}\' - ORDER BY id, time""" + WITH denoised_accel_cte AS ( + SELECT + id, + "time", + speed, + accel_without_noise AS acceleration, + road_grade, + source_id + FROM trajectory_table + ) + {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte')) + POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ - WITH lagged_timestep AS ( - SELECT time, id, 
speed, acceleration, accel_without_noise, road_grade, source_id, - time - LAG(time, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, - LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed, - LAG(acceleration, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel, - LAG(accel_without_noise, 1) - OVER (PARTITION BY id ORDER BY time ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_accel_denoised - FROM trajectory_table - WHERE partition_name=\'{partition}\'), - speed_denoised_table AS ( - SELECT time, id, speed, acceleration, accel_without_noise, road_grade, source_id, - prev_speed+accel_without_noise*sim_step AS speed_denoised - FROM lagged_timestep) - SELECT id, time, speed_denoised, accel_without_noise, - 1200 * speed_denoised * ((CASE WHEN accel_without_noise > 0 - THEN 1 ELSE 0 END * (1-0.8) * accel_without_noise) + 0.8 + 9.81 - * SIN(road_grade)) + 1200 * 9.81 * 0.005 * speed_denoised + 0.5 * 1.225 - * 2.6 * 0.3 * POW(speed_denoised,3) AS power, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL' AS energy_model, source_id - FROM speed_denoised_table - ORDER BY id, time""" + WITH lagged_timestep AS ( + SELECT + "time", + id, + accel_without_noise, + road_grade, + source_id, + "time" - LAG("time", 1) + OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + LAG(speed, 1) + OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ), denoised_speed_cte AS ( + SELECT + id, + "time", + prev_speed + accel_without_noise * sim_step AS speed, + accel_without_noise AS acceleration, + road_grade, + source_id + FROM lagged_timestep + ) + {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte')) From d578e6337b117316ce9d0633c7e18070ec27d6dc Mon Sep 17 
00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 08:52:17 -0700 Subject: [PATCH 161/335] rename vehicle power demand query --- flow/data_pipeline/query.py | 27 +++++++++++++++++++++------ 1 file changed, 21 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c6be5efe4..826c28242 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -6,9 +6,24 @@ tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], "analysis": ["POWER_DEMAND_MODEL"]} -# specify the function to calculate the expected result of each query -testing_functions = {"POWER_DEMAND_MODEL": apply_energy_one} - +VEHICLE_POWER_DEMAND_FINAL_SELECT = """ + SELECT + id, + "time", + speed, + acceleration, + road_grade, + 1200 * speed * ( + (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + + 0.8 + 9.81 * SIN(road_grade) + ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, + 'POWER_DEMAND_MODEL' AS energy_model_id, + source_id + FROM {} + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ORDER BY id, "time" + """ class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" @@ -25,7 +40,7 @@ class QueryStrings(Enum): ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); """ - POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_SUBQUERY.format('trajectory_table') + POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table') POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -38,7 +53,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table ) - {}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -65,4 +80,4 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - 
{}""".format(VEHICLE_POWER_DEMAND_SUBQUERY.format('denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) From 32c052866e2d750fb4e4911c06320cb89ccd3157 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 10:44:06 -0700 Subject: [PATCH 162/335] move partition condition to cte's --- flow/data_pipeline/query.py | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 826c28242..2eb2146f2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -20,8 +20,6 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, source_id FROM {} - WHERE 1 = 1 - AND partition_name=\'{partition}\' ORDER BY id, "time" """ @@ -40,7 +38,20 @@ class QueryStrings(Enum): ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); """ - POWER_DEMAND_MODEL = VEHICLE_POWER_DEMAND_FINAL_SELECT.format('trajectory_table') + POWER_DEMAND_MODEL = """ + WITH regular_cte AS ( + SELECT + id, + "time", + speed, + acceleration, + road_grade, + source_id + FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' + ) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -52,14 +63,16 @@ class QueryStrings(Enum): road_grade, source_id FROM trajectory_table + WHERE 1 = 1 + AND partition_name=\'{partition}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( SELECT - "time", id, + "time", accel_without_noise, road_grade, source_id, From c7cd96303620e97530bceb9507a085d6e4089cc9 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 13:41:17 -0700 Subject: [PATCH 163/335] fix some query string formatting issue --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git 
a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2eb2146f2..ca59a12b1 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -49,7 +49,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -64,7 +64,7 @@ class QueryStrings(Enum): source_id FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -82,7 +82,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 - AND partition_name=\'{partition}\' + AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, From b5be92ac038b118b4055ef6489612a9836cf00f2 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:28:54 -0700 Subject: [PATCH 164/335] fix some style issue --- flow/data_pipeline/query.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index ca59a12b1..e1f98aaf1 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -23,6 +23,7 @@ ORDER BY id, "time" """ + class QueryStrings(Enum): """An enumeration of all the pre-defined query strings.""" From 6884960aecf8adb4704143b17383fcddd2aa0ffa Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 15:56:53 -0700 Subject: [PATCH 165/335] get up to date with i210_dev --- examples/exp_configs/non_rl/highway.py | 40 ++++++---- .../exp_configs/non_rl/i210_subnetwork.py | 2 +- .../exp_configs/templates/sumo/test2.net.xml | 78 +++++-------------- 3 files changed, 48 insertions(+), 72 deletions(-) diff --git a/examples/exp_configs/non_rl/highway.py b/examples/exp_configs/non_rl/highway.py index 
1905e2f7f..e7505f2d7 100644 --- a/examples/exp_configs/non_rl/highway.py +++ b/examples/exp_configs/non_rl/highway.py @@ -5,19 +5,25 @@ from flow.core.params import VehicleParams, InFlows from flow.envs.ring.lane_change_accel import ADDITIONAL_ENV_PARAMS from flow.networks.highway import HighwayNetwork, ADDITIONAL_NET_PARAMS -from flow.envs import TestEnv +from flow.envs import LaneChangeAccelEnv vehicles = VehicleParams() vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 0.3, "b": 2.0, "noise": 0.5 - }), - ) + veh_id="human", + acceleration_controller=(IDMController, {}), + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), + num_vehicles=20) +vehicles.add( + veh_id="human2", + acceleration_controller=(IDMController, {}), + lane_change_params=SumoLaneChangeParams( + model="SL2015", + lc_sublane=2.0, + ), + num_vehicles=20) env_params = EnvParams(additional_params=ADDITIONAL_ENV_PARAMS) @@ -25,7 +31,13 @@ inflow.add( veh_type="human", edge="highway_0", - vehs_per_hour=10800 / 5.0, + probability=0.25, + departLane="free", + departSpeed=20) +inflow.add( + veh_type="human2", + edge="highway_0", + probability=0.25, departLane="free", departSpeed=20) @@ -35,7 +47,7 @@ exp_tag='highway', # name of the flow environment the experiment is running on - env_name=TestEnv, + env_name=LaneChangeAccelEnv, # name of the network class the experiment is running on network=HighwayNetwork, @@ -46,12 +58,12 @@ # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( render=True, - sim_step=0.5 + lateral_resolution=1.0, ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=4000, + horizon=1500, additional_params=ADDITIONAL_ENV_PARAMS.copy(), ), diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 
8970e6165..3704a7a1c 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -117,7 +117,7 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=False, + color_by_speed=True, use_ballistic=True ), diff --git a/examples/exp_configs/templates/sumo/test2.net.xml b/examples/exp_configs/templates/sumo/test2.net.xml index 16170b917..00e3edcd5 100644 --- a/examples/exp_configs/templates/sumo/test2.net.xml +++ b/examples/exp_configs/templates/sumo/test2.net.xml @@ -1,41 +1,5 @@ - - @@ -4716,24 +4680,24 @@ - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + @@ -4837,10 +4801,10 @@ - + - - + + From 7e549be514a427b1877f19cab3ecb603a02c4f50 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:18:43 -0700 Subject: [PATCH 166/335] update lambda function, change partition into multi-column --- flow/core/experiment.py | 8 +-- flow/data_pipeline/data_pipeline.py | 84 ++++++++------------------- flow/data_pipeline/lambda_function.py | 26 +++------ flow/data_pipeline/query.py | 29 ++++----- flow/data_pipeline/run_query.py | 6 +- flow/visualize/i210_replay.py | 7 ++- 6 files changed, 58 insertions(+), 102 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 84a34d0e3..01f732379 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -217,10 +217,10 @@ def rl_actions(*_): if partition_name: if partition_name == "default": - partition_name = source_id[0:3] - partition_name = date.today().isoformat() + " " + partition_name - upload_to_s3('circles.data.pipeline', 'trajectory-output/partition_name={}/{}.csv'.format( - partition_name, upload_file_path.split('/')[-1].split('_')[0]), + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), 
upload_file_path, str(only_query)[2:-2]) # delete the S3-only version of the trajectory file diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index fbd975c5e..111c41994 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -2,8 +2,9 @@ import pandas as pd import numpy as np import boto3 -from flow.data_pipeline.query import QueryStrings, testing_functions +from flow.data_pipeline.query import QueryStrings from time import time +from datetime import date def generate_trajectory_table(data_path, extra_info, partition_name): @@ -90,7 +91,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -99,7 +100,7 @@ def extra_init(): def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: - extra_info["time"].append(veh_kernel.get_timestep(vid) / 1000) + extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) extra_info["acceleration"].append(veh_kernel.get_accel(vid)) @@ -154,7 +155,7 @@ def get_existing_partitions(self): response = self.client.start_query_execution( QueryString='SHOW PARTITIONS trajectory_table', QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) @@ -164,7 +165,7 @@ def get_existing_partitions(self): QueryExecutionId=response['QueryExecutionId'], MaxResults=1000 
) - return [data['Data'][0]['VarCharValue'].split('=')[-1] for data in response['ResultSet']['Rows']] + return [data['Data'][0]['VarCharValue'] for data in response['ResultSet']['Rows']] def check_status(self, execution_id): """Return the status of the execution with given id. @@ -207,27 +208,30 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, partition): + def update_partition(self, query_date, partition): """Load the given partition to the trajectory_table on Athena. Parameters ---------- + query_date : str + the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(partition=partition), + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, WorkGroup='primary' ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions.append(partition) + self.existing_partitions.append("date={}/partition_name={}".format(query_date, partition)) return - def run_query(self, query_name, result_location="s3://circles.data.pipeline/query-result/", partition="default"): + def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", + query_date="today", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -236,6 +240,8 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer name of the query in QueryStrings enum that will be run result_location: str, optional location on the S3 bucket where the result will be stored + query_date : str + name of the partition date to run this query on partition: str, optional name of the partition to run this query on Returns @@ -249,13 +255,16 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer if query_name not in QueryStrings.__members__: raise ValueError("query not existed: please add it to query.py") - if partition not in self.existing_partitions: - self.update_partition(partition) + if query_date == "today": + query_date = date.today().isoformat() + + if "date={}/partition_name={}".format(query_date, partition) not in self.existing_partitions: + self.update_partition(query_date, partition) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(partition=partition), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=partition), QueryExecutionContext={ - 'Database': 'simulation' + 'Database': 'circles' }, ResultConfiguration={ 'OutputLocation': result_location, @@ -263,50 +272,3 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/quer WorkGroup='primary' ) return response['QueryExecutionId'] - -########################################################################### -# Helpers for testing the SQL Queries # -########################################################################### - - -def test_sql_query(query_name): - """Start the execution of a query, does not wait for it to finish. 
- - Parameters - ---------- - query_name : str - name of the query in QueryStrings enum that will be tested - Raises - ------ - RuntimeError: if timeout - """ - if query_name not in testing_functions: - raise ValueError("no tests supported for this query") - - # Run the respective sql query - queryEngine = AthenaQuery() - execution_id = queryEngine.run_query(query_name, result_location="s3://circles.data.pipeline/" - "query-result/query-test", partition="test") - if queryEngine.wait_for_execution(execution_id): - raise RuntimeError("execution timed out") - - # get the Athena query result from S3 - s3 = boto3.resource("s3") - s3.Bucket("circles.data.pipeline").download_file("query-result/query-test/"+execution_id+".csv", - "data/athena_result.csv") - athena_result = pd.read_csv("data/athena_result.csv") - athena_result = athena_result.sort_values(by=["time", "id"]) - - # get the python expected result - expected_result = pd.read_csv("data/test_data.csv") - expected_result = expected_result.apply(testing_functions[query_name], axis=1, result_type="expand") - expected_result.columns = ["time", "id", "power"] - expected_result = expected_result.sort_values(by=["time", "id"]) - - difference = athena_result["power"] - expected_result["power"] - print("average difference is: " + str(np.mean(difference))) - print("std of difference is: " + str(np.std(difference))) - print("average ratio of difference to expected is: " + - str(np.mean(np.divide(difference, expected_result["power"])))) - difference = pd.DataFrame(difference) - difference.to_csv("./difference.csv") diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index fd50ba8f5..35dcbfba8 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,36 +1,28 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from examples.data_pipeline import AthenaQuery -from examples.query import tags +from 
flow.data_pipeline.data_pipeline import AthenaQuery +from flow.data_pipeline.query import tags s3 = boto3.client('s3') queryEngine = AthenaQuery() def lambda_handler(event, context): - """Invoke by AWS Lambda upon triggered by an event. - - Parameters - ---------- - event : dic < str: dic > - an S3 event - context: - not used - """ for record in event['Records']: bucket = record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) + query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] response = s3.head_object(Bucket=bucket, Key=key) - run_query = response["Metadata"]["run-query"] + required_query = response["Metadata"]["run-query"] if bucket == 'circles.data.pipeline' and 'trajectory-output/' in key: - if run_query == "all": - query_list = tags["analysis"] - elif not run_query: + if required_query == "all": + query_list = tags["energy"] + elif not required_query: break else: - query_list = run_query.split("\', \'") + query_list = required_query.split("\', \'") for query_name in query_list: - queryEngine.run_query(query_name, 's3://circles.data.pipeline/query-result/auto/', partition) + queryEngine.run_query(query_name, 's3://circles.data.pipeline/result/auto/', query_date, partition) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e1f98aaf1..1d805279b 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,13 +3,12 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], - "analysis": ["POWER_DEMAND_MODEL"]} +tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT id, - "time", + time_step, speed, acceleration, road_grade, @@ -20,7 +19,7 @@ 'POWER_DEMAND_MODEL' AS energy_model_id, 
source_id FROM {} - ORDER BY id, "time" + ORDER BY id, time_step """ @@ -30,26 +29,28 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * FROM trajectory_table - WHERE partition_name=\'{partition}\' + WHERE date = \'{date}\' + AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ ALTER TABLE trajectory_table - ADD IF NOT EXISTS PARTITION (partition_name=\'{partition}\'); + ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ POWER_DEMAND_MODEL = """ WITH regular_cte AS ( SELECT id, - "time", + time_step, speed, acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) @@ -58,13 +59,14 @@ class QueryStrings(Enum): WITH denoised_accel_cte AS ( SELECT id, - "time", + time_step, speed, accel_without_noise AS acceleration, road_grade, source_id FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) @@ -73,21 +75,22 @@ class QueryStrings(Enum): WITH lagged_timestep AS ( SELECT id, - "time", + time_step, accel_without_noise, road_grade, source_id, - "time" - LAG("time", 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + time_step - LAG(time_step, 1) + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY "time" ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 + AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ), denoised_speed_cte AS ( SELECT id, - "time", + time_step, prev_speed + accel_without_noise * sim_step AS speed, accel_without_noise AS acceleration, 
road_grade, diff --git a/flow/data_pipeline/run_query.py b/flow/data_pipeline/run_query.py index ac927c749..1eb802205 100644 --- a/flow/data_pipeline/run_query.py +++ b/flow/data_pipeline/run_query.py @@ -1,6 +1,6 @@ """runner script for invoking query manually.""" import argparse -from flow.data_pipeline.data_pipeline import AthenaQuery, test_sql_query +from flow.data_pipeline.data_pipeline import AthenaQuery from flow.data_pipeline.query import QueryStrings parser = argparse.ArgumentParser(prog="run_query", description="runs query on AWS Athena and stores the result to" @@ -21,7 +21,7 @@ if args.run: execution_ids = [] for query_name in args.run: - execution_ids.append(queryEngine.run_query(query_name, args.result_location, args.partition)) + execution_ids.append(queryEngine.run_query(query_name, args.result_location, partition=args.partition)) print(execution_ids) if args.list_partitions: print(queryEngine.existing_partitions) @@ -33,5 +33,3 @@ if args.list_queries: for q in QueryStrings: print(q) - if args.test_query: - test_sql_query(args.test_query[0]) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 0df23942e..f21808705 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -332,9 +332,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # upload to s3 if asked if args.use_s3: - partition_name = date.today().isoformat() + " " + source_id[0:3] - upload_to_s3('circles.data.pipeline', 'trajectory-output/' + 'partition_name=' + partition_name + '/' - + upload_file_path.split('/')[-1].split('_')[0] + '.csv', + partition_name = source_id[-3:] + cur_date = date.today().isoformat() + upload_to_s3('circles.data.pipeline', 'trajectory-output/date={}/partition_name={}/{}.csv'.format( + cur_date, partition_name, upload_file_path.split('/')[-1].split('_upload')[0]), upload_file_path, str(args.only_query)[2:-2]) # print the location of the emission csv file From 
a799abda655a821b66828b44669210c8a8dd35ea Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:45:08 -0700 Subject: [PATCH 167/335] remove dupe imports --- examples/train.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/examples/train.py b/examples/train.py index d9e7dde07..7cb84d361 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,8 +124,6 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv - from stable_baselines import PPO2 if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() @@ -175,12 +173,7 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class horizon = flow_params['env'].horizon @@ -263,7 +256,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -412,8 +404,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - from stable_baselines.common.vec_env import DummyVecEnv - from stable_baselines import PPO2 flow_params = submodule.flow_params # Path to the saved files From f4fa42632a13c17b76ba49e73d67d13559f19062 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:51:14 -0700 Subject: [PATCH 168/335] remove blank lines after docstrings --- examples/train.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/examples/train.py b/examples/train.py index 7cb84d361..5a9ab5903 100644 --- a/examples/train.py +++ b/examples/train.py @@ -124,7 +124,6 @@ def 
run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -256,7 +255,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) @@ -404,7 +402,6 @@ def train_h_baselines(flow_params, args, multiagent): def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] From 2563818e4e31cf61606f53955a7b7aed35557a7b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 20:59:00 -0700 Subject: [PATCH 169/335] add back ray import --- examples/train.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/examples/train.py b/examples/train.py index 5a9ab5903..50720b756 100644 --- a/examples/train.py +++ b/examples/train.py @@ -255,6 +255,8 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" + import ray + flow_params = submodule.flow_params flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From 498e08aa1f35d2c37bb1551b35b5d8c98635afa4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Tue, 19 May 2020 21:04:56 -0700 Subject: [PATCH 170/335] remove whitespace --- examples/train.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index 50720b756..1689d846f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -256,7 +256,7 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - + flow_params = submodule.flow_params 
flow_params['sim'].render = flags.render policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) From d7da535e81b50dd7b14b2cbb5c72d8cd65fa1825 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 19 May 2020 21:47:19 -0700 Subject: [PATCH 171/335] style fixed --- flow/data_pipeline/data_pipeline.py | 1 - flow/data_pipeline/query.py | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 111c41994..a999b6eb1 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -1,6 +1,5 @@ """contains class and helper functions for the data pipeline.""" import pandas as pd -import numpy as np import boto3 from flow.data_pipeline.query import QueryStrings from time import time diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 1d805279b..c2a64013c 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -80,9 +80,9 @@ class QueryStrings(Enum): road_grade, source_id, time_step - LAG(time_step, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) - OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed + OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' From 3df23123743b183a737adb0c7f29516771f2d353 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 11:49:38 -0700 Subject: [PATCH 172/335] specify power demand model names --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index c2a64013c..a319550e2 100644 --- a/flow/data_pipeline/query.py +++ 
b/flow/data_pipeline/query.py @@ -16,7 +16,7 @@ (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) + 0.8 + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, - 'POWER_DEMAND_MODEL' AS energy_model_id, + \'{}\' AS energy_model_id, source_id FROM {} ORDER BY id, time_step @@ -53,7 +53,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -69,7 +69,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -97,4 +97,4 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From 28d4f73c4170c05b8fde403d8a6148347d2d1351 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 21:29:11 -0700 Subject: [PATCH 173/335] fix bug in vehicle power demand --- flow/data_pipeline/query.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a319550e2..bbc0b9709 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -12,9 +12,12 @@ speed, acceleration, road_grade, - 1200 * speed * ( - (CASE WHEN acceleration > 0 THEN 1 ELSE 0 END * (1-0.8) * acceleration) - + 0.8 + 9.81 * SIN(road_grade) + 1200 * speed * MAX(0, ( + 
CASE + WHEN acceleration > 0 THEN 1 + WHEN acceleration < 0 THEN 0 + ELSE 0.5 + END * (1 - 0.8) + 0.8) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3) AS power, \'{}\' AS energy_model_id, source_id From 077983206ea4454190ffa98987dc81e4ba5d2954 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:31:20 -0700 Subject: [PATCH 174/335] Add several accelerations (with/without noise, with/without failsafes) to custom output --- flow/controllers/base_controller.py | 18 +++++++++------ flow/core/kernel/vehicle/base.py | 20 ++++++++++++++-- flow/core/kernel/vehicle/traci.py | 36 +++++++++++++++++++++++------ flow/data_pipeline/data_pipeline.py | 19 ++++++++++----- 4 files changed, 71 insertions(+), 22 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7adcdf310..c417bb73a 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -88,8 +88,10 @@ def get_action(self, env): float the modified form of the acceleration """ - # clear the current stored accel_without_noise of this vehicle None - env.k.vehicle.update_accel_without_noise(self.veh_id, None) + # clear the current stored accel_no_noise_no_failsafe of this vehicle None + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed @@ -110,23 +112,25 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - accel_without_noise = accel + env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) if self.fail_safe == 'instantaneous': - accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + 
accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': - accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) - env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) + accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel) + env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) + env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel) # run the fail-safes, if requested if self.fail_safe == 'instantaneous': accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) - + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 20a11cf99..eb88ff397 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -693,7 +693,15 @@ def get_accel(self, veh_id): """Return the acceleration of vehicle with veh_id.""" raise NotImplementedError - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): + """Update stored acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): """Update stored acceleration without noise of vehicle with veh_id.""" raise NotImplementedError @@ -705,7 +713,15 @@ def 
get_2D_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError - def get_accel_without_noise(self, veh_id): + def get_accel_no_noise_no_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_no_noise_with_failsafe(self, veh_id): + """Return the acceleration without noise of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 824ec4b0c..344bcfde2 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,7 +113,9 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_without_noise"] = None + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -1138,15 +1140,35 @@ def get_accel(self, veh_id): self.__vehicles[veh_id]["accel"] = None return self.__vehicles[veh_id]["accel"] - def update_accel_without_noise(self, veh_id, accel_without_noise): + def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): """See parent class.""" - self.__vehicles[veh_id]["accel_without_noise"] = accel_without_noise + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - def get_accel_without_noise(self, veh_id): + def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): """See parent class.""" - if 
"accel_without_noise" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_without_noise"] = None - return self.__vehicles[veh_id]["accel_without_noise"] + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe + + def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + + def get_accel_no_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + + def get_accel_no_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] + + def get_accel_with_noise_no_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a999b6eb1..11d85cb0d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -89,9 +89,11 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): def extra_init(): - """Return the dictionary with all the feild pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], "acceleration": [], - "accel_without_noise": [], "realilzed_accel": [], "leader_id": [], "follower_id": [], + """Return the dictionary with all the field pre-populated 
with empty list.""" + extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], + "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], + "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], + "realized_accel": [], "leader_id": [], "follower_id": [], "leader_rel_speed": [], "road_grade": [], "source_id": []} return extra_info @@ -102,13 +104,18 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) extra_info["headway"].append(veh_kernel.get_headway(vid)) - extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realilzed_accel"].append(veh_kernel.get_realized_accel(vid)) + extra_info["target_accel_no_noise_no_failsafe"].append( + veh_kernel.get_accel_no_noise_no_failsafe(vid)) + extra_info["target_accel_with_noise_no_failsafe"].append( + veh_kernel.get_accel_with_noise_no_failsafe(vid)) + extra_info["target_accel_no_noise_with_failsafe"].append( + veh_kernel.get_accel_no_noise_with_failsafe(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) position = veh_kernel.get_2d_position(vid) extra_info["x"].append(position[0]) From b3f15a3c2a4527b59139ed1d9198f68110c93270 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:44:15 -0700 Subject: [PATCH 175/335] update queries with new column names --- flow/data_pipeline/query.py | 20 ++++++++++---------- 
1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index bbc0b9709..d3f136a72 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -31,14 +31,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM trajectory_table + FROM fact_vehicle_trace WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE trajectory_table + ALTER TABLE fact_vehicle_trace ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -48,10 +48,10 @@ class QueryStrings(Enum): id, time_step, speed, - acceleration, + target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -64,10 +64,10 @@ class QueryStrings(Enum): id, time_step, speed, - accel_without_noise AS acceleration, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -79,14 +79,14 @@ class QueryStrings(Enum): SELECT id, time_step, - accel_without_noise, + target_accel_no_noise_with_failsafe, road_grade, source_id, time_step - LAG(time_step, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM trajectory_table + FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -94,8 +94,8 @@ class QueryStrings(Enum): SELECT id, time_step, - prev_speed + accel_without_noise * sim_step AS speed, - accel_without_noise AS acceleration, + prev_speed + target_accel_no_noise_with_failsafe * sim_step AS speed, + target_accel_no_noise_with_failsafe AS acceleration, road_grade, 
source_id FROM lagged_timestep From d66a0ab6542a6075ce9495991790afadf8a4d3e4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:47:44 -0700 Subject: [PATCH 176/335] fix flake8 issues --- flow/controllers/base_controller.py | 2 +- flow/data_pipeline/query.py | 17 +++++++++++++---- 2 files changed, 14 insertions(+), 5 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index c417bb73a..3f6a0f4ae 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -130,7 +130,7 @@ def get_action(self, env): elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) - + return accel def get_safe_action_instantaneous(self, env, action): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d3f136a72..b8cd24b55 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -3,7 +3,13 @@ from flow.data_pipeline.datapipeline_test import apply_energy_one # tags for different queries -tags = {"energy": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"]} +tags = { + "energy": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ] + } VEHICLE_POWER_DEMAND_FINAL_SELECT = """ SELECT @@ -56,7 +62,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -72,7 +79,8 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', 
'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -100,4 +108,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) From 38af177a02bd47cc691201083f4192f61fa2dedc Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:51:46 -0700 Subject: [PATCH 177/335] remove trailing whitespaces --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index b8cd24b55..57def52de 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -5,8 +5,8 @@ # tags for different queries tags = { "energy": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ] } @@ -62,7 +62,7 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL', 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ @@ -108,5 +108,5 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) From fceedf874599c68852aa8feb016921b12abd358e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 20 May 2020 21:31:20 -0700 Subject: [PATCH 178/335] Add several 
accelerations (with/without noise, with/without failsafes) to custom output --- flow/core/kernel/vehicle/base.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index eb88ff397..ed53773cb 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -709,10 +709,6 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" raise NotImplementedError - def get_2D_position(self, veh_id, error=-1001): - """Return (x, y) position of vehicle with veh_id.""" - raise NotImplementedError - def get_accel_no_noise_no_failsafe(self, veh_id): """Return the acceleration without noise of vehicle with veh_id.""" raise NotImplementedError From df182ad6c820b1fd2b05db9ce6a305aee248cec5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 24 May 2020 23:20:29 -0700 Subject: [PATCH 179/335] fix accel with noise with failsafe output --- flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/base.py | 20 ++++++++++++++------ flow/core/kernel/vehicle/traci.py | 10 ++++++++++ 3 files changed, 25 insertions(+), 6 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 3f6a0f4ae..1169ce5b8 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -113,6 +113,7 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) + accel_no_noise_with_failsafe = accel if self.fail_safe == 'instantaneous': accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index ed53773cb..f6f8ee382 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -694,15 +694,19 @@ def 
get_accel(self, veh_id): raise NotImplementedError def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise without failsafe of vehicle with veh_id.""" raise NotImplementedError def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """Update stored acceleration without noise of vehicle with veh_id.""" + """Update stored acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """Update stored acceleration with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_2d_position(self, veh_id, error=-1001): @@ -710,15 +714,19 @@ def get_2d_position(self, veh_id, error=-1001): raise NotImplementedError def get_accel_no_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise without failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_no_noise_with_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration without noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_accel_with_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise of vehicle with veh_id.""" + """Return the acceleration with noise without failsafe of vehicle with veh_id.""" + raise NotImplementedError + + def get_accel_with_noise_with_failsafe(self, veh_id): + """Return the acceleration 
with noise with failsafe of vehicle with veh_id.""" raise NotImplementedError def get_realized_accel(self, veh_id): diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 344bcfde2..5de35956f 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1152,6 +1152,10 @@ def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsa """See parent class.""" self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe + def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): + """See parent class.""" + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = accel_with_noise_with_failsafe + def get_accel_no_noise_no_failsafe(self, veh_id): """See parent class.""" if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: @@ -1170,6 +1174,12 @@ def get_accel_with_noise_no_failsafe(self, veh_id): self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] + def get_accel_with_noise_with_failsafe(self, veh_id): + """See parent class.""" + if "accel_with_noise_with_failsafe" not in self.__vehicles[veh_id]: + self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None + return self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] + def get_realized_accel(self, veh_id): """See parent class.""" return (self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step From d88840578f88c70da428d829b7b9d22024d6bf52 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 16:57:52 -0700 Subject: [PATCH 180/335] fix rebase errors --- flow/controllers/base_controller.py | 1 + flow/core/kernel/vehicle/traci.py | 8 -------- flow/data_pipeline/data_pipeline.py | 10 ---------- 3 files changed, 1 insertion(+), 18 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 1169ce5b8..ac29bca2e 100755 
--- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -92,6 +92,7 @@ def get_action(self, env): env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, None) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 5de35956f..1c0b5f19b 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -223,14 +223,6 @@ def update(self, reset): self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the number of not departed vehicles - self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ - sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - - # update the number of not departed vehicles - self.num_not_departed += sim_obs[tc.VAR_LOADED_VEHICLES_NUMBER] - \ - sim_obs[tc.VAR_DEPARTED_VEHICLES_NUMBER] - # update the "headway", "leader", and "follower" variables for veh_id in self.__ids: try: diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 11d85cb0d..aea9b349c 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -88,16 +88,6 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): return -def extra_init(): - """Return the dictionary with all the field pre-populated with empty list.""" - extra_info = {"time_step": [], "id": [], "x": [], "y": [], "speed": [], "headway": [], - "target_accel_with_noise_with_failsafe": [], "target_accel_no_noise_no_failsafe": [], - "target_accel_with_noise_no_failsafe": [], "target_accel_no_noise_with_failsafe": [], - "realized_accel": [], "leader_id": [], 
"follower_id": [], - "leader_rel_speed": [], "road_grade": [], "source_id": []} - return extra_info - - def get_extra_info(veh_kernel, extra_info, veh_ids): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: From 69f6f5536a3be4d885652471c3008da258e58416 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 17:57:41 -0700 Subject: [PATCH 181/335] rm deleted file --- flow/data_pipeline/datapipeline_test.py | 37 ------------------------- 1 file changed, 37 deletions(-) delete mode 100644 flow/data_pipeline/datapipeline_test.py diff --git a/flow/data_pipeline/datapipeline_test.py b/flow/data_pipeline/datapipeline_test.py deleted file mode 100644 index 0e1a50518..000000000 --- a/flow/data_pipeline/datapipeline_test.py +++ /dev/null @@ -1,37 +0,0 @@ -"""functions that calculates the expected result for testing.""" -import math - -# Vehicle Mass -M = 1200 -# Gravity -g = 9.81 -# Density of Air -ro_air = 1.225 -# Rolling resistance coefficient -C_r = .005 -# Aerodynamic drag coefficient -C_a = 0.3 -# Vehicle Cross sectional Area -A = 2.6 -# Road grade -theta = 0 - - -def heavyside(inp): - """Return 1 if input is positive.""" - return 0 if inp <= 0 else 1 - - -def calculate_power(mu, acceleration, M=M, g=g, theta=theta, C_r=C_r, ro_air=ro_air, A=A, C_a=C_a): - """Calculate the expected power for POWER_DEMAND_MODEL query.""" - acceleration = (0.8 + ((1 - 0.8) * heavyside(acceleration)) * acceleration) - accel_and_slope = M * mu * (acceleration + g * math.sin(theta)) - rolling_friction = M * g * C_r * mu - air_drag = .5 * ro_air * A * C_a * mu**3 - power = accel_and_slope + rolling_friction + air_drag - return power - - -def apply_energy_one(row): - """Apply the power calculation to a row of the dataframe.""" - return [row[0], row[1], calculate_power(row[4], row[6])] From 4f2f23ec7d47bff699baeac9bf8810af68f2f465 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 17:58:58 -0700 Subject: [PATCH 
182/335] add return carriage to eof --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 3704a7a1c..25565bb49 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -163,4 +163,4 @@ "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( edge_id)) / (env.k.network.edge_length(edge_id) * env.k.network.num_lanes(edge_id)), -} \ No newline at end of file +} From d2ba0694ef7cf0e4f6c913d4e855011fbcdc76e2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 18:00:46 -0700 Subject: [PATCH 183/335] revert accidental change --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index f25a9fcac..779fdb0f4 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -175,7 +175,7 @@ def rl_actions(*_): for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) - if done: + if type(done) is dict and done['__all__'] or type(done) is not dict and done: break # Store the information from the run in info_dict. 
From 8eee7722bc28ae05ac330e741e33ee9b659391a2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 25 May 2020 18:03:02 -0700 Subject: [PATCH 184/335] rename trajectory table --- flow/data_pipeline/query.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e8ac34abc..b6e66fcec 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -36,14 +36,14 @@ class QueryStrings(Enum): SAMPLE = """ SELECT * - FROM fact_vehicle_trace + FROM trajectory_table WHERE date = \'{date}\' AND partition_name=\'{partition}\' LIMIT 15; """ UPDATE_PARTITION = """ - ALTER TABLE fact_vehicle_trace + ALTER TABLE trajectory_table ADD IF NOT EXISTS PARTITION (date = \'{date}\', partition_name=\'{partition}\'); """ @@ -56,7 +56,7 @@ class QueryStrings(Enum): target_accel_with_noise_with_failsafe AS acceleration, road_grade, source_id - FROM fact_vehicle_trace + FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -73,7 +73,7 @@ class QueryStrings(Enum): target_accel_no_noise_with_failsafe AS acceleration, road_grade, source_id - FROM fact_vehicle_trace + FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' @@ -93,7 +93,7 @@ class QueryStrings(Enum): OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, LAG(speed, 1) OVER (PARTITION BY id ORDER BY time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS prev_speed - FROM fact_vehicle_trace + FROM trajectory_table WHERE 1 = 1 AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' From db0442b45d1145cd8097d0fc446d41c89b81b599 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 25 May 2020 22:01:13 -0700 Subject: [PATCH 185/335] Ported to Keras, initial implementation of loading to RLLib --- .../imitating_controller.py | 2 +- .../imitation_learning/imitating_network2.py | 131 ++++++++++++++++++ 
.../imitation_learning/keras_utils.py | 47 +++++++ .../imitation_learning/ppo_model.py | 69 +++++++++ .../controllers/imitation_learning/trainer.py | 6 +- 5 files changed, 251 insertions(+), 4 deletions(-) create mode 100644 flow/controllers/imitation_learning/imitating_network2.py create mode 100644 flow/controllers/imitation_learning/keras_utils.py create mode 100644 flow/controllers/imitation_learning/ppo_model.py diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 70c483596..935a66831 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -58,4 +58,4 @@ def get_accel(self, env): return action[ind] # in other cases, acceleration is the output of the network - return action[0] + return action diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py new file mode 100644 index 000000000..f750fbad6 --- /dev/null +++ b/flow/controllers/imitation_learning/imitating_network2.py @@ -0,0 +1,131 @@ +import numpy as np +import tensorflow as tf +from utils_tensorflow import * +from keras_utils import * +import tensorflow_probability as tfp +from flow.controllers.base_controller import BaseController +from replay_buffer import ReplayBuffer + + +class ImitatingNetwork2(): + """ + Class containing neural network which learns to imitate a given expert controller. 
+ """ + + def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): + + """ + Initializes and constructs neural network + + Args: + sess: Tensorflow session variable + action_dim: dimension of action space (determines size of network output) + obs_dim: dimension of observation space (size of network input) + num_layers: number of hidden layers (for an MLP) + size: size of each layer in network + learning_rate: learning rate used in optimizer + replay_buffer_size: maximum size of replay buffer used to hold data for training + training: boolean, whether the network will be trained (as opposed to loaded) + stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy + policy_scope: variable scope used by Tensorflow for weights/biases + load_existing: boolean, whether to load an existing tensorflow model + load_path: path to directory containing an existing tensorflow model + + """ + + self.sess = sess + self.action_dim = action_dim + self.obs_dim = obs_dim + self.num_layers = num_layers + self.size = size + self.learning_rate = learning_rate + self.training = training + self.stochastic=stochastic + + print("INNNNNITITTTTT") + + # load network if specified, or construct network + if load_existing: + self.load_network(load_path) + + else: + self.build_network() + self.compile_network() + + + # init replay buffer + if self.training: + self.replay_buffer = ReplayBuffer(replay_buffer_size) + else: + self.replay_buffer = None + + + def build_network(self): + """ + Defines neural network for choosing actions. 
Defines placeholders and forward pass + """ + # setup placeholders for network input and labels for training, and hidden layers/output + if self.stochastic: + self.model = build_neural_net_stochastic(self.obs_dim, self.action_dim, self.num_layers, self.size) + else: + self.model = build_neural_net_deterministic(self.obs_dim, self.action_dim, self.num_layers, self.size) + + + def compile_network(self): + loss = get_loss(self.stochastic) + self.model.compile(loss=loss, optimizer='adam') + + + def train(self, observation_batch, action_batch): + """ + Executes one training step for the given batch of observation and action data + """ + # reshape action_batch to ensure a shape (batch_size, action_dim) + action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) + batch_size = action_batch.shape[0] + self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1) + + def get_accel_from_observation(self, observation): + """ + Gets the network's acceleration prediction based on given observation/state + """ + + # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays + if len(observation.shape)<=1: + observation = observation[None] + # "batch size" is 1, so just get single acceleration/acceleration vector + network_output = self.model.predict(observation) + if self.stochastic: + mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] + var = np.exp(2 * log_std) + action = np.random.multivariate_normal(mean[0], var) + return action + else: + return network_output + + def get_accel(self, env): + """ + Get network's acceleration prediction(s) based on given env + """ + observation = env.get_state() + return self.get_accel_from_observation(observation) + + + def add_to_replay_buffer(self, rollout_list): + """ Add rollouts to replay buffer """ + + self.replay_buffer.add_rollouts(rollout_list) + + + def sample_data(self, batch_size): + """ 
Sample a batch of data from replay buffer """ + + return self.replay_buffer.sample_batch(batch_size) + + def save_network(self, save_path): + """ Save network to given path and to tensorboard """ + + self.model.save(save_path) + # tensorboard + + # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py new file mode 100644 index 000000000..429c75bea --- /dev/null +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -0,0 +1,47 @@ +import tensorflow as tf +import tensorflow_probability as tfp +from tensorflow.keras import Input +from tensorflow.keras.layers import Dense + +def build_neural_net_deterministic(input_dim, action_dim, n_layers, size): + input_layer = Input(shape=(input_dim, )) + curr_layer = input_layer + + for _ in range(n_layers): + dense = Dense(size, activation="tanh") + curr_layer = dense(curr_layer) + output_layer = Dense(action_dim, activation=None)(curr_layer) + model = tf.keras.Model(inputs=input_layer, outputs=output_layer, name="policy_network") + + return model + +def build_neural_net_stochastic(input_dim, action_dim, n_layers, size): + input_layer = Input(shape=(input_dim, )) + curr_layer = input_layer + + for _ in range(n_layers): + dense = Dense(size, activation="tanh") + curr_layer = dense(curr_layer) + + out = Dense(2 * action_dim, activation=None)(curr_layer) + model = tf.keras.Model(inputs=input_layer, outputs=out, name="policy_network") + + return model + +def get_loss(stochastic): + if stochastic: + return negative_log_likelihood_loss + else: + return tf.keras.losses.mean_squared_error + +def negative_log_likelihood_loss(y, distribution_params): + assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + action_dim = distribution_params.shape[1]//2 + means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] + stds = 
tf.math.exp(log_stds) + variances = tf.math.square(stds) + dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) + loss = dist.log_prob(y) + loss = tf.negative(loss) + loss = tf.reduce_mean(loss) + (0.5 * tf.norm(variances)) + return loss diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py new file mode 100644 index 000000000..f5c022cb8 --- /dev/null +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -0,0 +1,69 @@ +import numpy as np + +from ray.rllib.models.tf.misc import normc_initializer +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.utils.framework import get_activation_fn, try_import_tf + +tf = try_import_tf() + + + +class PPONetwork(TFModelV2): + + def __init__(self, obs_space, action_space, num_outputs, model_config, name): + + super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) + self.setup_model(obs_space, action_space, model_config, num_outputs, None) + + def setup_model(self, obs_space, action_space, model_config, num_outputs, load_path): + if load_path: + try: + loaded_policy_model = tf.keras.load_model(load_path) + inp_layer = loaded_policy_model.input + curr_layer = loaded_policy_model.layers[-2].output + + except Exception as e: + print("Error in loading existing model specified by load_path") + raise e + else: + activation = get_activation_fn(model_config.get("fcnet_activation")) + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") + + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") + curr_layer = inp_layer + + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) + i += 1 + + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + + if not vf_share_layers: + 
curr_layer = inp_layer + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), activation=activation)(curr_layer) + i += 1 + + output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + + self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) + self.register_variables(self.base_model.variables) + + + def forward(self, input_dict, state, seq_lens): + policy_out, value_out = self.base_model(input_dict["obs_flat"]) + self.value_out = value_out + return policy_out, state + + def value_function(self): + return tf.reshape(self.value_out, [-1]) + + def import_from_h5(self, import_file): + self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) + + + + diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index b6d04ed25..940feffb8 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -7,7 +7,7 @@ from flow.utils.registry import make_create_env from examples.exp_configs.rl.multiagent.multiagent_straight_road import flow_params from imitating_controller import ImitatingController -from imitating_network import ImitatingNetwork +from imitating_network2 import ImitatingNetwork2 from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams @@ -51,9 +51,9 @@ def __init__(self, params): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) + self.action_network = ImitatingNetwork2(self.sess, 
self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) - tf.global_variables_initializer().run(session=self.sess) + # tf.global_variables_initializer().run(session=self.sess) # controllers setup v_des = self.params['v_des'] # for FollowerStopper From ed065b3112432ccaae1e4f0d872a578dd4b61773 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 13:20:29 -0700 Subject: [PATCH 186/335] Bug fixes for starting training from imitation model --- .../rl/multiagent/multiagent_straight_road.py | 3 +- examples/train.py | 55 ++++++++++++- .../imitation_learning/ppo_model.py | 80 ++++++++++++------- flow/envs/multiagent/i210.py | 49 ++++++------ 4 files changed, 131 insertions(+), 56 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index ec71a2f42..00b60ff0b 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -58,7 +58,8 @@ 'local_reward': True, 'lead_obs': True, # whether to reroute vehicles once they have exited - "reroute_on_exit": True + "reroute_on_exit": True, + "control_range": [500, 2300] }) diff --git a/examples/train.py b/examples/train.py index 1689d846f..361a1f277 100644 --- a/examples/train.py +++ b/examples/train.py @@ -184,7 +184,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["model"].update({"fcnet_hiddens": [12, 12]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.999 # discount rate config["use_gae"] = True @@ -252,6 +252,56 @@ def on_episode_end(info): register_env(gym_name, create_env) return alg_run, gym_name, config +def train_rllib_with_imitation(submodule, flags): 
+ """Train policies using the PPO algorithm in RLlib.""" + import ray + from flow.controllers.imitation_learning.ppo_model import PPONetwork + from ray.rllib.models import ModelCatalog + + flow_params = submodule.flow_params + flow_params['sim'].render = flags.render + policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) + policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) + policies_to_train = getattr(submodule, "policies_to_train", None) + + alg_run, gym_name, config = setup_exps_rllib( + flow_params, flags.num_cpus, flags.num_rollouts, flags, + policy_graphs, policy_mapping_fn, policies_to_train) + + ModelCatalog.register_custom_model("Imitation_Learning", PPONetwork) + + config['num_workers'] = flags.num_cpus + config['env'] = gym_name + config['model']['custom_model'] = "Imitation_Learning" + + # create a custom string that makes looking at the experiment names easier + def trial_str_creator(trial): + return "{}_{}".format(trial.trainable_name, trial.experiment_tag) + + if flags.local_mode: + ray.init(local_mode=True) + else: + ray.init() + + exp_dict = { + "run_or_experiment": alg_run, + "name": gym_name, + "config": config, + "checkpoint_freq": flags.checkpoint_freq, + "checkpoint_at_end": True, + 'trial_name_creator': trial_str_creator, + "max_failures": 0, + "stop": { + "training_iteration": flags.num_iterations, + }, + } + date = datetime.now(tz=pytz.utc) + date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") + s3_string = "s3://i210.experiments/i210/" \ + + date + '/' + flags.exp_title + if flags.use_s3: + exp_dict['upload_dir'] = s3_string + tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" @@ -278,6 +328,7 @@ def trial_str_creator(trial): ray.init(local_mode=True) else: ray.init() + exp_dict = { "run_or_experiment": alg_run, "name": gym_name, @@ -472,7 +523,7 @@ def main(args): # Perform the training 
operation. if flags.rl_trainer.lower() == "rllib": - train_rllib(submodule, flags) + train_rllib_with_imitation(submodule, flags) elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index f5c022cb8..643fd5670 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -1,8 +1,10 @@ import numpy as np - +import json +import h5py from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 from ray.rllib.utils.framework import get_activation_fn, try_import_tf +# from flow.controllers.imitation_learning.keras_utils import * tf = try_import_tf() @@ -13,32 +15,25 @@ class PPONetwork(TFModelV2): def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) - self.setup_model(obs_space, action_space, model_config, num_outputs, None) - - def setup_model(self, obs_space, action_space, model_config, num_outputs, load_path): - if load_path: - try: - loaded_policy_model = tf.keras.load_model(load_path) - inp_layer = loaded_policy_model.input - curr_layer = loaded_policy_model.layers[-2].output - - except Exception as e: - print("Error in loading existing model specified by load_path") - raise e - else: - activation = get_activation_fn(model_config.get("fcnet_activation")) - hiddens = model_config.get("fcnet_hiddens", []) - vf_share_layers = model_config.get("vf_share_layers") - - inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") - curr_layer = inp_layer + self.setup_model(obs_space, action_space, model_config, num_outputs, '/Users/akashvelu/Desktop/follower_stopper1.h5') + self.register_variables(self.base_model.variables) - i = 1 - for size 
in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) - i += 1 + def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): + + activation = get_activation_fn(model_config.get("fcnet_activation")) + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") + curr_layer = inp_layer + + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) + i += 1 + + + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) if not vf_share_layers: curr_layer = inp_layer @@ -50,7 +45,36 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, load_p output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - self.register_variables(self.base_model.variables) + + + if imitation_h5_path: + # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) + imitation_inp = tf.keras.layers.Input(shape=(3,), name="imitation_inp") + curr_imitation_layer = imitation_inp + i = 1 + for size in hiddens: + curr_imitation_layer = tf.keras.layers.Dense(size, name="imitation_hidden_layer_{}".format(i), activation=activation)(curr_imitation_layer) + i += 1 + + imitation_output_layer = tf.keras.layers.Dense(num_outputs, name="imitation_output_layer", activation=None)(curr_imitation_layer) + imitation_model = tf.keras.Model(imitation_inp, [imitation_output_layer]) + + 
imitation_model.load_weights(imitation_h5_path) + self.register_variables(imitation_model.variables) + + for i in range(len(hiddens)): + imitation_layer = imitation_model.layers[i + 1] + base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) + base_model_layer = self.base_model.get_layer(base_model_layer_name) + base_model_layer.set_weights(imitation_layer.get_weights()) + + imitation_layer = imitation_model.layers[-1] + base_model_layer_name = 'policy_output_layer' + base_model_layer = self.base_model.get_layer(base_model_layer_name) + base_model_layer.set_weights(imitation_layer.get_weights()) + + + def forward(self, input_dict, state, seq_lens): @@ -63,7 +87,3 @@ def value_function(self): def import_from_h5(self, import_file): self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) - - - - diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index a6e39cdec..14e34d927 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -68,6 +68,7 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): self.num_enter_lanes = 5 self.entrance_edge = "119257914" self.exit_edge = "119257908#3" + self.control_range = env_params.additional_params['control_range'] self.leader = [] @property @@ -126,17 +127,18 @@ def get_state(self): if self.lead_obs: veh_info = {} for rl_id in self.k.vehicle.get_rl_ids(): - speed = self.k.vehicle.get_speed(rl_id) - lead_id = self.k.vehicle.get_leader(rl_id) - if lead_id in ["", None]: - # in case leader is not visible - lead_speed = SPEED_SCALE - headway = HEADWAY_SCALE - else: - lead_speed = self.k.vehicle.get_speed(lead_id) - headway = self.k.vehicle.get_headway(rl_id) - veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway / HEADWAY_SCALE, - lead_speed / SPEED_SCALE])}) + if self.k.vehicle.get_x_by_id(rl_id) < self.control_range[1] and self.k.vehicle.get_x_by_id(rl_id) > self.control_range[0]: + speed = 
self.k.vehicle.get_speed(rl_id) + lead_id = self.k.vehicle.get_leader(rl_id) + if lead_id in ["", None]: + # in case leader is not visible + lead_speed = SPEED_SCALE + headway = HEADWAY_SCALE + else: + lead_speed = self.k.vehicle.get_speed(lead_id) + headway = self.k.vehicle.get_headway(rl_id) + veh_info.update({rl_id: np.array([speed / SPEED_SCALE, headway / HEADWAY_SCALE, + lead_speed / SPEED_SCALE])}) else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) @@ -153,24 +155,25 @@ def compute_reward(self, rl_actions, **kwargs): if self.env_params.additional_params["local_reward"]: des_speed = self.env_params.additional_params["target_velocity"] for rl_id in self.k.vehicle.get_rl_ids(): - rewards[rl_id] = 0 - speeds = [] - follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - if follow_speed >= 0: - speeds.append(follow_speed) - if self.k.vehicle.get_speed(rl_id) >= 0: - speeds.append(self.k.vehicle.get_speed(rl_id)) - if len(speeds) > 0: - # rescale so the critic can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2) + if self.k.vehicle.get_x_by_id(rl_id) < self.control_range[1] and self.k.vehicle.get_x_by_id(rl_id) > self.control_range[0]: + rewards[rl_id] = 0 + speeds = [] + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) + if follow_speed >= 0: + speeds.append(follow_speed) + if self.k.vehicle.get_speed(rl_id) >= 0: + speeds.append(self.k.vehicle.get_speed(rl_id)) + if len(speeds) > 0: + # rescale so the critic can estimate it quickly + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2) else: speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) des_speed = self.env_params.additional_params["target_velocity"] # rescale so the critic can estimate it quickly reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - 
des_speed)) ** 2 for speed in speeds]) / (des_speed ** 2)) - rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()} + rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids() if self.k.vehicle.get_x_by_id(rl_id) < self.control_range[1] and self.k.vehicle.get_x_by_id(rl_id) > self.control_range[0]} return rewards def additional_command(self): From cc0aa3276e1d52dc12e4059ff0abd225b5af8858 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 13:35:40 -0700 Subject: [PATCH 187/335] Minor cleanup --- examples/train.py | 5 ++++- .../imitation_learning/ppo_model.py | 19 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/examples/train.py b/examples/train.py index 361a1f277..8c65a68c1 100644 --- a/examples/train.py +++ b/examples/train.py @@ -253,7 +253,7 @@ def on_episode_end(info): return alg_run, gym_name, config def train_rllib_with_imitation(submodule, flags): - """Train policies using the PPO algorithm in RLlib.""" + """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" import ray from flow.controllers.imitation_learning.ppo_model import PPONetwork from ray.rllib.models import ModelCatalog @@ -268,10 +268,13 @@ def train_rllib_with_imitation(submodule, flags): flow_params, flags.num_cpus, flags.num_rollouts, flags, policy_graphs, policy_mapping_fn, policies_to_train) + # Register custom model ModelCatalog.register_custom_model("Imitation_Learning", PPONetwork) config['num_workers'] = flags.num_cpus config['env'] = gym_name + + # set model to the custom model for run config['model']['custom_model'] = "Imitation_Learning" # create a custom string that makes looking at the experiment names easier diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 643fd5670..68e7f13dc 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ 
-15,26 +15,32 @@ class PPONetwork(TFModelV2): def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) + + # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, '/Users/akashvelu/Desktop/follower_stopper1.h5') + # register variables for base model self.register_variables(self.base_model.variables) + def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): activation = get_activation_fn(model_config.get("fcnet_activation")) hiddens = model_config.get("fcnet_hiddens", []) vf_share_layers = model_config.get("vf_share_layers") + # set up model inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") curr_layer = inp_layer + # hidden layers and output for policy i = 1 for size in hiddens: curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) i += 1 - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + # set up value function if not vf_share_layers: curr_layer = inp_layer i = 1 @@ -44,12 +50,15 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + # build model from layers self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) if imitation_h5_path: # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - imitation_inp = tf.keras.layers.Input(shape=(3,), name="imitation_inp") + + # set up a model to load in weights from imitation network (without the training variables, e.g. 
adam variables) + imitation_inp = tf.keras.layers.Input(shape=obs_space.shape, name="imitation_inp") curr_imitation_layer = imitation_inp i = 1 for size in hiddens: @@ -59,9 +68,12 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat imitation_output_layer = tf.keras.layers.Dense(num_outputs, name="imitation_output_layer", activation=None)(curr_imitation_layer) imitation_model = tf.keras.Model(imitation_inp, [imitation_output_layer]) + # load weights from file into model imitation_model.load_weights(imitation_h5_path) + # register model variables (to prevent error) self.register_variables(imitation_model.variables) + # copy these weights into the base model (only the policy hidden layer and output weights) for i in range(len(hiddens)): imitation_layer = imitation_model.layers[i + 1] base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) @@ -74,9 +86,6 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat base_model_layer.set_weights(imitation_layer.get_weights()) - - - def forward(self, input_dict, state, seq_lens): policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out From 3c6dcf71c0ac4219e13da3a3a58471a69dfc88d1 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 13:38:08 -0700 Subject: [PATCH 188/335] added apply acceleratino function which uses setSpeed() method instead of slowDown() --- flow/core/kernel/network/flow_params.json | 2 ++ flow/core/kernel/vehicle/base.py | 17 +++++++++++++++++ flow/core/kernel/vehicle/traci.py | 14 ++++++++++++++ 3 files changed, 33 insertions(+) create mode 100644 flow/core/kernel/network/flow_params.json diff --git a/flow/core/kernel/network/flow_params.json b/flow/core/kernel/network/flow_params.json new file mode 100644 index 000000000..c70a22e63 --- /dev/null +++ b/flow/core/kernel/network/flow_params.json @@ -0,0 +1,2 @@ +{ + "env": \ No newline at end of file diff --git a/flow/core/kernel/vehicle/base.py 
b/flow/core/kernel/vehicle/base.py index 647ef37fe..21edd8d4d 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -124,6 +124,23 @@ def remove(self, veh_id): def apply_acceleration(self, veh_id, acc): """Apply the acceleration requested by a vehicle in the simulator. + In SUMO, this function applies slowDown method which applies smoothing. + + Parameters + ---------- + veh_id : str or list of str + list of vehicle identifiers + acc : float or array_like + requested accelerations from the vehicles + """ + raise NotImplementedError + + def apply_acceleration_not_smooth(self, veh_id, acc): + """Apply the acceleration requested by a vehicle in the simulator. + + In SUMO, this function applies setSpeed method which doesn't apply + smoothing. + Parameters ---------- veh_id : str or list of str diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a4e06257..b56e36ae0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -964,6 +964,20 @@ def apply_acceleration(self, veh_ids, acc): next_vel = max([this_vel + acc[i] * self.sim_step, 0]) self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) + def apply_acceleration_not_smooth(self, veh_ids, acc): + """See parent class.""" + # to hand the case of a single vehicle + if type(veh_ids) == str: + veh_ids = [veh_ids] + acc = [acc] + + for i, vid in enumerate(veh_ids): + if acc[i] is not None and vid in self.get_ids(): + self.__vehicles[vid]["accel"] = acc[i] + this_vel = self.get_speed(vid) + next_vel = max([this_vel + acc[i] * self.sim_step, 0]) + self.kernel_api.vehicle.setSpeed(vid, next_vel) + def apply_lane_change(self, veh_ids, direction): """See parent class.""" # to hand the case of a single vehicle From 3a2e1359d1c37194351f6b54bd840ad6722fc279 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 13:38:12 -0700 Subject: [PATCH 189/335] Minor cleanup --- .../imitation_learning/ppo_model.py | 20 
+++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 68e7f13dc..49d354488 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -74,16 +74,20 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat self.register_variables(imitation_model.variables) # copy these weights into the base model (only the policy hidden layer and output weights) - for i in range(len(hiddens)): - imitation_layer = imitation_model.layers[i + 1] - base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) + try: + for i in range(len(hiddens)): + imitation_layer = imitation_model.layers[i + 1] + base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) + base_model_layer = self.base_model.get_layer(base_model_layer_name) + base_model_layer.set_weights(imitation_layer.get_weights()) + + imitation_layer = imitation_model.layers[-1] + base_model_layer_name = 'policy_output_layer' base_model_layer = self.base_model.get_layer(base_model_layer_name) base_model_layer.set_weights(imitation_layer.get_weights()) - - imitation_layer = imitation_model.layers[-1] - base_model_layer_name = 'policy_output_layer' - base_model_layer = self.base_model.get_layer(base_model_layer_name) - base_model_layer.set_weights(imitation_layer.get_weights()) + except Exception as e: + print("Error in loading weights from h5 file to this model") + raise e def forward(self, input_dict, state, seq_lens): From ddf6a2435d0c2ca7eafe0dd6292ec574626bd397 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 14:12:23 -0700 Subject: [PATCH 190/335] added failsafe methods for max accel/decel and speed limit, and all --- flow/controllers/base_controller.py | 78 +++++++++++++++++++++++++++-- 1 file changed, 75 insertions(+), 3 deletions(-) diff --git a/flow/controllers/base_controller.py 
b/flow/controllers/base_controller.py index 7adcdf310..0984349d3 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -34,7 +34,7 @@ class BaseController: delay : int delay in applying the action (time) fail_safe : str - Should be either "instantaneous" or "safe_velocity" + Should be "instantaneous", "safe_velocity", "feasible_accel", or "all" noise : double variance of the gaussian from which to sample a noisy acceleration """ @@ -75,8 +75,10 @@ def get_action(self, env): time step. This method also augments the controller with the desired level of - stochastic noise, and utlizes the "instantaneous" or "safe_velocity" - failsafes if requested. + stochastic noise, and utlizes the "instantaneous", "safe_velocity", + "feasible_accel", or "all" failsafes if requested. The "all" failsafe + performs all three failsafes with this order: 1)"safe_velocity", + 2) "feasible_accel", 3) "instantaneous". Parameters ---------- @@ -115,6 +117,13 @@ def get_action(self, env): accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) elif self.fail_safe == 'safe_velocity': accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + elif self.fail_safe == 'feasible_accel': + accel_without_noise = self.get_feasible_action(accel_without_noise) + elif self.fail_safe == 'all': + accel_without_noise = self.get_safe_velocity_action(env, accel_without_noise) + accel_without_noise = self.get_feasible_action(accel_without_noise) + accel_without_noise = self.get_safe_action_instantaneous(env, accel_without_noise) + env.k.vehicle.update_accel_without_noise(self.veh_id, accel_without_noise) # add noise to the accelerations, if requested @@ -126,6 +135,12 @@ def get_action(self, env): accel = self.get_safe_action_instantaneous(env, accel) elif self.fail_safe == 'safe_velocity': accel = self.get_safe_velocity_action(env, accel) + elif self.fail_safe == 'feasible_accel': + accel = self.get_feasible_action(accel) 
+ elif self.fail_safe == 'all': + accel = self.get_safe_velocity_action(env, accel) + accel = self.get_feasible_action(accel) + accel = self.get_safe_action_instantaneous(env, accel) return accel @@ -172,6 +187,14 @@ def get_safe_action_instantaneous(self, env, action): # if the vehicle will crash into the vehicle ahead of it in the # next time step (assuming the vehicle ahead of it is not # moving), then stop immediately + print( + "=====================================\n" + "Vehicle {} is about to crash. Instantaneous acceleration " + "clipping applied.\n" + "=====================================".format(self.veh_id)) + + print("Vehicle {} is about to crash. Instantaneous acceleration" + "clipping applied.".format(self.veh_id)) return -this_vel / sim_step else: # if the vehicle is not in danger of crashing, continue with @@ -245,4 +268,53 @@ def safe_velocity(self, env): v_safe = 2 * h / env.sim_step + dv - this_vel * (2 * self.delay) + # check for speed limit + this_edge = env.k.vehicle.get_edge(self.veh_id) + edge_speed_limit = env.k.network.speed_limit(this_edge) + + if v_safe > edge_speed_limit: + v_safe = edge_speed_limit + print( + "=====================================\n" + "Speed of vehicle {} is greater than speed limit. Safe " + "velocity clipping applied.\n" + "=====================================".format(self.veh_id)) + return v_safe + + def get_feasible_action(self, action): + """Perform the "feasible_accel" failsafe action. + + Checks if the computed acceleration would put us above maximum + acceleration or deceleration. If it would, output the acceleration + equal to maximum acceleration or deceleration. + + Parameters + ---------- + action : float + requested acceleration action + + Returns + ------- + float + the requested action clipped by the safe velocity + """ + if action > self.max_accel: + action = self.max_accel + + print( + "=====================================\n" + "Acceleration of vehicle {} is greater than the max " + "acceleration. 
Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) + + if action < -self.max_deaccel: + action = -self.max_deaccel + + print( + "=====================================\n" + "Deceleration of vehicle {} is greater than the max " + "deceleration. Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) + + return action From 53cf035684b02668fa2116942c75b02cd4398d29 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 14:22:16 -0700 Subject: [PATCH 191/335] removed json file which was added by mistake --- flow/core/kernel/network/flow_params.json | 2 -- 1 file changed, 2 deletions(-) delete mode 100644 flow/core/kernel/network/flow_params.json diff --git a/flow/core/kernel/network/flow_params.json b/flow/core/kernel/network/flow_params.json deleted file mode 100644 index c70a22e63..000000000 --- a/flow/core/kernel/network/flow_params.json +++ /dev/null @@ -1,2 +0,0 @@ -{ - "env": \ No newline at end of file From b16d949c32b0ef2ef6d1a9ad400fe514769418a0 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 26 May 2020 14:28:40 -0700 Subject: [PATCH 192/335] leader utils added --- flow/data_pipeline/data_pipeline.py | 12 +-- flow/data_pipeline/leaderboard_utils.py | 116 ++++++++++++++++++++++++ flow/data_pipeline/query.py | 13 +-- 3 files changed, 129 insertions(+), 12 deletions(-) create mode 100644 flow/data_pipeline/leaderboard_utils.py diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 72caa5218..d414aa420 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -93,19 +93,19 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): for vid in veh_ids: extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) extra_info["id"].append(vid) + position = veh_kernel.get_2d_position(vid) + extra_info["x"].append(position[0]) + extra_info["y"].append(position[1]) + 
extra_info["speed"].append(veh_kernel.get_speed(vid)) extra_info["headway"].append(veh_kernel.get_headway(vid)) extra_info["acceleration"].append(veh_kernel.get_accel(vid)) + extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) + extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["leader_id"].append(veh_kernel.get_leader(vid)) extra_info["follower_id"].append(veh_kernel.get_follower(vid)) extra_info["leader_rel_speed"].append(veh_kernel.get_speed( veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) - extra_info["accel_without_noise"].append(veh_kernel.get_accel_without_noise(vid)) - extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) - position = veh_kernel.get_2d_position(vid) - extra_info["x"].append(position[0]) - extra_info["y"].append(position[1]) - extra_info["speed"].append(veh_kernel.get_speed(vid)) class AthenaQuery: diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py new file mode 100644 index 000000000..5eb5a7be6 --- /dev/null +++ b/flow/data_pipeline/leaderboard_utils.py @@ -0,0 +1,116 @@ +import os +import boto3 +import pandas as pd +from io import StringIO + + +def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipeline"): + """Fetch tables from s3 and store in ./result directory. 
+ + Parameters + ---------- + table_name: str + The name of table to retrieve from S3, the current available tables are: + fact_vehicle_trace + fact_energy_trace + fact_network_throughput_agg + fact_network_inflows_outflows + fact_vehicle_fuel_efficiency_agg + fact_network_metrics_by_distance_agg + fact_network_metrics_by_time_agg + fact_network_fuel_efficiency_agg + leaderboard_chart + bucket: str + the S3 bucket that holds these tables + """ + try: + os.makedirs("result/{}".format(table_name)) + except FileExistsError as e: + pass + s3 = boto3.client("s3") + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] + names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), + e.split("/")[2].replace("partition_name=", ""))for e in keys] + existing_results = os.listdir("./result/{}".format(table_name)) + for index in range(len(keys)): + if names[index] not in existing_results: + s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) + + +def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipeline", existing_results=()): + """Fetch tables from s3 and return them as in-memory pandas dataframe objects. 
+ + Parameters + ---------- + bucket: str + the S3 bucket that holds the tables + table_name: str + the name of the name to retrieve from S3, for detail see get_table_disk + existing_results: list + tables that should not be fetched, + the names must follow the convention: + {source_id(no run number)}_{query_name}.csv + + Returns + ------- + file_list: dict + a dictionary of pandas dataframes, each contains a table from S3 + The dataframs are keyed by their name: {source_id(no run number)}_{query_name}.csv + + """ + s3 = boto3.client("s3") + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] + names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), + e.split("/")[2].replace("partition_name=", ""))for e in keys] + results = dict() + for index in range(len(keys)): + if names[index] not in existing_results: + obj = s3.get_object(Bucket=bucket, Key=keys[index])["Body"] + obj_str = obj.read().decode("utf-8") + results[names[index]] = pd.read_csv(StringIO(obj_str)) + return results + + +def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline", existing_results=()): + """Fetch tables from s3 and return as urls, requires the bucket to have public access. 
+ + Parameters + ---------- + bucket: str + the S3 bucket that holds the tables + table_name: str + the name of the name to retrieve from S3, for detail see get_table_disk + existing_results: list + tables that should not be fetched, + the names must follow the convention: + {source_id(no run number)}_{query_name}.csv + + Returns + ------- + file_list: dict + a dictionary of urls, each contains a table from S3 + The urls are keyed by their name: {source_id(no run number)}_{query_name}.csv + + """ + s3 = boto3.client("s3") + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] + names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), + e.split("/")[2].replace("partition_name=", "")) for e in keys] + results = dict() + for index in range(len(keys)): + if names[index] not in existing_results: + results[names[index]] = "https://{}.s3.{}.amazonaws.com/{}".format(bucket, "us-west-2", keys[index]) + return results + + +def get_metadata(name, bucket="circles.data.pipeline"): + s3 = boto3.client("s3") + name_list = name.split('_') + source_id = "flow_{}".format(name_list[2]) + response = s3.head_object(Bucket=bucket, + Key="vehicle_trace_table/date={0}/partition_name={1}/{1}.csv".format(name_list[0], + source_id)) + return response["Metadata"] diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index abc3bcd53..5e9aa27b2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -19,7 +19,7 @@ "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] -VEHICLE_POWER_DEMAND_FINAL_SELECT = """ +VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT id, time_step, @@ -31,11 +31,11 @@ WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - {}) + {}) * acceleration + 9.81 * SIN(road_grade) + END 
* (1 - {0}) + {0}) * acceleration + 9.81 * SIN(road_grade) ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, - \'{}\' AS energy_model_id, + \'{1}\' AS energy_model_id, source_id - FROM {} + FROM {2} ORDER BY id, time_step """ @@ -119,8 +119,9 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_FINAL_SELECT.format('POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', - 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ WITH min_time AS ( From 528f0aace706fc8a3de99aba720bda7c0eb309b4 Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 15:14:00 -0700 Subject: [PATCH 193/335] fixed docstrings --- flow/controllers/base_controller.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 2f01faef2..7cbbef6db 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -261,8 +261,8 @@ def safe_velocity(self, env): Returns ------- float - maximum safe velocity given a maximum deceleration and delay in - performing the breaking action + maximum safe velocity given a maximum deceleration, delay in + performing the breaking action, and speed limit """ lead_id = env.k.vehicle.get_leader(self.veh_id) lead_vel = env.k.vehicle.get_speed(lead_id) @@ -302,7 +302,8 @@ def get_feasible_action(self, action): Returns ------- float - the requested action clipped by the safe velocity + the requested action clipped by the feasible acceleration or + deceleration. 
""" if action > self.max_accel: action = self.max_accel From cbf6a420b727f5bf1d60a9bf8ff7cef92bbfe5ae Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Tue, 26 May 2020 15:15:05 -0700 Subject: [PATCH 194/335] removed duplicated print --- flow/controllers/base_controller.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 7cbbef6db..95ecd1737 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -198,8 +198,6 @@ def get_safe_action_instantaneous(self, env, action): "clipping applied.\n" "=====================================".format(self.veh_id)) - print("Vehicle {} is about to crash. Instantaneous acceleration" - "clipping applied.".format(self.veh_id)) return -this_vel / sim_step else: # if the vehicle is not in danger of crashing, continue with From 288a1cf87e5216c56f6a8aeb0ea43eb058c108c1 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 16:54:10 -0700 Subject: [PATCH 195/335] Removed usage of rllib.utils.freamwork --- flow/controllers/imitation_learning/ppo_model.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 49d354488..c5a30fb1b 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -3,10 +3,9 @@ import h5py from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -from ray.rllib.utils.framework import get_activation_fn, try_import_tf +# from ray.rllib.utils.framework import get_activation_fn, try_import_tf # from flow.controllers.imitation_learning.keras_utils import * - -tf = try_import_tf() +import tensorflow as tf @@ -24,7 +23,7 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): def setup_model(self, obs_space, action_space, model_config, num_outputs, 
imitation_h5_path): - activation = get_activation_fn(model_config.get("fcnet_activation")) + activation = model_config.get("fcnet_activation") hiddens = model_config.get("fcnet_hiddens", []) vf_share_layers = model_config.get("vf_share_layers") From c1db60af46d9e08751bc263c3de1138afd15d0a7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Tue, 26 May 2020 17:03:22 -0700 Subject: [PATCH 196/335] Changed location of h5 file --- .../model_files/follower_stopper1.h5 | Bin 0 -> 35456 bytes .../controllers/imitation_learning/ppo_model.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) create mode 100644 flow/controllers/imitation_learning/model_files/follower_stopper1.h5 diff --git a/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 b/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 new file mode 100644 index 0000000000000000000000000000000000000000..45b46d582cb089fed0c240f42bbf47cf14e28397 GIT binary patch literal 35456 zcmeHP30xD$_YVk46%a4P8!1IZg>Y2}$?oRh&?@4MT8;n#QbV95D0otp3iZOHcvPy^ zs#vR{SOiISQ?=ED6*fZV|bCidZUhJXeX$zT)lSWMbUeO8ONE|mDcg8 zifp+iVVWXaqs&y(^*!hbn4QG@2Xi^l-_Un@t1$&o#kM>02soYtJ6@7Jd8$I4VsRXH zrH*V#fh^BxkBqm1)eBmgGuo*C-dWMbUnZiC}v3c@i8`=VfE0E99 zzK-e*7~v9NFDq#+2ypkcH(dBes7D9~r8+A|o8Zq&QK&Tvx{{@+oDcx_7>|eS0_tau zqJ5pnY-0M859f2FEezP#jKVVgl zO%pG`H^4V&h&C0OSr91jcv|{8WM-$TGN*%n(4F&G`$1RD^hj38H5wK<4}rVKNX9OE z40iVbriiYZ>A|4Nv}I+gl*xGsYK3-sX7*HuiA7yQw@qo}g2=LC7!5s)QH>GIb>fVl zB-bXVCxCvMba@st*%Wn-N;TL$Fpi!jMVrUgdC<#{YXbwA?wTxlwuWI$m8&!gx&jG_ zrk_pMDb-1tIqH-IHN7ZS$~d~VQBwADo60F;RF@+n8vSt>ZMIygmM5tg5wzJkj08=I znWI)}nVJ0qXa;$*Ryj?sr5z7lU{IrJUh{1JOQ8O*=;=-B&4BF;08ik*7-Pd|Sryn$e^3=n&4Nm)R>)ZkX*CQj zY#3NwV0ZA7+7L$@M_@7i;-byS=p=eh#qxKNXdRxY(totBU-*D}6~@5|@=^T8)*Xi(_T zpkV(%dT}X=WO*KFXPkvXE2n{C*h50u&(K%>gG2m-0{l^H0NYBpF=P1$gbWROl^@8A zq{z}JRjkYNR}A3~rEBCFnzU>=%OK2`c4Og-V&8 znVCWt>2|trhC-XIOa{)JE`kH3A|;`*9=aWkJR?h`fTzg>=6FR958If*&^Z&<&W6_k zku~T7pb9IsB49}}i0=w4 z^M*Z#MZ5r0Hs)Rbd_EY&ZZILkSsTcw^32~S;qY6FtqTEeJ{9HF>w6jFfc9;Sadopx 
zz>YNDJ#^^{hC=g%z;diRs7H3B7A9;FUjr<=1p+?E&MOWBWyDtn%XK19kJeeoBxmUl z;wy*w(OS;>0oRN77v-h7ayQx+=Dv;_4;)Y$6VsjCacIK{UQh>K&~Gpyi}KW_D{b1o zn9Cd2x`Ppl;Xsz|>+oLzjOe<+ZdTGt5a8M&q;0wG#sILH4Y~^L*C^xWKU_llhih*d z@9Q|R_jS1Yhq!AA`h_Z6}f!wix4@ykBSOD!6xYKJYt_`m(SQ}iR4bPip zvatmf=)Q>~>}pzeWBYei^{YTepjR%;q$`epa_9fQ~@LBB(z@fGM<=Pi_ z2tXawAA|?mv8aspEZBWKUI=RVofO15%+n9xlLhq_jmOpF-#KinV?*aoWCsj>Plo{E zo!I~Be66i@`!`Fmv-&T|y=%*;)N)U1`uNwW%j5$5(kJgyx7D+$p8gZ5;Ukw*{7XLg z-1>RcM>igkdCTpobEmqH9k;K+6?@;qqIca8R`_lu1^dpD-xb`Tew=%QJmlhr-BuT3 z?UHv2Ucrwl)^Eqyt8 z|JM7$_>g$q*?l`zkk~@tpT7v&)f))>5_zv}B zfP(6HbqbXeT|dr$`7G5F@GcGUaB#^4{Xn@43k=VR|p8i2ptK|_i3pAx#R zUex)C1hL9C0e?gu)@OuVCu{ObsjtsD;?)#InAee8 zWOQg(VqAQ%A=Y=EesSeetZkhS_HEjF?43=^sk;)5;r77g*x<-T)cT$Uc>N+LvTI%> zaaH&gl{RH8#sBacwr#(c{#M(ESjj|htYE*rL1OnU>FQ^1_<7C{YW5dAyk8qn{FlnJ zjU9FxXUOxPXsEk0gX&#$hn&~W1D`(p09i@AO+55YH&8t~ zQ@hF)*yc?oslyZVv6>4r4Zp6Rjla7@juRaM zsOn#);o0RneayZB!{vH!+*S1fMd~(S>xuWtHXr6-mjvs{jM^TCy!yNNgf%~lH1X^7 z8F#vosu{17{rMNkjq9AShIPHMk@bDCzP8R}rFb$fmOmivl0PH`L*AqM9ZSYDlP_R* zv1012%un!ky9Q9(y*e2d?#ZKy9$djM*lg2}-{(mctz3rxwB#4;;47yo@3xz+Oa$t$7&Md}lPrrrT zpJt#6OFJ4|oX1iB?BGO5*XA1T#=T?+{`WNU^S(O`hxEhn-X}?e;iNwv6zGH5e*Oj( z?^H#l2HH~NwOjClY9~W`+YhK)wpDumu?WNcYxl5^CMgZISTXK;>mw}6tDIUD+|_XU zbDZ+~>=FL;gh_^nqZ_EnFLxu){Ib^2&o>M&esibc!^n9=&EXI6sf#}_w7u)_>E&EE zLsa4w;>g*^Pq$ap8SK{HAXBTCd^#Y-h3NbJ#!n+QJtX@av%?mAya4-h;Y4cMVO3Z= z%^`hJUbHB8ODT5MZ4>sdJAG@0t~^2YIUk9isTx9gR`kL)KB&T??BAmt<5z?!g1*-K zye$>w%>51%tiOj{DIXm=Yd~j%s(cwfMN>(x_x2Ob9))9*2a2hS0d<11JDqh}VvVk@ zOA)sI(l%_*8f6CPgRfs z-}WVsh3^bo>a_$9x$i)g=y#BBXUB`an(vRTIqOB$-trZ=+^o{sy^~&Z(k2D_^5-QO zZ&&vaSKE!$fp^?+`KA@3dwZ|x3P+UH2qz?A7e`bG8nqCBN{Nrsm8W+q3FB5di}EZ?DZKx@7B91>}%9R zys_J(Khsw&9u&5BN4RKM?hO5ZDk}AE`|NAl?j0#AqxHQ~{*AEnT)zHl+Z#2d(R0c6 zx3-D|X9rR0cY9*{_wK=@-L_Eo$9x{PYVvM%f$w)Q1S@d*X5kxoo+o1>)Wrz zT|e>Cc@3E_bWr_QR~X%qJn?eYff?H)M7#N=dUZ;YDA)UaVfRIq2Rc-&By;CDVvCm^ z33FdCQh(*tSkW1*NL0MlIqb}l-g->CR#bO=ly2}6k*F}*PPlJrJa(ez>-tB&o%Dk5 
zH|fsax+AQ|`jEfgf91dt;cDS+)k9tHRcg`n)T5&4A7_O<$SxuGMRdfzd{}@R4&Bz@ z+}NLrm)yj^9MoCYJ24E?tqLMt^{-Gf>*J|1nNatjcsz09>Q|KPfd_WP(V09@G!o;r z?}$}=CB+MpMq^I>x{#iuDn+XYg;9~+mbfKrFmkXPsPsG}##bR**5 z6?$I0p>x{P4tFg)N**TqU|G_3Ls=JTHB&%yNSy#UF=_UdSFta`?u0aa>+x+7 zr<2}{tV&)Q5w)?Gbj8(sk@jEr3b!eBje4kFDShSgal-U`2_h3K32usx^0=JHp?FrSD{!PmSdgvIU&G3U%Eg! zLZBWYvA~l#{_LJ&>Y`9XGbzf4aP#25Cx6Ari1t0oTOkVNG|pfp`1_&raC7rl$gY^9 z&9^uC(B2Se2`r+f_9nfVNH?#Wx%^dZFc>cv+6_zgB_tH^TkT8puESrw7uUYH!v(E@ z`h(Wj()lYs)LWFl;_5My!%B@2;O4LL1;*{%658kPLEM(sU4C})tHB2#{nw0NV=y57 z?~Px}C4e94UF+7e*0*}De?jqU1AJZrAp+vU#`ouP7%DL!jNT6u&k|0l(G07+v00Q{dVccewIcs6PmZ zmX2TRaWE5#U%7gW1i0}l5pLYhEunpW1;!!V_SRaq6u%-rf@t&oNz5RCF}f0< zWa>|zZ~nFMIYk%t_b1r*7Mb_Lz{_P8e}95~FAXjcyefqv7h?@Gm*>re&!2CE2Emda zyB^&Xs>X9!OMhPkxPA<2n>*({2If6S1_k72qW&Z#C{E_SH{r-jw_#qnu;H11PO&@F z4xdwWf^s0r$myrlDzY0NOk_CNYVgz|@W_4{48s%EOd=RmK3Pp?h#UDExnbg%J4gbG z&zXiNxIviCu%z}2*lD*}}0Dgeki?v3p zG%b$+*WP-=DT|^0APJBOV{H~)3GZJ)+L*7Pjo9z{aG%@shI)(kGgps)*PyMC3E1G5l~C|8#4*;x%|QTt?Yjf1kCAgIn*DtO7{3h z7tl%O&+0GA2O_;In*wG+!JCe7xP=q7GCwKJISB~&@pY?RU z*4Ano9|22_@*e0u9iq+8tJcGKh;Ib3X*`r`)c&js=KKqHUt01n>GT_)%m?WmC|Uou z76i=s7Z<1(2#1#PFUT&gnT%#?nj-z=bALdsY4%oAYibw0*|no{BeI9)X!Gr)`(OaF z`R8$8q;n%TzhKEO5~*NyNIzSRLRlBeB4Exgdc*t_g6UbE8_VFlM-^aZOZr=w2KbQO zMDki`>p;Mq{$7XrgHUKGzmD|oiW1C(&VvqbfU>s=P-I{8p?)JiuD&CATMOl$8_N@( zzH)>&G3V)gt*zBGK5D6RW1=s>mTv^{U*+7$-Itd9%SO0=k=_|C+FJPABVf+IL_xhk z0JM~UL4Kn)9nI7JT5D<-y;&%@=SK9Lq9X(fm79NVEQWS6(Fl4|J89v< z8ukA31#|f{?)_#oLx+W&nuv{Jgal#6*Mo@OH2A|$N>@} zyNTqr($;~1IsJ8q`h!qtDZh^N&JEsPLFYjNmJ??H36OoQg8Gg4xcZLdZ7r00ZoJMu zrEGrn2=Bx~^E|Dw#WX%LJ~yH`2+`)pL-jBoLK+RwHjRhi26#>v!(4pC-Itd9OC8+5 zNbjE0a;(!nCj`v-mwc!fNUWCfFKCxJOh+>{O_6?P6*b-7T5C=1qBjc#Iybg~z@YP@ zX}P(-H`V~{WU&$Srgk#jsJ*2Nrg#|p34{Zp&G!?9@E39UMoThX|MT_}Pyc;$go8QS ze0*ZyPoTe8_oEXA?U8&tOe~(&yFz?^=Qe4>YC;gSvYeRwu|HOF+gEaDjlJZxRpnt5 z56qF-c}$nKAM7i2=}L+nzxR{$&hnIf;p`Wda&L(E^5G-cgvmQ3KiSWhWbyNaXHNYh z`~27)>D1A)B^7lqiL-p>iqBT6rSd^{1Qq^y#DYWr5OqJ(AU^R(FHZfzUiibTU|D&H 
zpLFopK*QxYDW+8X;Ohfn)oop6j&{ZTgvc@Cw&E2HBYi~D^JWTI)Ue}n~9y+eH@bwuO{OC&xg(t+!k%o z923q8kl_-ZEAH8?LNLQ^iFoaU1;k#@YH3-&$&z`MI!X4)9kSlNa)qk%m&DW44iQuQ z0;MAb?WJDFCy9Q_n-|{a+DzG&iM8UsE4E-S9sE$dW8OggrwswpUSI!)i&l*n9~xCc zByC(FjURrT_;1M|Y3-Zu;pM`WQnfgfa1C~n$fD;H>)X#Kq?6C#2l^LE?bWAtO!^Uup(eZz$^mKEly~^NV$UK Date: Tue, 26 May 2020 17:07:41 -0700 Subject: [PATCH 197/335] minor docstring formatting --- flow/controllers/base_controller.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 95ecd1737..2fdb2f399 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -77,8 +77,10 @@ def get_action(self, env): This method also augments the controller with the desired level of stochastic noise, and utlizes the "instantaneous", "safe_velocity", "feasible_accel", or "all" failsafes if requested. The "all" failsafe - performs all three failsafes with this order: 1)"safe_velocity", - 2) "feasible_accel", 3) "instantaneous". + performs all three failsafes with this order: + 1) "safe_velocity", + 2) "feasible_accel", + 3) "instantaneous". 
Parameters ---------- From 6f8d878c68ba6d6ef4413af6d8e1d7c4d86f32dc Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 26 May 2020 19:07:14 -0700 Subject: [PATCH 198/335] fixed a monor error in energy query, added network in metadata --- flow/core/experiment.py | 9 +++++++-- flow/data_pipeline/data_pipeline.py | 11 ++++------- flow/data_pipeline/leaderboard_utils.py | 17 ++++++++++++++++- flow/data_pipeline/query.py | 2 +- 4 files changed, 28 insertions(+), 11 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index c50648746..16f2f04e3 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -148,8 +148,12 @@ def rl_actions(*_): # time profiling information t = time.time() times = [] + + # data pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) + metadate = defaultdict(lambda: "") + metadate['network'] = self.env.network.name.split('_')[0] for i in range(num_runs): ret = 0 @@ -169,7 +173,8 @@ def rl_actions(*_): # collect additional information for the data pipeline get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) + extra_info["source_id"].extend([source_id] * len(veh_ids)) + extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) # Compute the results for the custom callables. 
for (key, lambda_func) in self.custom_callables.items(): @@ -220,7 +225,7 @@ def rl_actions(*_): cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, source_id), - trajectory_table_path, str(only_query)[2:-2]) + trajectory_table_path, metadate) # delete the S3-only version of the trajectory file # os.remove(upload_file_path) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index d414aa420..6649273a6 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -65,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): return -def upload_to_s3(bucket_name, bucket_key, file_path, only_query): +def upload_to_s3(bucket_name, bucket_key, file_path, metadata): """Upload a file to S3 bucket. Parameters @@ -76,15 +76,12 @@ def upload_to_s3(bucket_name, bucket_key, file_path, only_query): the key within the bucket for the file file_path: str the path of the file to be uploaded - only_query: str - specify which query should be run on this file by lambda: - if empty: run none of them - if "all": run all available analysis query - if a string of list of queries: run only those mentioned in the list + metadata: dict + all the metadata that should be attached to this simulation """ s3 = boto3.resource("s3") s3.Bucket(bucket_name).upload_file(file_path, bucket_key, - ExtraArgs={"Metadata": {"run-query": only_query}}) + ExtraArgs={"Metadata": metadata}) return diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 5eb5a7be6..c227a50dc 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -107,10 +107,25 @@ def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline def get_metadata(name, bucket="circles.data.pipeline"): + """Get the metadata by name. 
+ + Parameters + ---------- + name: str + the name of the table whose metadata will be returned + bucket: str + the bucket that hold the table + + Returns + ------- + metadata: dict + a dictionary of all the metadata, there is no guarantee + for which keys are included + """ s3 = boto3.client("s3") name_list = name.split('_') source_id = "flow_{}".format(name_list[2]) response = s3.head_object(Bucket=bucket, - Key="vehicle_trace_table/date={0}/partition_name={1}/{1}.csv".format(name_list[0], + Key="fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(name_list[0], source_id)) return response["Metadata"] diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 5e9aa27b2..3242cae96 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -26,7 +26,7 @@ speed, acceleration, road_grade, - MAX(0, 1200 * speed * ( + GREATEST(0, 1200 * speed * (( CASE WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 From db33f7c084c9b5bc40283a335899859cf919d785 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 26 May 2020 19:12:54 -0700 Subject: [PATCH 199/335] fix a minor mistake in docstring --- flow/data_pipeline/leaderboard_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index c227a50dc..3c86e3090 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -50,7 +50,7 @@ def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipel existing_results: list tables that should not be fetched, the names must follow the convention: - {source_id(no run number)}_{query_name}.csv + {date}_{source_id(no run number)}_{query_name}.csv Returns ------- @@ -85,7 +85,7 @@ def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline existing_results: list tables that should not be fetched, the names must follow the convention: - {source_id(no run 
number)}_{query_name}.csv + {date}_{source_id(no run number)}_{query_name}.csv Returns ------- From 089822a82b5d95b35480f0fd8f53b3ab7c0585dd Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 26 May 2020 19:22:09 -0700 Subject: [PATCH 200/335] flake8 fix --- flow/data_pipeline/lambda_function.py | 2 +- flow/data_pipeline/leaderboard_utils.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 22145af9c..483439eb5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -9,7 +9,7 @@ def lambda_handler(event, context): - """Handles S3 put event on AWS Lambda.""" + """Handle S3 put event on AWS Lambda.""" records = [] # delete all unwanted metadata for record in event['Records']: diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 3c86e3090..49083ce3e 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -1,3 +1,4 @@ +"""APIs for the leader board front end""" import os import boto3 import pandas as pd From c3756f8745cac215ec7f53845aa439d5aac4ef74 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 09:07:46 -0700 Subject: [PATCH 201/335] Fixed trajectory_table_path --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 779fdb0f4..82cdcd943 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -213,7 +213,7 @@ def rl_actions(*_): # Delete the .xml version of the emission file. 
os.remove(emission_path) - trajectory_table_path = './data/' + source_id + ".csv" + trajectory_table_path = dir_path + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: From 7f68c503945c14eec9ca81fab228759d50668b39 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 09:21:09 -0700 Subject: [PATCH 202/335] Fixed trajectory_table_path --- flow/visualize/i210_replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 57e72586a..af19111dc 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -329,7 +329,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= emission_to_csv(emission_path, output_path=output_path) # generate the trajectory output file - trajectory_table_path = './data/' + source_id + ".csv" + trajectory_table_path = dir_path + source_id + ".csv" upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) # upload to s3 if asked From 16697871d63f735a90aad66ace14b5d757ce73e0 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 27 May 2020 10:05:09 -0700 Subject: [PATCH 203/335] addressing comments --- flow/core/kernel/vehicle/traci.py | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2166709b6..9485572b2 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -113,10 +113,6 @@ def initialize(self, vehicles): self.__vehicles[veh_id] = dict() self.__vehicles[veh_id]['type'] = typ['veh_id'] self.__vehicles[veh_id]['initial_speed'] = typ['initial_speed'] - self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None - self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None - self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None - 
self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None self.num_vehicles += 1 if typ['acceleration_controller'][0] == RLController: self.num_rl_vehicles += 1 @@ -955,7 +951,7 @@ def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges): def apply_acceleration(self, veh_ids, acc): """See parent class.""" - # to hand the case of a single vehicle + # to handle the case of a single vehicle if type(veh_ids) == str: veh_ids = [veh_ids] acc = [acc] @@ -969,7 +965,7 @@ def apply_acceleration(self, veh_ids, acc): def apply_acceleration_not_smooth(self, veh_ids, acc): """See parent class.""" - # to hand the case of a single vehicle + # to handle the case of a single vehicle if type(veh_ids) == str: veh_ids = [veh_ids] acc = [acc] From 4b853b599c0d75015f5658e8ec20a6e41f0ee269 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 11:40:14 -0700 Subject: [PATCH 204/335] Function for ppo architecture --- .../imitation_learning/imitating_network2.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py index f750fbad6..12b1a45d4 100644 --- a/flow/controllers/imitation_learning/imitating_network2.py +++ b/flow/controllers/imitation_learning/imitating_network2.py @@ -129,3 +129,42 @@ def save_network(self, save_path): # tensorboard # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + + def save_network_PPO(self, save_path): + input = tf.keras.layers.Input(self.model.input.shape[1].value) + curr_layer = input + + # build layers for policy + for i in range(num_layers): + size = self.model.layers[i + 1].output.shape[1].value + activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) + curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) + output_layer_policy = 
tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer") + + # build layers for value function + curr_layer = input + for i in range(num_layers): + curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer") + + ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") + + # set the policy weights to those learned from imitation + for i in range(num_layers): + policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) + policy_layer.set_weights(self.model.layers[i + 1].get_weights()) + policy_output = ppo_model.get_layer("policy_output_layer") + policy_output.set_weights(self.model.layers[-1].get_weights) + + # save the model (as a h5 file) + ppo_model.save(save_path) + + + + + + + + + From 5c0923d0d1d170679c68a71e9f33b8834ef232c4 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 13:47:21 -0700 Subject: [PATCH 205/335] Load weights for training in train.py --- examples/train.py | 119 ++++++++++-------- .../imitation_learning/imitating_network2.py | 15 ++- .../imitation_learning/ppo_model.py | 87 +++++-------- 3 files changed, 108 insertions(+), 113 deletions(-) diff --git a/examples/train.py b/examples/train.py index 8c65a68c1..3764f7318 100644 --- a/examples/train.py +++ b/examples/train.py @@ -64,6 +64,10 @@ def parse_args(args): parser.add_argument( '--rl_trainer', type=str, default="rllib", help='the RL trainer to use. either rllib or Stable-Baselines') + parser.add_argument( + '--load_weights_path', type=str, default=None, + help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' + ) parser.add_argument( '--algorithm', type=str, default="PPO", help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' 
@@ -191,6 +195,17 @@ def setup_exps_rllib(flow_params, config["lambda"] = 0.97 config["kl_target"] = 0.02 config["num_sgd_iter"] = 10 + + + if flags.load_weights_path: + from flow.controllers.imitation_learning.ppo_model import PPONetwork + from ray.rllib.models import ModelCatalog + # Register custom model + ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) + # set model to the custom model for run + config['model']['custom_model'] = "PPO_loaded_weights" + config['model']['custom_options'] = {"h5_load_path": flags.load_weights_path} + elif alg_run == "TD3": agent_cls = get_agent_class(alg_run) config = deepcopy(agent_cls._default_config) @@ -252,59 +267,55 @@ def on_episode_end(info): register_env(gym_name, create_env) return alg_run, gym_name, config -def train_rllib_with_imitation(submodule, flags): - """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" - import ray - from flow.controllers.imitation_learning.ppo_model import PPONetwork - from ray.rllib.models import ModelCatalog - - flow_params = submodule.flow_params - flow_params['sim'].render = flags.render - policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) - policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) - policies_to_train = getattr(submodule, "policies_to_train", None) - - alg_run, gym_name, config = setup_exps_rllib( - flow_params, flags.num_cpus, flags.num_rollouts, flags, - policy_graphs, policy_mapping_fn, policies_to_train) - - # Register custom model - ModelCatalog.register_custom_model("Imitation_Learning", PPONetwork) - - config['num_workers'] = flags.num_cpus - config['env'] = gym_name - - # set model to the custom model for run - config['model']['custom_model'] = "Imitation_Learning" - - # create a custom string that makes looking at the experiment names easier - def trial_str_creator(trial): - return "{}_{}".format(trial.trainable_name, trial.experiment_tag) - - if flags.local_mode: - 
ray.init(local_mode=True) - else: - ray.init() - - exp_dict = { - "run_or_experiment": alg_run, - "name": gym_name, - "config": config, - "checkpoint_freq": flags.checkpoint_freq, - "checkpoint_at_end": True, - 'trial_name_creator': trial_str_creator, - "max_failures": 0, - "stop": { - "training_iteration": flags.num_iterations, - }, - } - date = datetime.now(tz=pytz.utc) - date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") - s3_string = "s3://i210.experiments/i210/" \ - + date + '/' + flags.exp_title - if flags.use_s3: - exp_dict['upload_dir'] = s3_string - tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) +# def train_rllib_with_imitation(submodule, flags): +# """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" +# import ray +# from flow.controllers.imitation_learning.ppo_model import PPONetwork +# from ray.rllib.models import ModelCatalog +# +# flow_params = submodule.flow_params +# flow_params['sim'].render = flags.render +# policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) +# policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) +# policies_to_train = getattr(submodule, "policies_to_train", None) +# +# alg_run, gym_name, config = setup_exps_rllib( +# flow_params, flags.num_cpus, flags.num_rollouts, flags, +# policy_graphs, policy_mapping_fn, policies_to_train) +# +# +# +# config['num_workers'] = flags.num_cpus +# config['env'] = gym_name +# +# # create a custom string that makes looking at the experiment names easier +# def trial_str_creator(trial): +# return "{}_{}".format(trial.trainable_name, trial.experiment_tag) +# +# if flags.local_mode: +# ray.init(local_mode=True) +# else: +# ray.init() +# +# exp_dict = { +# "run_or_experiment": alg_run, +# "name": gym_name, +# "config": config, +# "checkpoint_freq": flags.checkpoint_freq, +# "checkpoint_at_end": True, +# 'trial_name_creator': trial_str_creator, +# "max_failures": 0, +# "stop": { +# 
"training_iteration": flags.num_iterations, +# }, +# } +# date = datetime.now(tz=pytz.utc) +# date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") +# s3_string = "s3://i210.experiments/i210/" \ +# + date + '/' + flags.exp_title +# if flags.use_s3: +# exp_dict['upload_dir'] = s3_string +# tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" @@ -526,7 +537,7 @@ def main(args): # Perform the training operation. if flags.rl_trainer.lower() == "rllib": - train_rllib_with_imitation(submodule, flags) + train_rllib(submodule, flags) elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py index 12b1a45d4..a6b502165 100644 --- a/flow/controllers/imitation_learning/imitating_network2.py +++ b/flow/controllers/imitation_learning/imitating_network2.py @@ -130,23 +130,30 @@ def save_network(self, save_path): # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + def load_network(self, load_path): + if self.stochastic: + self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + def save_network_PPO(self, save_path): input = tf.keras.layers.Input(self.model.input.shape[1].value) curr_layer = input + # number of hidden layers + num_layers = len(self.model.layers) - 2 + # build layers for policy for i in range(num_layers): size = self.model.layers[i + 1].output.shape[1].value activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) - output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, 
activation=None, name="policy_output_layer") + output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) # build layers for value function curr_layer = input for i in range(num_layers): curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) - output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer") + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") @@ -155,9 +162,9 @@ def save_network_PPO(self, save_path): policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) policy_layer.set_weights(self.model.layers[i + 1].get_weights()) policy_output = ppo_model.get_layer("policy_output_layer") - policy_output.set_weights(self.model.layers[-1].get_weights) + policy_output.set_weights(self.model.layers[-1].get_weights()) - # save the model (as a h5 file) + # save the model (as a h5 file) ppo_model.save(save_path) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index ac606f363..f9668c229 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -8,85 +8,62 @@ import tensorflow as tf - class PPONetwork(TFModelV2): def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) + h5_path = model_config.get("custom_options").get("h5_load_path", "") + # setup model with weights loaded in from model in h5 path - self.setup_model(obs_space, action_space, model_config, num_outputs, 'model_files/follower_stopper1.h5') + self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) + # register variables for 
base model self.register_variables(self.base_model.variables) def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): - activation = model_config.get("fcnet_activation") - hiddens = model_config.get("fcnet_hiddens", []) - vf_share_layers = model_config.get("vf_share_layers") - - # set up model - inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") - curr_layer = inp_layer + if imitation_h5_path: + # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - # hidden layers and output for policy - i = 1 - for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), activation=activation)(curr_layer) - i += 1 + # set up a model to load in weights from imitation network (without the training variables, e.g. adam variables) + self.base_model = tf.keras.models.load_model(imitation_h5_path) - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)(curr_layer) + else: + activation = model_config.get("fcnet_activation") + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") - # set up value function - if not vf_share_layers: + # set up model + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") curr_layer = inp_layer + + # hidden layers and output for policy i = 1 for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), activation=activation)(curr_layer) + curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), + activation=activation)(curr_layer) i += 1 - output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)( + curr_layer) - # build model from layers - 
self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) + # set up value function + if not vf_share_layers: + curr_layer = inp_layer + i = 1 + for size in hiddens: + curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), + activation=activation)(curr_layer) + i += 1 + output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) - if imitation_h5_path: - # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - - # set up a model to load in weights from imitation network (without the training variables, e.g. adam variables) - imitation_inp = tf.keras.layers.Input(shape=obs_space.shape, name="imitation_inp") - curr_imitation_layer = imitation_inp - i = 1 - for size in hiddens: - curr_imitation_layer = tf.keras.layers.Dense(size, name="imitation_hidden_layer_{}".format(i), activation=activation)(curr_imitation_layer) - i += 1 - - imitation_output_layer = tf.keras.layers.Dense(num_outputs, name="imitation_output_layer", activation=None)(curr_imitation_layer) - imitation_model = tf.keras.Model(imitation_inp, [imitation_output_layer]) - - # load weights from file into model - imitation_model.load_weights(imitation_h5_path) - # register model variables (to prevent error) - self.register_variables(imitation_model.variables) - - # copy these weights into the base model (only the policy hidden layer and output weights) - try: - for i in range(len(hiddens)): - imitation_layer = imitation_model.layers[i + 1] - base_model_layer_name = 'policy_hidden_layer_' + str(i + 1) - base_model_layer = self.base_model.get_layer(base_model_layer_name) - base_model_layer.set_weights(imitation_layer.get_weights()) - - imitation_layer = imitation_model.layers[-1] - base_model_layer_name = 'policy_output_layer' - base_model_layer = self.base_model.get_layer(base_model_layer_name) - 
base_model_layer.set_weights(imitation_layer.get_weights()) - except Exception as e: - print("Error in loading weights from h5 file to this model") - raise e + # build model from layers + self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) + def forward(self, input_dict, state, seq_lens): From c785944471ebb08a63511350029bb67bad516971 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 27 May 2020 20:52:31 -0700 Subject: [PATCH 206/335] Code structure changes --- examples/train.py | 2 +- .../imitating_controller.py | 3 +- .../imitation_learning/imitating_network.py | 208 +++++++----------- .../imitation_learning/imitating_network2.py | 177 --------------- .../imitation_learning/keras_utils.py | 41 ++-- .../imitation_learning/ppo_model.py | 28 ++- flow/controllers/imitation_learning/run.py | 30 ++- .../controllers/imitation_learning/trainer.py | 18 +- flow/controllers/imitation_learning/utils.py | 21 +- 9 files changed, 169 insertions(+), 359 deletions(-) delete mode 100644 flow/controllers/imitation_learning/imitating_network2.py diff --git a/examples/train.py b/examples/train.py index 3764f7318..bc731e465 100644 --- a/examples/train.py +++ b/examples/train.py @@ -188,7 +188,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [12, 12]}) + config["model"].update({"fcnet_hiddens": [32, 32, 32]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.999 # discount rate config["use_gae"] = True diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 935a66831..a13ce2083 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -9,7 +9,8 @@ class ImitatingController(BaseController): """ Controller which uses a given neural net to imitate an expert. 
Subclasses BaseController """ - # Implementation in Tensorflow + + # Implementation in Tensorflow Keras def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): """ diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 04a0a4ce4..7d68c076a 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,6 +1,7 @@ import numpy as np import tensorflow as tf from utils_tensorflow import * +from keras_utils import * import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer @@ -11,7 +12,7 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_existing=False, load_path=''): """ Initializes and constructs neural network @@ -22,11 +23,8 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r obs_dim: dimension of observation space (size of network input) num_layers: number of hidden layers (for an MLP) size: size of each layer in network - learning_rate: learning rate used in optimizer replay_buffer_size: maximum size of replay buffer used to hold data for training - training: boolean, whether the network will be trained (as opposed to loaded) stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy - policy_scope: variable scope used by Tensorflow for weights/biases load_existing: boolean, whether to load an 
existing tensorflow model load_path: path to directory containing an existing tensorflow model @@ -35,148 +33,37 @@ def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, r self.sess = sess self.action_dim = action_dim self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training + self.fcnet_hiddens = fcnet_hiddens self.stochastic=stochastic + self.variance_regularizer = variance_regularizer # load network if specified, or construct network if load_existing: self.load_network(load_path) else: - print("HERE") self.build_network() + self.compile_network() + self.replay_buffer = ReplayBuffer(replay_buffer_size) - # init replay buffer - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - # set up policy variables, and saver to save model. Save only non-training variables (weights/biases) - if not load_existing: - self.policy_vars = [v for v in tf.all_variables() if 'network_scope' in v.name and 'train' not in v.name] - self.saver = tf.train.Saver(self.policy_vars, max_to_keep=None) - # tensorboard - self.writer = tf.summary.FileWriter('/Users/akashvelu/Documents/Random/tensorboard/', tf.get_default_graph()) - # track number of training steps - self.train_steps = 0 def build_network(self): """ Defines neural network for choosing actions. Defines placeholders and forward pass """ # setup placeholders for network input and labels for training, and hidden layers/output - self.define_placeholders() - self.define_forward_pass() - # set up training operation (e.g. 
Adam optimizer) - if self.training: - with tf.variable_scope('train'): - self.define_train_op() - - - - def load_network(self, path): - """ - Load tensorflow model from the path specified, set action prediction to proper placeholder - """ - # load and restore model - loader = tf.train.import_meta_graph(path + 'model.ckpt.meta') - loader.restore(self.sess, path+'model.ckpt') - - # get observation placeholder (for input into network) - self.obs_placeholder = tf.get_default_graph().get_tensor_by_name('policy_vars/observation:0') - # get output tensor (using name of appropriate tensor) - network_output = tf.get_default_graph().get_tensor_by_name('policy_vars/network_scope/Output_Layer/BiasAdd:0') - - # for stochastic policies, the network output is twice the action dimension. First half specifies the mean of a multivariate gaussian distribution, second half specifies the diagonal entries for the diagonal covariance matrix. - # for deterministic policies, network output is the action. - if self.stochastic: - # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution - means = network_output[:, :self.action_dim] - log_vars = network_output[:, self.action_dim:] - vars = tf.math.exp(log_vars) - - # set up action distribution (parameterized by network output) - # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars, name='Prediction Distribution') - # action is a sample from this distribution; one sample output per Gaussian contained in self.dist - self.action_predictions = self.dist.sample() - else: - self.dist = None - self.action_predictions = network_output - - def define_placeholders(self): - """ - Defines input, output, and training placeholders for neural net - """ - # placeholder for observations (input into network) - self.obs_placeholder = tf.placeholder(shape=[None, 
self.obs_dim], name="observation", dtype=tf.float32) - - # if training, define placeholder for labels (supervised learning) - if self.training: - self.action_labels_placeholder = tf.placeholder(shape=[None, self.action_dim], name="labels", dtype=tf.float32) - - - def define_forward_pass(self): - """ - Build network and initialize proper action prediction op - """ - # network output is twice action dim if stochastic (1st half mean, 2nd half diagonal elements of covariance) if self.stochastic: - output_size = 2 * self.action_dim + self.model = build_neural_net_stochastic(self.obs_dim, self.action_dim, self.fcnet_hiddens) else: - output_size = self.action_dim - - # build forward pass and get the tensor for output of last layer - network_output = build_neural_net(self.obs_placeholder, output_size=output_size, scope='network_scope', n_layers=self.num_layers, size=self.size) - - # parse the mean and covariance from output if stochastic, and set up distribution - if self.stochastic: - # determine means and (diagonal entries of ) covariance matrices (could be many in the case of batch) for action distribution - - means, log_vars = tf.split(network_output, num_or_size_splits=2, axis=1) - vars = tf.math.exp(log_vars) - - # set up action distribution (parameterized by network output) - # if a batch of size k is input as observations, then the the self.dist will store k different Gaussians - with tf.variable_scope('Action_Distribution'): - self.dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=vars) - # action is a sample from this distribution; one sample output per Gaussian contained in self.dist - self.action_predictions = self.dist.sample() - - else: - self.dist = None - self.action_predictions = network_output - - - def define_train_op(self): - """ - Defines training operations for network (loss function and optimizer) - """ - # labels - true_actions = self.action_labels_placeholder - predicted_actions = self.action_predictions + self.model = 
build_neural_net_deterministic(self.obs_dim, self.action_dim, self.fcnet_hiddens) - if self.stochastic: - # negative log likelihood loss for stochastic policy - self.loss = self.dist.log_prob(true_actions) - self.loss = tf.negative(self.loss) - self.loss = tf.reduce_mean(self.loss) - summary_name = 'Loss_tracking_NLL' - else: - # MSE loss for deterministic policy - self.loss = tf.losses.mean_squared_error(true_actions, predicted_actions) - summary_name = 'Loss_tracking_MSE' + def compile_network(self): + loss = get_loss(self.stochastic, self.variance_regularizer) + self.model.compile(loss=loss, optimizer='adam') - self.loss_summary = tf.summary.scalar(name=summary_name, tensor=self.loss) - # Adam optimizer - self.train_op = tf.train.AdamOptimizer(self.learning_rate).minimize(self.loss) def train(self, observation_batch, action_batch): """ @@ -184,9 +71,8 @@ def train(self, observation_batch, action_batch): """ # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - _, loss, summary = self.sess.run([self.train_op, self.loss, self.loss_summary], feed_dict={self.obs_placeholder: observation_batch, self.action_labels_placeholder: action_batch}) - self.writer.add_summary(summary, global_step=self.train_steps) - self.train_steps += 1 + batch_size = action_batch.shape[0] + self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1, verbose=0) def get_accel_from_observation(self, observation): """ @@ -197,8 +83,14 @@ def get_accel_from_observation(self, observation): if len(observation.shape)<=1: observation = observation[None] # "batch size" is 1, so just get single acceleration/acceleration vector - ret_val = self.sess.run([self.action_predictions], feed_dict={self.obs_placeholder: observation})[0] - return ret_val + network_output = self.model.predict(observation) + if self.stochastic: + mean, log_std = network_output[:, :self.action_dim], 
network_output[:, self.action_dim:] + var = np.exp(2 * log_std) + action = np.random.multivariate_normal(mean[0], var) + return action + else: + return network_output def get_accel(self, env): """ @@ -222,6 +114,60 @@ def sample_data(self, batch_size): def save_network(self, save_path): """ Save network to given path and to tensorboard """ - self.saver.save(self.sess, save_path) + self.model.save(save_path) # tensorboard - writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) + + def load_network(self, load_path): + if self.stochastic: + self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + + + def save_network_PPO(self, save_path): + """ + Builds and saves keras model for training PPO using policy weights learned from imitation. + + Args: + save_path: path (including h5 format filename) where the PPO model should be saved + + """ + input = tf.keras.layers.Input(self.model.input.shape[1].value) + curr_layer = input + + # number of hidden layers + num_layers = len(self.model.layers) - 2 + + # build layers for policy + for i in range(num_layers): + size = self.model.layers[i + 1].output.shape[1].value + activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) + curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) + output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) + + # build layers for value function + curr_layer = input + for i in range(num_layers): + curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) + + ppo_model = tf.keras.Model(inputs=input, 
outputs=[output_layer_policy, output_layer_vf], name="ppo_model") + + # set the policy weights to those learned from imitation + for i in range(num_layers): + policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) + policy_layer.set_weights(self.model.layers[i + 1].get_weights()) + policy_output = ppo_model.get_layer("policy_output_layer") + policy_output.set_weights(self.model.layers[-1].get_weights()) + + # save the model (as a h5 file) + ppo_model.save(save_path) + + + + + + + + + diff --git a/flow/controllers/imitation_learning/imitating_network2.py b/flow/controllers/imitation_learning/imitating_network2.py deleted file mode 100644 index a6b502165..000000000 --- a/flow/controllers/imitation_learning/imitating_network2.py +++ /dev/null @@ -1,177 +0,0 @@ -import numpy as np -import tensorflow as tf -from utils_tensorflow import * -from keras_utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - - -class ImitatingNetwork2(): - """ - Class containing neural network which learns to imitate a given expert controller. 
- """ - - def __init__(self, sess, action_dim, obs_dim, num_layers, size, learning_rate, replay_buffer_size, training = True, stochastic=False, policy_scope='policy_vars', load_existing=False, load_path=''): - - """ - Initializes and constructs neural network - - Args: - sess: Tensorflow session variable - action_dim: dimension of action space (determines size of network output) - obs_dim: dimension of observation space (size of network input) - num_layers: number of hidden layers (for an MLP) - size: size of each layer in network - learning_rate: learning rate used in optimizer - replay_buffer_size: maximum size of replay buffer used to hold data for training - training: boolean, whether the network will be trained (as opposed to loaded) - stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy - policy_scope: variable scope used by Tensorflow for weights/biases - load_existing: boolean, whether to load an existing tensorflow model - load_path: path to directory containing an existing tensorflow model - - """ - - self.sess = sess - self.action_dim = action_dim - self.obs_dim = obs_dim - self.num_layers = num_layers - self.size = size - self.learning_rate = learning_rate - self.training = training - self.stochastic=stochastic - - print("INNNNNITITTTTT") - - # load network if specified, or construct network - if load_existing: - self.load_network(load_path) - - else: - self.build_network() - self.compile_network() - - - # init replay buffer - if self.training: - self.replay_buffer = ReplayBuffer(replay_buffer_size) - else: - self.replay_buffer = None - - - def build_network(self): - """ - Defines neural network for choosing actions. 
Defines placeholders and forward pass - """ - # setup placeholders for network input and labels for training, and hidden layers/output - if self.stochastic: - self.model = build_neural_net_stochastic(self.obs_dim, self.action_dim, self.num_layers, self.size) - else: - self.model = build_neural_net_deterministic(self.obs_dim, self.action_dim, self.num_layers, self.size) - - - def compile_network(self): - loss = get_loss(self.stochastic) - self.model.compile(loss=loss, optimizer='adam') - - - def train(self, observation_batch, action_batch): - """ - Executes one training step for the given batch of observation and action data - """ - # reshape action_batch to ensure a shape (batch_size, action_dim) - action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - batch_size = action_batch.shape[0] - self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1) - - def get_accel_from_observation(self, observation): - """ - Gets the network's acceleration prediction based on given observation/state - """ - - # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays - if len(observation.shape)<=1: - observation = observation[None] - # "batch size" is 1, so just get single acceleration/acceleration vector - network_output = self.model.predict(observation) - if self.stochastic: - mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] - var = np.exp(2 * log_std) - action = np.random.multivariate_normal(mean[0], var) - return action - else: - return network_output - - def get_accel(self, env): - """ - Get network's acceleration prediction(s) based on given env - """ - observation = env.get_state() - return self.get_accel_from_observation(observation) - - - def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to replay buffer """ - - self.replay_buffer.add_rollouts(rollout_list) - - - def sample_data(self, batch_size): - """ 
Sample a batch of data from replay buffer """ - - return self.replay_buffer.sample_batch(batch_size) - - def save_network(self, save_path): - """ Save network to given path and to tensorboard """ - - self.model.save(save_path) - # tensorboard - - # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) - - def load_network(self, load_path): - if self.stochastic: - self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) - - - def save_network_PPO(self, save_path): - input = tf.keras.layers.Input(self.model.input.shape[1].value) - curr_layer = input - - # number of hidden layers - num_layers = len(self.model.layers) - 2 - - # build layers for policy - for i in range(num_layers): - size = self.model.layers[i + 1].output.shape[1].value - activation = tf.keras.activations.serialize(self.model.layers[i + 1].activation) - curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) - output_layer_policy = tf.keras.layers.Dense(self.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) - - # build layers for value function - curr_layer = input - for i in range(num_layers): - curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) - output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) - - ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") - - # set the policy weights to those learned from imitation - for i in range(num_layers): - policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) - policy_layer.set_weights(self.model.layers[i + 1].get_weights()) - policy_output = ppo_model.get_layer("policy_output_layer") - policy_output.set_weights(self.model.layers[-1].get_weights()) - - # save the model (as a h5 file) - 
ppo_model.save(save_path) - - - - - - - - - diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 429c75bea..e8dbaf458 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -3,11 +3,12 @@ from tensorflow.keras import Input from tensorflow.keras.layers import Dense -def build_neural_net_deterministic(input_dim, action_dim, n_layers, size): +def build_neural_net_deterministic(input_dim, action_dim, fcnet_hiddens): input_layer = Input(shape=(input_dim, )) curr_layer = input_layer - for _ in range(n_layers): + for i in range(len(fcnet_hiddens)): + size = fcnet_hiddens[i] dense = Dense(size, activation="tanh") curr_layer = dense(curr_layer) output_layer = Dense(action_dim, activation=None)(curr_layer) @@ -15,11 +16,12 @@ def build_neural_net_deterministic(input_dim, action_dim, n_layers, size): return model -def build_neural_net_stochastic(input_dim, action_dim, n_layers, size): +def build_neural_net_stochastic(input_dim, action_dim, fcnet_hiddens): input_layer = Input(shape=(input_dim, )) curr_layer = input_layer - for _ in range(n_layers): + for i in range(len(fcnet_hiddens)): + size = fcnet_hiddens[i] dense = Dense(size, activation="tanh") curr_layer = dense(curr_layer) @@ -28,20 +30,25 @@ def build_neural_net_stochastic(input_dim, action_dim, n_layers, size): return model -def get_loss(stochastic): +def get_loss(stochastic, variance_regularizer): if stochastic: - return negative_log_likelihood_loss + return negative_log_likelihood_loss(variance_regularizer) else: return tf.keras.losses.mean_squared_error -def negative_log_likelihood_loss(y, distribution_params): - assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" - action_dim = distribution_params.shape[1]//2 - means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] - stds = 
tf.math.exp(log_stds) - variances = tf.math.square(stds) - dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) - loss = dist.log_prob(y) - loss = tf.negative(loss) - loss = tf.reduce_mean(loss) + (0.5 * tf.norm(variances)) - return loss +def negative_log_likelihood_loss(variance_regularizer): + + def nll_loss(y, distribution_params): + assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + + action_dim = distribution_params.shape[1]//2 + means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] + stds = tf.math.exp(log_stds) + variances = tf.math.square(stds) + dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) + loss = dist.log_prob(y) + loss = tf.negative(loss) + loss = tf.reduce_mean(loss) + (variance_regularizer * tf.norm(variances)) + return loss + + return nll_loss diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index f9668c229..5ad97a75d 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -3,12 +3,14 @@ import h5py from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -# from ray.rllib.utils.framework import get_activation_fn, try_import_tf -# from flow.controllers.imitation_learning.keras_utils import * import tensorflow as tf class PPONetwork(TFModelV2): + """ + Custom RLLib PPOModel (using tensorflow keras) to load weights from a pretrained policy model (e.g. from imitation learning) and start RL training with loaded weights.
+ Subclass of TFModelV2 + """ def __init__(self, obs_space, action_space, num_outputs, model_config, name): @@ -24,11 +26,18 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): + """ + Loads/builds model for both policy and value function + Args: + obs_space: observation space of env + action_space: action space of env + model_config: configuration parameters for model + num_outputs: number of outputs expected for policy + imitation_h5_path: path to h5 file containing weights of a pretrained network (empty string if no such file) + """ if imitation_h5_path: - # imitation_model = tf.keras.models.load_model(imitation_h5_path, custom_objects={"negative_log_likelihood_loss": negative_log_likelihood_loss}) - - # set up a model to load in weights from imitation network (without the training variables, e.g. adam variables) + # set base model to be loaded model self.base_model = tf.keras.models.load_model(imitation_h5_path) else: @@ -63,15 +72,22 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat # build model from layers self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - + def forward(self, input_dict, state, seq_lens): + """ + Overrides parent class's method. Used to pass an input through model and get policy/vf output. + """ + policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out return policy_out, state def value_function(self): + """ + Overrides parent class's method. Get value function method.
+ """ return tf.reshape(self.value_out, [-1]) def import_from_h5(self, import_file): diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 265991e20..eba837b9e 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -12,7 +12,13 @@ def __init__(self, params): # initialize trainer class instance and params self.params = params - self.trainer = Trainer(params) + if self.params['multiagent']: + module = __import__("examples.exp_configs.rl.multiagent", fromlist=[self.params['exp_config']]) + else: + module = __import__("examples.exp_configs.rl.singleagent", fromlist=[self.params['exp_config']]) + + submodule = getattr(module, self.params['exp_config']) + self.trainer = Trainer(params, submodule) def run_training_loop(self): """ @@ -39,6 +45,13 @@ def main(): """ import argparse parser = argparse.ArgumentParser() + + # required input parameters + parser.add_argument( + 'exp_config', type=str, + help='Name of the experiment configuration file, as located in ' + 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') + parser.add_argument('--ep_len', type=int, default=5000) parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy @@ -50,23 +63,23 @@ def main(): parser.add_argument('--train_batch_size', type=int, default=100) # number of sampled data points to be used per gradient/train step - parser.add_argument('--num_layers', type=int, default=3) # number of hidden layers, of policy to be learned - parser.add_argument('--size', type=int, default=64) # width of each layer, of policy to be learned - parser.add_argument('--learning_rate', '-lr', type=float, default=5e-3) # learning rate for supervised learning + parser.add_argument('--replay_buffer_size', type=int, default=1000000) parser.add_argument('--save_path', type=str, default='') parser.add_argument('--save_model', type=int, default=0) - 
parser.add_argument('--num_eval_episodes', type=int, default=30) + parser.add_argument('--num_eval_episodes', type=int, default=0) parser.add_argument('--stochastic', type=bool, default=False) - parser.add_argument('--noise_variance',type=float, default=0.5) - parser.add_argument('--vehicle_id', type=str, default='rl_0') parser.add_argument('--multiagent', type=bool, default=False) parser.add_argument('--v_des', type=float, default=15) - + parser.add_argument('--variance_regularizer', type=float, default=0.5) args = parser.parse_args() # convert args to dictionary params = vars(args) + + # change this to determine number and size of hidden layers + params["fcnet_hiddens"] = [32, 32, 32] + assert args.n_iter>1, ('DAgger needs >1 iteration') @@ -78,7 +91,6 @@ def main(): if params['save_model'] == 1: train.save_controller_network() - # evaluate train.evaluate() print("DONE") diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 940feffb8..6bd0e5dd1 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -5,9 +5,8 @@ import gym import os from flow.utils.registry import make_create_env -from examples.exp_configs.rl.multiagent.multiagent_straight_road import flow_params from imitating_controller import ImitatingController -from imitating_network2 import ImitatingNetwork2 +from imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams @@ -20,18 +19,21 @@ class Trainer(object): Class to initialize and run training for imitation learning (with DAgger) """ - def __init__(self, params): + def __init__(self, params, submodule): - # param setup + # get flow params + self.flow_params = submodule.flow_params + + # setup parameters for training self.params = params self.sess = create_tf_session() # 
environment setup - create_env, _ = make_create_env(flow_params) + create_env, _ = make_create_env(self.flow_params) self.env = create_env() # vehicle setup - self.multiagent = params['multiagent'] # multiagent or singleagent env + self.multiagent = self.params['multiagent'] # multiagent or singleagent env if not self.multiagent and self.env.action_space.shape[0] > 1: # use sorted rl ids if the method exists (e.g.. singlagent straightroad) @@ -51,7 +53,7 @@ def __init__(self, params): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork2(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['num_layers'], self.params['size'], self.params['learning_rate'], self.params['replay_buffer_size'], stochastic=self.params['stochastic']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer']) # tf.global_variables_initializer().run(session=self.sess) @@ -111,7 +113,7 @@ def collect_training_trajectories(self, itr, batch_size): """ print("\nCollecting data to be used for training...") - max_decel = flow_params['env'].additional_params['max_decel'] + max_decel = self.flow_params['env'].additional_params['max_decel'] trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, batch_size, self.params['ep_len'], self.multiagent, use_expert=itr==0, v_des=self.params['v_des'], max_decel=max_decel) return trajectories, envsteps_this_batch diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index a55f32c97..3be12f849 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -68,6 +68,7 @@ def sample_trajectory_singleagent(env, controllers, 
action_network, max_trajecto for i in range(action_dim): # if max number of RL vehicles is not reached, insert dummy values if i >= len(vehicle_ids): + # dummy value is -2 * max_decel ignore_accel = -2 * max_decel rl_actions.append(ignore_accel) actions_expert.append(ignore_accel) @@ -149,7 +150,9 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector while True: - vehicle_ids = env.k.vehicle.get_rl_ids() + + # vehicle_ids = env.k.vehicle.get_rl_ids() **this doesn't work now due to control range restriction** + vehicle_ids = list(observation_dict.keys()) # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: observation_dict, reward, done, _ = env.step(None) @@ -213,8 +216,8 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector terminate_rollout = done_dict['__all__'] or (traj_length == max_trajectory_length) for vehicle_id in vehicle_ids: - next_observations.append(observation_dict[vehicle_id]) - rewards.append(reward_dict[vehicle_id]) + next_observations.append(observation_dict.get(vehicle_id, None)) + rewards.append(reward_dict.get(vehicle_id, 0)) terminals.append(terminate_rollout) traj_length += 1 @@ -292,9 +295,9 @@ def traj_dict(observations, actions, expert_actions, rewards, next_observations, """ Collects individual observation, action, expert_action, rewards, next observation, terminal arrays into a single rollout dictionary """ - return {"observations" : np.array(observations, dtype=np.float32), - "actions" : np.array(actions, dtype=np.float32), - "expert_actions": np.array(expert_actions, dtype=np.float32), - "rewards" : np.array(rewards, dtype=np.float32), - "next_observations": np.array(next_observations, dtype=np.float32), - "terminals": np.array(terminals, dtype=np.float32)} + return {"observations" : np.array(observations), + "actions" : np.array(actions), + "expert_actions": np.array(expert_actions), + "rewards" : np.array(rewards), + "next_observations": 
np.array(next_observations), + "terminals": np.array(terminals)} From 1857f834e8187495dc0985c0fb18c330409c2750 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 27 May 2020 21:46:11 -0700 Subject: [PATCH 207/335] fixed naming convention --- flow/data_pipeline/leaderboard_utils.py | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 49083ce3e..cc60f0dd2 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,6 +5,20 @@ from io import StringIO +def key_to_name(key): + """Return the standard formatted file name from object key.""" + k_list = key.split("/") + date = k_list[1].replace("date=", "") + name = k_list[2].replace("partition_name=", "") + index = name.find("_", 5) + source_id = name + query_name = "" + if index != -1: + source_id = name[0:index] + query_name = "_" + name[index+1:].replace("_", "-") + return "{}_{}{}.csv".format(date, source_id.replace("_", "-"), query_name) + + def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipeline"): """Fetch tables from s3 and store in ./result directory. 
@@ -31,8 +45,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] - names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), - e.split("/")[2].replace("partition_name=", ""))for e in keys] + names = [key_to_name(k) for k in keys] existing_results = os.listdir("./result/{}".format(table_name)) for index in range(len(keys)): if names[index] not in existing_results: @@ -63,8 +76,7 @@ def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipel s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] - names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), - e.split("/")[2].replace("partition_name=", ""))for e in keys] + names = [key_to_name(k) for k in keys] results = dict() for index in range(len(keys)): if names[index] not in existing_results: @@ -98,8 +110,7 @@ def get_table_url(table_name="fact_vehicle_trace", bucket="circles.data.pipeline s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] - names = ["{}_{}.csv".format(e.split("/")[1].replace("date=", ""), - e.split("/")[2].replace("partition_name=", "")) for e in keys] + names = [key_to_name(k) for k in keys] results = dict() for index in range(len(keys)): if names[index] not in existing_results: From 87ebf59836869ac45ae96017acc02e08d086ac4b Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 27 May 2020 23:06:08 -0700 Subject: [PATCH 208/335] do repair partition for all new data upon arrival --- flow/data_pipeline/data_pipeline.py | 19 +++++++++---------- flow/data_pipeline/query.py | 3 +++ 2 files changed, 12 
insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 6649273a6..630782e6d 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -221,8 +221,16 @@ def update_partition(self, table, query_date, partition): self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) return + def repair_partition(self, table, query_date, partition): + """Load the missing partitions.""" + if table not in self.existing_partitions.keys(): + self.existing_partitions[table] = self.get_existing_partitions(table) + if "date={}/partition_name={}".format(query_date, partition) not in \ + self.existing_partitions[table]: + self.update_partition(table, query_date, partition) + def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default", primary_table=""): + query_date="today", partition="default"): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -235,8 +243,6 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the partition date to run this query on partition: str, optional name of the partition to run this query on - primary_table: str - the table whose partition that may need update Returns ------- execution_id: str @@ -253,13 +259,6 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu source_id = "flow_{}".format(partition.split('_')[1]) - if primary_table: - if primary_table not in self.existing_partitions.keys(): - self.existing_partitions[primary_table] = self.get_existing_partitions(primary_table) - if "date={}/partition_name={}".format(query_date, partition) not in \ - self.existing_partitions[primary_table]: - self.update_partition(primary_table, query_date, partition) - response = self.client.start_query_execution( QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), QueryExecutionContext={ diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 3242cae96..0d026eb3c 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -280,6 +280,7 @@ class QueryStrings(Enum): FULL OUTER JOIN outflows o ON 1 = 1 AND i.time_step = o.time_step AND i.source_id = o.source_id + ORDER BY time_step ;""" FACT_NETWORK_METRICS_BY_DISTANCE_AGG = """ @@ -388,6 +389,7 @@ class QueryStrings(Enum): FULL OUTER JOIN binned_energy be ON 1 = 1 AND COALESCE(bce.source_id, bsa.source_id) = be.source_id AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + ORDER BY distance_meters_bin ASC ;""" FACT_NETWORK_METRICS_BY_TIME_AGG = """ @@ -495,4 +497,5 @@ class QueryStrings(Enum): FULL OUTER JOIN binned_energy be ON 1 = 1 AND COALESCE(bce.source_id, bsa.source_id) = be.source_id AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + ORDER BY time_seconds_bin ASC ;""" From 05e793a37a8d792d1d373d6d68097e00a8881592 
Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 01:23:13 -0700 Subject: [PATCH 209/335] added leaderboard chart aggregation --- flow/core/experiment.py | 2 +- flow/data_pipeline/data_pipeline.py | 9 ++++++ flow/data_pipeline/query.py | 44 ++++++++++++++++++++--------- flow/visualize/i210_replay.py | 2 +- 4 files changed, 42 insertions(+), 15 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 20154977a..e9668d9db 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -218,7 +218,7 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = "{}/{}.csv".format(dir_path, source_id) generate_trajectory_from_flow(trajectory_table_path, extra_info) if to_aws: diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index a8ffb81c4..5c9346c40 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -110,6 +110,15 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) +def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): + """Delete the obsolete data on S3""" + response = s3.list_objects_v2(Bucket=bucket) + keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table) == 0 and e["Key"][-4:] == ".csv"] + keys.remove(latest_key) + for key in keys: + s3.delete_object(Bucket=bucket, Key=key) + + class AthenaQuery: """ Class used to run query. 
diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 0a75eb382..04793cc73 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -12,12 +12,14 @@ ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, - "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]} + "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]}, + "leaderboard_chart": {"leaderboard_chart_agg": ["LEADERBOARD_CHART_AGG"]} } tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", - "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart"] + "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", + "leaderboard_chart_agg"] VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -343,9 +345,9 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound + AVG(target_accel_no_noise_with_failsafe) AS accel_avg, + AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, + AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -405,10 +407,11 @@ class QueryStrings(Enum): vt.x, energy_model_id, vt.time_step - LAG(vt.time_step, 1) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC 
ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) + AS sim_step, SUM(power) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS - cumulative_power + OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) + AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND vt.date = \'{date}\' @@ -451,9 +454,11 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(accel_without_noise) AS accel_avg, - AVG(accel_without_noise) + STDDEV(accel_without_noise) AS accel_upper_bound, - AVG(accel_without_noise) - STDDEV(accel_without_noise) AS accel_lower_bound + AVG(target_accel_no_noise_with_failsafe) AS accel_avg, + AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) + AS accel_upper_bound, + AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) + AS accel_lower_bound FROM fact_vehicle_trace WHERE 1 = 1 AND date = \'{date}\' @@ -466,8 +471,10 @@ class QueryStrings(Enum): source_id, id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end + FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) + AS energy_start, + LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) + AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -503,3 +510,14 @@ class QueryStrings(Enum): AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin ORDER BY time_seconds_bin ASC ;""" + + 
LEADERBOARD_CHART_AGG = """ + SELECT + source_id, + energy_model_id, + efficiency_meters_per_joules, + efficiency_miles_per_gallon, + throughput_per_hour + FROM leaderboard_chart + ORDER BY date, source_id ASC + ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 2ac8873d5..0f1d54eb5 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -329,7 +329,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= emission_to_csv(emission_path, output_path=output_path) # generate the trajectory output file - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = "{}/{}.csv".format(dir_path, source_id) upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) # upload to s3 if asked From 7a75c6e36f267c441a7f1c4c3b1a3347a5cc5ac5 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 01:38:30 -0700 Subject: [PATCH 210/335] update lambda function, added some comments --- flow/data_pipeline/lambda_function.py | 28 ++++++++++++++++++------- flow/data_pipeline/leaderboard_utils.py | 6 ++++++ 2 files changed, 26 insertions(+), 8 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 483439eb5..3a9f55ded 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,7 +1,7 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from flow.data_pipeline.data_pipeline import AthenaQuery +from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data from flow.data_pipeline.query import tags, tables s3 = boto3.client('s3') @@ -11,22 +11,34 @@ def lambda_handler(event, context): """Handle S3 put event on AWS Lambda.""" records = [] - # delete all unwanted metadata + # do a pre-sweep to handle tasks other than initalizing a query for record in event['Records']: bucket = 
record['s3']['bucket']['name'] key = unquote_plus(record['s3']['object']['key']) table = key.split('/')[0] if table not in tables: continue - if key[-9:] == '.metadata': + + # delete unwanted metadata files + if (key[-9:] == '.metadata'): s3.delete_object(Bucket=bucket, Key=key) continue - if table in tags.keys(): - records.append((bucket, key, table)) - # initialize the queries - for bucket, key, table in records: + + # load the partition for newly added table query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] + queryEngine.repair_partition(table, query_date, partition) + + # delete obsolete data + if table == "leaderboard_chart_agg": + delete_obsolete_data(s3, key, table) + + # add table that need to start a query to list + if table in tags.keys(): + records.append((bucket, key, table, query_date, partition)) + + # initialize the queries + for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) # response = s3.head_object(Bucket=bucket, Key=key) # required_query = response["Metadata"]["run-query"] @@ -45,4 +57,4 @@ def lambda_handler(event, context): query_date, source_id, query_name) - queryEngine.run_query(query_name, result_location, query_date, partition, table) + queryEngine.run_query(query_name, result_location, query_date, partition) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index cc60f0dd2..7a6eb6b5b 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -35,6 +35,12 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin fact_network_metrics_by_time_agg fact_network_fuel_efficiency_agg leaderboard_chart + leaderboard_chart_agg + Note that leaderboard_chart_agg is a combination of all previous + learderboard_chart entries in one CSV file. It's only used to + avoid burdening the web server with more calculation. 
The date + and source_id in its name is always going to reflect the latest + leaderboard_chart entry. bucket: str the S3 bucket that holds these tables """ From fef3a831c941ff15cbd9b52ae3f48e338c50a87b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 28 May 2020 10:22:15 -0700 Subject: [PATCH 211/335] Combine imitation and PPO training into one step --- .../imitation_learning/imitating_network.py | 3 +- flow/controllers/imitation_learning/run.py | 3 + .../train_with_imitation.py | 164 ++++++++++++++++++ .../controllers/imitation_learning/trainer.py | 3 + .../imitation_learning/utils_tensorflow.py | 4 +- 5 files changed, 174 insertions(+), 3 deletions(-) create mode 100644 flow/controllers/imitation_learning/train_with_imitation.py diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 7d68c076a..c2ab892cc 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -148,7 +148,8 @@ def save_network_PPO(self, save_path): # build layers for value function curr_layer = input for i in range(num_layers): - curr_layer = tf.keras.layers.Dense(self.size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + size = self.fcnet_hiddens[i] + curr_layer = tf.keras.layers.Dense(size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index eba837b9e..439b5e5d0 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -38,6 +38,9 @@ def save_controller_network(self): """ self.trainer.save_controller_network() + def 
save_controller_for_PPO(self): + self.trainer.save_controller_for_PPO() + def main(): """ diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py new file mode 100644 index 000000000..3dfbb1265 --- /dev/null +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -0,0 +1,164 @@ +from run import * +from examples.train import * + +def parse_args(args): + """Parse training options user can specify in command line. + + Returns + ------- + argparse.Namespace + the output parser object + """ + + # train.py args + + + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description="Parse argument used when running a Flow simulation.", + epilog="python train.py EXP_CONFIG") + + # required input parameters + parser.add_argument( + 'exp_config', type=str, + help='Name of the experiment configuration file, as located in ' + 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') + + parser.add_argument( + 'exp_title', type=str, + help='Title to give the run.') + + + # optional input parameters + parser.add_argument( + '--rl_trainer', type=str, default="rllib", + help='the RL trainer to use. either rllib or Stable-Baselines') + parser.add_argument( + '--load_weights_path', type=str, default=None, + help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' + ) + parser.add_argument( + '--algorithm', type=str, default="PPO", + help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' + ) + parser.add_argument( + '--num_cpus', type=int, default=1, + help='How many CPUs to use') + parser.add_argument( + '--num_steps', type=int, default=5000, + help='How many total steps to perform learning over. 
Relevant for stable-baselines') + parser.add_argument( + '--grid_search', action='store_true', default=False, + help='Whether to grid search over hyperparams') + parser.add_argument( + '--num_iterations', type=int, default=200, + help='How many iterations are in a training run.') + parser.add_argument( + '--checkpoint_freq', type=int, default=20, + help='How often to checkpoint.') + parser.add_argument( + '--num_rollouts', type=int, default=1, + help='How many rollouts are in a training batch') + parser.add_argument( + '--rollout_size', type=int, default=1000, + help='How many steps are in a training batch.') + parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--local_mode', action='store_true', default=False, + help='If true only 1 CPU will be used') + parser.add_argument('--render', action='store_true', default=False, + help='If true, we render the display') + parser.add_argument( + '--checkpoint_path', type=str, default=None, + help='Directory with checkpoint to restore training from.') + + + + parser.add_argument('--ep_len', type=int, default=5000) + + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=5) + + parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration + parser.add_argument('--init_batch_size', type=int, default=4000) + + parser.add_argument('--train_batch_size', type=int, + default=100) # number of sampled data points to be used per gradient/train step + + + parser.add_argument('--replay_buffer_size', type=int, default=1000000) + parser.add_argument('--num_eval_episodes', type=int, default=0) + parser.add_argument('--stochastic', type=bool, default=False) + parser.add_argument('--multiagent', type=bool, default=False) + parser.add_argument('--v_des', type=float, default=15) + 
parser.add_argument('--variance_regularizer', type=float, default=0.5) + + parsed_args = parser.parse_known_args(args)[0] + dict_args = vars(parsed_args) + dict_args['save_model'] = 1 + dict_args['save_path'] = dict_args['load_weights_path'] + + return parsed_args, dict_args + + + +def main(args): + """ + Parse args, run training, and evalutation + """ + flags, params = parse_args(args) + params["fcnet_hiddens"] = [32, 32, 32] + + # change this to determine number and size of hidden layers + params["fcnet_hiddens"] = [32, 32, 32] + + assert flags.n_iter>1, ('DAgger needs >1 iteration') + + + # run training + imitation_runner = Runner(params) + imitation_runner.run_training_loop() + + # save model after training + imitation_runner.save_controller_for_PPO() + + ### IMITATION DONE + + + + # Import relevant information from the exp_config script. + module = __import__( + "examples.exp_configs.rl.singleagent", fromlist=[flags.exp_config]) + module_ma = __import__( + "examples.exp_configs.rl.multiagent", fromlist=[flags.exp_config]) + + # Import the sub-module containing the specified exp_config and determine + # whether the environment is single agent or multi-agent. + if hasattr(module, flags.exp_config): + submodule = getattr(module, flags.exp_config) + multiagent = False + elif hasattr(module_ma, flags.exp_config): + submodule = getattr(module_ma, flags.exp_config) + assert flags.rl_trainer.lower() in ["rllib", "h-baselines"], \ + "Currently, multiagent experiments are only supported through "\ + "RLlib. Try running this experiment using RLlib: " \ + "'python train.py EXP_CONFIG'" + multiagent = True + else: + raise ValueError("Unable to find experiment config.") + + # Perform the training operation. 
+ if flags.rl_trainer.lower() == "rllib": + train_rllib(submodule, flags) + elif flags.rl_trainer.lower() == "stable-baselines": + train_stable_baselines(submodule, flags) + elif flags.rl_trainer.lower() == "h-baselines": + flow_params = submodule.flow_params + train_h_baselines(flow_params, args, multiagent) + else: + raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " + "or 'stable-baselines'.") + + + +if __name__ == "__main__": + main(sys.argv[1:]) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 6bd0e5dd1..fc055ccda 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -215,3 +215,6 @@ def save_controller_network(self): """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) + + def save_controller_for_PPO(self): + self.action_network.save_network_PPO(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 70df79693..7be44cf60 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -30,6 +30,6 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, acti return output_placeholder def create_tf_session(): - config = tf.ConfigProto(device_count={'GPU': 0}) - sess = tf.Session(config=config) + config = tf.compat.v1.ConfigProto(device_count={'GPU': 0}) + sess = tf.compat.v1.Session(config=config) return sess From 8fac7208565f22df9063da3df23000a3aaff68b3 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 11:15:55 -0700 Subject: [PATCH 212/335] minor change to get_table_disk --- flow/data_pipeline/leaderboard_utils.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/flow/data_pipeline/leaderboard_utils.py 
b/flow/data_pipeline/leaderboard_utils.py index 7a6eb6b5b..58366d4d6 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -53,6 +53,9 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] names = [key_to_name(k) for k in keys] existing_results = os.listdir("./result/{}".format(table_name)) + if table_name == "leaderboard_chart_agg": + for p in existing_results: + os.remove(p) for index in range(len(keys)): if names[index] not in existing_results: s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) From 9537bb4b0266995290a578ff16d0af63a4ee1fc7 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 11:17:58 -0700 Subject: [PATCH 213/335] fix minor path issue --- flow/data_pipeline/leaderboard_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 58366d4d6..f2c26d01b 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -55,7 +55,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin existing_results = os.listdir("./result/{}".format(table_name)) if table_name == "leaderboard_chart_agg": for p in existing_results: - os.remove(p) + os.remove("./result/{}/{}".format(table_name, p)) for index in range(len(keys)): if names[index] not in existing_results: s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) From ff90e8d1b7548deeeeae665f2d3016868e976aa8 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 11:20:46 -0700 Subject: [PATCH 214/335] move deleting leaderboard_chart_agg to after downloading --- flow/data_pipeline/leaderboard_utils.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff 
--git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index f2c26d01b..f4476b2eb 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -53,12 +53,12 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] names = [key_to_name(k) for k in keys] existing_results = os.listdir("./result/{}".format(table_name)) - if table_name == "leaderboard_chart_agg": - for p in existing_results: - os.remove("./result/{}/{}".format(table_name, p)) for index in range(len(keys)): if names[index] not in existing_results: s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) + if table_name == "leaderboard_chart_agg": + for p in existing_results: + os.remove("./result/{}/{}".format(table_name, p)) def get_table_memory(table_name="fact_vehicle_trace", bucket="circles.data.pipeline", existing_results=()): From f7a278c585e0076e12ce9436a340e4d3d87aa1c6 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Thu, 28 May 2020 15:27:18 -0400 Subject: [PATCH 215/335] Network update (#953) Add new I210 models and envs. 
--- docs/source/flow_setup.rst | 16 + examples/exp_configs/non_rl/highway_single.py | 12 +- .../exp_configs/non_rl/i210_subnetwork.py | 249 +- examples/exp_configs/non_rl/straight_road.py | 7 +- .../rl/multiagent/multiagent_i210.py | 273 +- .../rl/multiagent/multiagent_straight_road.py | 58 +- .../i210_with_ghost_cell_with_downstream.xml | 10 +- ...0_with_ghost_cell_with_downstream_test.xml | 5719 +++++++++++++++++ examples/train.py | 94 +- flow/algorithms/centralized_PPO.py | 547 ++ flow/algorithms/custom_ppo.py | 318 + flow/controllers/car_following_models.py | 1 + flow/controllers/velocity_controllers.py | 84 +- flow/core/kernel/vehicle/base.py | 1 + flow/core/rewards.py | 4 +- flow/envs/base.py | 2 + flow/envs/multiagent/__init__.py | 1 + flow/envs/multiagent/base.py | 3 + flow/envs/multiagent/i210.py | 201 +- flow/networks/i210_subnetwork_ghost_cell.py | 162 + flow/visualize/time_space_diagram.py | 29 +- flow/visualize/visualizer_rllib.py | 27 +- scripts/ray_autoscale.yaml | 16 +- 23 files changed, 7550 insertions(+), 284 deletions(-) create mode 100644 examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml create mode 100644 flow/algorithms/centralized_PPO.py create mode 100644 flow/algorithms/custom_ppo.py create mode 100644 flow/networks/i210_subnetwork_ghost_cell.py diff --git a/docs/source/flow_setup.rst b/docs/source/flow_setup.rst index 60734b7b1..cbe585d36 100644 --- a/docs/source/flow_setup.rst +++ b/docs/source/flow_setup.rst @@ -112,6 +112,22 @@ If you are a Mac user and the above command gives you the error ``FXApp:openDisplay: unable to open display :0.0``, make sure to open the application XQuartz. +*Troubleshooting*: +If you are a Mac user and the above command gives you the error +``Segmentation fault: 11``, make sure to reinstall ``fox`` using brew. 
+:: + + # Uninstall Catalina bottle of fox: + $ brew uninstall --ignore-dependencies fox + + # Edit brew Formula of fox: + $ brew edit fox + + # Comment out or delete the following line: sha256 "c6697be294c9a0458580564d59f8db32791beb5e67a05a6246e0b969ffc068bc" => :catalina + # Install Mojave bottle of fox: + $ brew install fox + + Testing your SUMO and Flow installation ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index 7e0a5eb49..0a9a6774b 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -1,4 +1,5 @@ """Example of an open network with human-driven vehicles and a wave.""" + import numpy as np from flow.controllers import IDMController @@ -10,8 +11,8 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams +from flow.core.rewards import miles_per_gallon, miles_per_megajoule from flow.core.params import SumoCarFollowingParams -from flow.core.rewards import miles_per_megajoule from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS @@ -23,7 +24,7 @@ # the inflow rate of vehicles TRAFFIC_FLOW = 2215 # the simulation time horizon (in steps) -HORIZON = 1500 +HORIZON = 1000 # whether to include noise in the car-following models INCLUDE_NOISE = True @@ -64,13 +65,13 @@ ), ) -inflows = InFlows() if PENETRATION_RATE > 0.0: vehicles.add( "av", + color='red', num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 6.0}), + acceleration_controller=(FollowerStopper, {"v_des": 5.0, "control_length": [500, 2300]}), ) inflows = InFlows() @@ -145,5 +146,8 @@ env.k.vehicle.get_outflow_rate(120)), "miles_per_megajoule": lambda env: np.nan_to_num( miles_per_megajoule(env, env.k.vehicle.get_ids(), gain=1.0) + ), + "miles_per_gallon": lambda env: np.nan_to_num( + 
miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) ) } diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 25565bb49..65131a6bd 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -2,8 +2,9 @@ import os import numpy as np -from flow.controllers import IDMController -from flow.controllers import I210Router +from flow.controllers.car_following_models import IDMController +from flow.controllers.velocity_controllers import FollowerStopper +from flow.controllers.routing_controllers import I210Router from flow.core.params import SumoParams from flow.core.params import EnvParams from flow.core.params import NetParams @@ -11,94 +12,181 @@ from flow.core.params import VehicleParams from flow.core.params import InitialConfig from flow.core.params import InFlows +from flow.core.rewards import miles_per_gallon, miles_per_megajoule + import flow.config as config from flow.envs import TestEnv -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# =========================================================================== # -# Specify some configurable constants. # -# =========================================================================== # +# Instantiate which conditions we want to be true about the network -# whether to include the upstream ghost edge in the network +# whether to include a ghost cell at the entrance WANT_GHOST_CELL = True -# whether to include the downstream slow-down edge in the network -WANT_DOWNSTREAM_BOUNDARY = True # whether to include vehicles on the on-ramp -ON_RAMP = True -# the inflow rate of vehicles (in veh/hr) -INFLOW_RATE = 5 * 2215 +ON_RAMP = False +# fraction of vehicles that are follower-stoppers. 
0.10 corresponds to 10% +PENETRATION_RATE = 0.0 +# desired speed of the follower stopper vehicles +V_DES = 5.0 +# horizon over which to run the env +HORIZON = 1000 +# steps to run before follower-stopper is allowed to take control +WARMUP_STEPS = 600 + +# Number of vehicles/hour/lane +inflow_rate = 2050 # the speed of inflowing vehicles from the main edge (in m/s) -INFLOW_SPEED = 24.1 +inflow_speed = 25.5 -# =========================================================================== # -# Specify the path to the network template. # -# =========================================================================== # +accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) + +if WANT_GHOST_CELL: + from flow.networks.i210_subnetwork_ghost_cell import I210SubNetworkGhostCell, EDGES_DISTRIBUTION -if WANT_DOWNSTREAM_BOUNDARY: - net_template = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" - "downstream.xml") -elif WANT_GHOST_CELL: - net_template = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/i210_with_ghost_cell.xml") + highway_start_edge = 'ghost0' else: - net_template = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") + from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# If the ghost cell is not being used, remove it from the initial edges that -# vehicles can be placed on. -edges_distribution = EDGES_DISTRIBUTION.copy() -if not WANT_GHOST_CELL: - edges_distribution.remove("ghost0") - -# =========================================================================== # -# Specify vehicle-specific information and inflows. 
# -# =========================================================================== # + highway_start_edge = "119257914" vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, { - "a": 1.3, - "b": 2.0, - "noise": 0.3, - }), - routing_controller=(I210Router, {}) if ON_RAMP else None, -) inflow = InFlows() -# main highway -inflow.add( - veh_type="human", - edge="ghost0" if WANT_GHOST_CELL else "119257914", - vehs_per_hour=INFLOW_RATE, - departLane="best", - departSpeed=INFLOW_SPEED) -# on ramp + if ON_RAMP: + vehicles.add( + "human", + num_vehicles=0, + color="white", + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + routing_controller=(I210Router, {}) + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + color="red", + acceleration_controller=(FollowerStopper, {"v_des": V_DES, + "no_control_edges": ["ghost0", "119257908#3"] + }), + routing_controller=(I210Router, {}) + ) + + # inflow.add( + # veh_type="human", + # edge=highway_start_edge, + # vehs_per_hour=inflow_rate, + # departLane="best", + # departSpeed=inflow_speed) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=500, + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) inflow.add( veh_type="human", edge="27414342#0", - vehs_per_hour=500, + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) -# =========================================================================== # -# Generate the flow_params dict with all relevant simulation information. 
# -# =========================================================================== # + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + + inflow.add( + veh_type="av", + edge="27414345", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + inflow.add( + veh_type="av", + edge="27414342#0", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + +else: + # create the base vehicle type that will be used for inflows + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + color="red", + num_vehicles=0, + acceleration_controller=(FollowerStopper, {"v_des": V_DES, + "no_control_edges": ["ghost0", "119257908#3"] + }), + ) + + # If you want to turn off the fail safes uncomment this: + + # vehicles.add( + # 'human', + # num_vehicles=0, + # lane_change_params=SumoLaneChangeParams( + # lane_change_mode='strategic', + # ), + # acceleration_controller=accel_data, + # car_following_params=SumoCarFollowingParams(speed_mode='19') + # ) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + +network_xml_file = "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml" + +# network_xml_file = "examples/exp_configs/templates/sumo/i210_with_congestion.xml" + +NET_TEMPLATE = 
os.path.join(config.PROJECT_PATH, network_xml_file) + +if WANT_GHOST_CELL: + network = I210SubNetworkGhostCell +else: + network = I210SubNetwork flow_params = dict( # name of the experiment @@ -108,7 +196,7 @@ env_name=TestEnv, # name of the network class the experiment is running on - network=I210SubNetwork, + network=network, # simulator that is used by the experiment simulator='traci', @@ -117,24 +205,23 @@ sim=SumoParams( sim_step=0.4, render=False, - color_by_speed=True, + color_by_speed=False, use_ballistic=True ), # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( - horizon=10000, + horizon=HORIZON, + warmup_steps=WARMUP_STEPS, + sims_per_step=3 ), # network-related parameters (see flow.core.params.NetParams and the # network's documentation or ADDITIONAL_NET_PARAMS component) net=NetParams( inflows=inflow, - template=net_template, - additional_params={ - "on_ramp": ON_RAMP, - "ghost_edge": WANT_GHOST_CELL, - } + template=NET_TEMPLATE, + additional_params={"on_ramp": ON_RAMP, "ghost_edge": WANT_GHOST_CELL} ), # vehicles to be placed in the network at the start of a rollout (see @@ -144,7 +231,7 @@ # parameters specifying the positioning of vehicles upon initialization/ # reset (see flow.core.params.InitialConfig) initial=InitialConfig( - edges_distribution=edges_distribution, + edges_distribution=EDGES_DISTRIBUTION, ), ) @@ -153,14 +240,20 @@ # =========================================================================== # edge_id = "119257908#1-AddedOnRampEdge" + +def valid_ids(env, veh_ids): + return [veh_id for veh_id in veh_ids if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"]] + custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( - env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), + env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - # we multiply by 5 to account for the vehicle length 
and by 1000 to convert - # into veh/km - "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( - edge_id)) / (env.k.network.edge_length(edge_id) - * env.k.network.num_lanes(edge_id)), + # # we multiply by 5 to account for the vehicle length and by 1000 to convert + # # into veh/km + # "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( + # edge_id)) / (env.k.network.edge_length(edge_id) + # * env.k.network.num_lanes(edge_id)), + "mpg": lambda env: miles_per_gallon(env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), + "mpj": lambda env: miles_per_megajoule(env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), } diff --git a/examples/exp_configs/non_rl/straight_road.py b/examples/exp_configs/non_rl/straight_road.py index c557ce836..1669bb896 100644 --- a/examples/exp_configs/non_rl/straight_road.py +++ b/examples/exp_configs/non_rl/straight_road.py @@ -9,6 +9,7 @@ from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ VehicleParams, SumoParams, SumoLaneChangeParams +from flow.core.rewards import miles_per_gallon from flow.networks import HighwayNetwork from flow.envs import TestEnv from flow.networks.highway import ADDITIONAL_NET_PARAMS @@ -58,7 +59,7 @@ vehicles.add( "av", num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 18.0}), + acceleration_controller=(FollowerStopper, {"v_des": 12.0}), ) # add human vehicles on the highway @@ -98,7 +99,7 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - warmup_steps=0, + warmup_steps=400, sims_per_step=1, ), @@ -128,4 +129,6 @@ custom_callables = { "avg_speed": lambda env: np.nan_to_num(np.mean( env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(['highway_0', 'highway_1'])))), + "mpg": lambda env: miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) + } diff --git 
a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 01b9e6082..f55917e49 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -9,6 +9,7 @@ from ray.tune.registry import register_env from flow.controllers import RLController +from flow.controllers.routing_controllers import I210Router from flow.controllers.car_following_models import IDMController import flow.config as config from flow.core.params import EnvParams @@ -25,20 +26,32 @@ from flow.utils.registry import make_create_env # SET UP PARAMETERS FOR THE SIMULATION +WANT_GHOST_CELL = True +# WANT_DOWNSTREAM_BOUNDARY = True +ON_RAMP = False +PENETRATION_RATE = 0.10 +V_DES = 7.0 +HORIZON = 1000 +WARMUP_STEPS = 600 -# number of steps per rollout -HORIZON = 2000 +inflow_rate = 2050 +inflow_speed = 25.5 + +accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) VEH_PER_HOUR_BASE_119257914 = 10800 VEH_PER_HOUR_BASE_27414345 = 321 VEH_PER_HOUR_BASE_27414342 = 421 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 10 +if WANT_GHOST_CELL: + from flow.networks.i210_subnetwork_ghost_cell import I210SubNetworkGhostCell, EDGES_DISTRIBUTION -# TODO: temporary fix -edges_distribution = EDGES_DISTRIBUTION.copy() -edges_distribution.remove("ghost0") + edges_distribution = EDGES_DISTRIBUTION + highway_start_edge = 'ghost0' +else: + from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION + edges_distribution = EDGES_DISTRIBUTION + highway_start_edge = "119257914" # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() @@ -49,84 +62,180 @@ 'lead_obs': True, # whether to add in a reward for the speed of nearby vehicles "local_reward": True, + # whether to use the MPG reward. Otherwise, defaults to a target velocity reward + "mpg_reward": False, + # whether to use the MPJ reward. 
Otherwise, defaults to a target velocity reward + "mpj_reward": False, + # how many vehicles to look back for the MPG reward + "look_back_length": 1, # whether to reroute vehicles once they have exited "reroute_on_exit": True, - 'target_velocity': 18, + 'target_velocity': 8.0, + # how many AVs there can be at once (this is only for centralized critics) + "max_num_agents": 10, + # which edges we shouldn't apply control on + "no_control_edges": ["ghost0", "119257908#3"], + + # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + "headway_curriculum": False, + # how many timesteps to anneal the headway curriculum over + "headway_curriculum_iters": 100, + # weight of the headway reward + "headway_reward_gain": 2.0, + # desired time headway + "min_time_headway": 2.0, + + # whether to add a slight reward for traveling at a desired speed + "speed_curriculum": True, + # how many timesteps to anneal the headway curriculum over + "speed_curriculum_iters": 20, + # weight of the headway reward + "speed_reward_gain": 0.5, + # penalize stopped vehicles + "penalize_stops": True, + + # penalize accels + "penalize_accel": True }) # CREATE VEHICLE TYPES AND INFLOWS # no vehicles in the network vehicles = VehicleParams() -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams(lane_change_mode="strategic"), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), - car_following_params=SumoCarFollowingParams(speed_mode="no_collide"), -) -vehicles.add( - "av", - acceleration_controller=(RLController, {}), - num_vehicles=0, - color='red' -) inflow = InFlows() -# main highway -pen_rate = PENETRATION_RATE / 100 -assert pen_rate < 1.0, "your penetration rate is over 100%" -assert pen_rate > 0.0, "your penetration rate should be above zero" -inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * (1 - pen_rate)), - # probability=1.0, - 
depart_lane="random", - departSpeed=20) -# # on ramp -# inflow.add( -# veh_type="human", -# edge="27414345", -# vehs_per_hour=321 * pen_rate, -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="human", -# edge="27414342#0", -# vehs_per_hour=421 * pen_rate, -# depart_lane="random", -# depart_speed=20) - -# Now add the AVs -# main highway -inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pen_rate), - # probability=1.0, - depart_lane="random", - depart_speed=20) -# # on ramp -# inflow.add( -# veh_type="av", -# edge="27414345", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414345 * pen_rate), -# depart_lane="random", -# depart_speed=20) -# inflow.add( -# veh_type="av", -# edge="27414342#0", -# vehs_per_hour=int(VEH_PER_HOUR_BASE_27414342 * pen_rate), -# depart_lane="random", -# depart_speed=20) - -NET_TEMPLATE = os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml") - -warmup_steps = 0 -if additional_env_params['reroute_on_exit']: - warmup_steps = 400 + +if ON_RAMP: + vehicles.add( + "human", + num_vehicles=0, + color="white", + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + routing_controller=(I210Router, {}) + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + num_vehicles=0, + color="red", + acceleration_controller=(RLController, {}), + routing_controller=(I210Router, {}) + ) + + # inflow.add( + # veh_type="human", + # edge=highway_start_edge, + # vehs_per_hour=inflow_rate, + # departLane="best", + # departSpeed=inflow_speed) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + + inflow.add( + veh_type="human", + edge="27414345", + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + departLane="random", + 
departSpeed=10) + inflow.add( + veh_type="human", + edge="27414342#0", + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + departLane="random", + departSpeed=10) + + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + + inflow.add( + veh_type="av", + edge="27414345", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + inflow.add( + veh_type="av", + edge="27414342#0", + vehs_per_hour=int(500 * PENETRATION_RATE), + departLane="random", + departSpeed=10) + +else: + # create the base vehicle type that will be used for inflows + vehicles.add( + "human", + num_vehicles=0, + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + acceleration_controller=accel_data, + ) + if PENETRATION_RATE > 0.0: + vehicles.add( + "av", + color="red", + num_vehicles=0, + acceleration_controller=(RLController, {}), + ) + + # If you want to turn off the fail safes uncomment this: + + # vehicles.add( + # 'human', + # num_vehicles=0, + # lane_change_params=SumoLaneChangeParams( + # lane_change_mode='strategic', + # ), + # acceleration_controller=accel_data, + # car_following_params=SumoCarFollowingParams(speed_mode='19') + # ) + + lane_list = ['0', '1', '2', '3', '4'] + + for lane in lane_list: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=inflow_speed) + + if PENETRATION_RATE > 0.0: + for lane in lane_list: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=int(inflow_rate * PENETRATION_RATE), + departLane=lane, + departSpeed=inflow_speed) + + +network_xml_file = "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml" + +# network_xml_file = "examples/exp_configs/templates/sumo/i210_with_congestion.xml" + +NET_TEMPLATE 
= os.path.join(config.PROJECT_PATH, network_xml_file) + +if WANT_GHOST_CELL: + network = I210SubNetworkGhostCell +else: + network = I210SubNetwork flow_params = dict( # name of the experiment @@ -136,14 +245,14 @@ env_name=I210MultiEnv, # name of the network class the experiment is running on - network=I210SubNetwork, + network=network, # simulator that is used by the experiment simulator='traci', # simulation-related parameters sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, color_by_speed=False, restart_instance=True, @@ -154,8 +263,8 @@ # environment related parameters (see flow.core.params.EnvParams) env=EnvParams( horizon=HORIZON, - sims_per_step=1, - warmup_steps=warmup_steps, + sims_per_step=3, + warmup_steps=WARMUP_STEPS, additional_params=additional_env_params, done_at_exit=False ), @@ -166,8 +275,8 @@ inflows=inflow, template=NET_TEMPLATE, additional_params={ - "on_ramp": False, - "ghost_edge": False + "on_ramp": ON_RAMP, + "ghost_edge": WANT_GHOST_CELL } ), diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index ec71a2f42..5816d3fe7 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -6,14 +6,13 @@ from flow.controllers import RLController, IDMController from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ VehicleParams, SumoParams, SumoLaneChangeParams, SumoCarFollowingParams -from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS from flow.networks import HighwayNetwork +from flow.envs.ring.accel import ADDITIONAL_ENV_PARAMS from flow.envs.multiagent import MultiStraightRoad from flow.networks.highway import ADDITIONAL_NET_PARAMS from flow.utils.registry import make_create_env from ray.tune.registry import register_env - # SET UP PARAMETERS FOR THE SIMULATION # the speed of vehicles entering the network @@ -23,7 +22,7 @@ # the 
inflow rate of vehicles HIGHWAY_INFLOW_RATE = 2215 # the simulation time horizon (in steps) -HORIZON = 1500 +HORIZON = 1000 # whether to include noise in the car-following models INCLUDE_NOISE = True @@ -54,11 +53,42 @@ additional_env_params.update({ 'max_accel': 2.6, 'max_decel': 4.5, - 'target_velocity': 18, + 'target_velocity': 6.0, 'local_reward': True, 'lead_obs': True, + 'control_range': [500, 2300], # whether to reroute vehicles once they have exited - "reroute_on_exit": True + "reroute_on_exit": True, + # whether to use the MPG reward. Otherwise, defaults to a target velocity reward + "mpg_reward": False, + # whether to use the joules reward. Otherwise, defaults to a target velocity reward + "mpj_reward": False, + # how many vehicles to look back for the MPG reward + "look_back_length": 3, + # how many AVs there can be at once (this is only for centralized critics) + "max_num_agents": 10, + + # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + "headway_curriculum": False, + # how many timesteps to anneal the headway curriculum over + "headway_curriculum_iters": 100, + # weight of the headway reward + "headway_reward_gain": 2.0, + # desired time headway + "min_time_headway": 2.0, + + # whether to add a slight reward for traveling at a desired speed + "speed_curriculum": True, + # how many timesteps to anneal the headway curriculum over + "speed_curriculum_iters": 20, + # weight of the headway reward + "speed_reward_gain": 1.0, + + # penalize stopped vehicles + "penalize_stops": True, + + # penalize accels + "penalize_accel": True }) @@ -66,8 +96,6 @@ vehicles = VehicleParams() inflows = InFlows() - -# human vehicles vehicles.add( "human", acceleration_controller=(IDMController, { @@ -96,7 +124,7 @@ edge="highway_0", vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), depart_lane="free", - depart_speed="23.0", + depart_speed=TRAFFIC_SPEED, name="idm_highway_inflow") # add autonomous vehicles on 
the highway @@ -106,13 +134,13 @@ edge="highway_0", vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), depart_lane="free", - depart_speed="23.0", + depart_speed=TRAFFIC_SPEED, name="rl_highway_inflow") # SET UP FLOW PARAMETERS warmup_steps = 0 if additional_env_params['reroute_on_exit']: - warmup_steps = 400 + warmup_steps = 500 flow_params = dict( # name of the experiment @@ -131,16 +159,16 @@ env=EnvParams( horizon=HORIZON, warmup_steps=warmup_steps, - sims_per_step=1, # do not put more than one - additional_params=additional_env_params, + sims_per_step=3, + additional_params=additional_env_params ), # sumo-related parameters (see flow.core.params.SumoParams) sim=SumoParams( - sim_step=0.5, + sim_step=0.4, render=False, - use_ballistic=True, - restart_instance=True + restart_instance=True, + use_ballistic=True ), # network-related parameters (see flow.core.params.NetParams and the diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml index 10d4d8d45..b9b2db479 100644 --- a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml +++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml @@ -3501,11 +3501,11 @@ - - - - - + + + + + diff --git a/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml new file mode 100644 index 000000000..ee508b730 --- /dev/null +++ b/examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream_test.xml @@ -0,0 +1,5719 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/examples/train.py b/examples/train.py index 1689d846f..74a6cd71a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -21,17 +21,19 @@ from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv from stable_baselines import PPO2 except ImportError: - print("Stable-baselines not installed") + print("Stable-baselines not installed. 
Please install it if you need it.") +import ray from ray import tune from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class +from ray.tune.registry import register_env from flow.core.util import ensure_dir -from flow.core.rewards import energy_consumption +from flow.core.rewards import miles_per_gallon, miles_per_megajoule from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params from flow.utils.registry import make_create_env @@ -58,7 +60,7 @@ def parse_args(args): parser.add_argument( 'exp_title', type=str, - help='Title to give the run.') + help='Name of experiment that results will be stored in') # optional input parameters parser.add_argument( @@ -66,7 +68,8 @@ def parse_args(args): help='the RL trainer to use. either rllib or Stable-Baselines') parser.add_argument( '--algorithm', type=str, default="PPO", - help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' + help='RL algorithm to use. Options are PPO, TD3, and CENTRALIZEDPPO (which uses a centralized value function)' + ' right now.' 
) parser.add_argument( '--num_cpus', type=int, default=1, @@ -172,37 +175,68 @@ def setup_exps_rllib(flow_params, dict training configuration parameters """ - from ray.tune.registry import register_env horizon = flow_params['env'].horizon alg_run = flags.algorithm.upper() if alg_run == "PPO": - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) + from flow.algorithms.custom_ppo import CustomPPOTrainer + from ray.rllib.agents.ppo import DEFAULT_CONFIG + alg_run = CustomPPOTrainer + config = deepcopy(DEFAULT_CONFIG) config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["model"].update({"fcnet_hiddens": [32, 32]}) config["train_batch_size"] = horizon * n_rollouts - config["gamma"] = 0.999 # discount rate + config["gamma"] = 0.995 # discount rate config["use_gae"] = True config["lambda"] = 0.97 config["kl_target"] = 0.02 config["num_sgd_iter"] = 10 + if flags.grid_search: + config["lambda"] = tune.grid_search([0.5, 0.9]) + config["lr"] = tune.grid_search([5e-4, 5e-5]) + elif alg_run == "CENTRALIZEDPPO": + from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel + from ray.rllib.agents.ppo import DEFAULT_CONFIG + from ray.rllib.models import ModelCatalog + alg_run = CCTrainer + config = deepcopy(DEFAULT_CONFIG) + config['model']['custom_model'] = "cc_model" + config["model"]["custom_options"]["max_num_agents"] = flow_params['env'].additional_params['max_num_agents'] + config["model"]["custom_options"]["central_vf_size"] = 100 + + ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) + + config["num_workers"] = n_cpus + config["horizon"] = horizon + config["model"].update({"fcnet_hiddens": [32, 32]}) + config["train_batch_size"] = horizon * n_rollouts + config["gamma"] = 0.995 # discount rate + config["use_gae"] = True + config["lambda"] = 0.97 + config["kl_target"] = 0.02 + config["num_sgd_iter"] = 10 + if flags.grid_search: + 
config["lambda"] = tune.grid_search([0.5, 0.9]) + config["lr"] = tune.grid_search([5e-4, 5e-5]) + elif alg_run == "TD3": agent_cls = get_agent_class(alg_run) config = deepcopy(agent_cls._default_config) config["num_workers"] = n_cpus config["horizon"] = horizon + config["learning_starts"] = 10000 config["buffer_size"] = 20000 # reduced to test if this is the source of memory problems if flags.grid_search: config["prioritized_replay"] = tune.grid_search(['True', 'False']) config["actor_lr"] = tune.grid_search([1e-3, 1e-4]) config["critic_lr"] = tune.grid_search([1e-3, 1e-4]) config["n_step"] = tune.grid_search([1, 10]) + else: sys.exit("We only support PPO, TD3, right now.") @@ -210,27 +244,59 @@ def setup_exps_rllib(flow_params, def on_episode_start(info): episode = info["episode"] episode.user_data["avg_speed"] = [] + episode.user_data["avg_speed_avs"] = [] episode.user_data["avg_energy"] = [] + episode.user_data["avg_mpg"] = [] + episode.user_data["avg_mpj"] = [] + def on_episode_step(info): episode = info["episode"] env = info["env"].get_unwrapped()[0] if isinstance(env, _GroupAgentsWrapper): env = env.env - speed = np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]) + if hasattr(env, 'no_control_edges'): + veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if (env.k.vehicle.get_speed(veh_id) >= 0 + and env.k.vehicle.get_edge(veh_id) + not in env.no_control_edges)] + rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if (env.k.vehicle.get_speed(veh_id) >= 0 + and env.k.vehicle.get_edge(veh_id) + not in env.no_control_edges)] + else: + veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if env.k.vehicle.get_speed(veh_id) >= 0] + rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if env.k.vehicle.get_speed(veh_id) >= 0] + + speed = np.mean([speed for speed in env.k.vehicle.get_speed(veh_ids)]) if not np.isnan(speed): episode.user_data["avg_speed"].append(speed) - 
episode.user_data["avg_energy"].append(energy_consumption(env)) + av_speed = np.mean([speed for speed in env.k.vehicle.get_speed(rl_ids) if speed >= 0]) + if not np.isnan(av_speed): + episode.user_data["avg_speed_avs"].append(av_speed) + episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0)) + episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0)) + def on_episode_end(info): episode = info["episode"] avg_speed = np.mean(episode.user_data["avg_speed"]) episode.custom_metrics["avg_speed"] = avg_speed + avg_speed_avs = np.mean(episode.user_data["avg_speed_avs"]) + episode.custom_metrics["avg_speed_avs"] = avg_speed_avs episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"]) + episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["avg_mpg"]) + episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"]) + + def on_train_result(info): + """Store the mean score of the episode, and increment or decrement how many adversaries are on""" + trainer = info["trainer"] + trainer.workers.foreach_worker( + lambda ev: ev.foreach_env( + lambda env: env.set_iteration_num())) config["callbacks"] = {"on_episode_start": tune.function(on_episode_start), "on_episode_step": tune.function(on_episode_step), - "on_episode_end": tune.function(on_episode_end)} + "on_episode_end": tune.function(on_episode_end), + "on_train_result": tune.function(on_train_result)} # save the flow params for replay flow_json = json.dumps( @@ -240,7 +306,6 @@ def on_episode_end(info): # multiagent configuration if policy_graphs is not None: - print("policy_graphs", policy_graphs) config['multiagent'].update({'policies': policy_graphs}) if policy_mapping_fn is not None: config['multiagent'].update({'policy_mapping_fn': tune.function(policy_mapping_fn)}) @@ -255,7 +320,6 @@ def on_episode_end(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" - import ray 
flow_params = submodule.flow_params flow_params['sim'].render = flags.render @@ -280,7 +344,7 @@ def trial_str_creator(trial): ray.init() exp_dict = { "run_or_experiment": alg_run, - "name": gym_name, + "name": flags.exp_title, "config": config, "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py new file mode 100644 index 000000000..8f3b9f261 --- /dev/null +++ b/flow/algorithms/centralized_PPO.py @@ -0,0 +1,547 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function +"""An example of customizing PPO to leverage a centralized critic.""" + +import argparse +import numpy as np + +from gym.spaces import Dict + +from ray import tune +from ray.rllib.agents.ppo.ppo import PPOTrainer +from flow.algorithms.custom_ppo import CustomPPOTFPolicy +from ray.rllib.evaluation.postprocessing import compute_advantages, \ + Postprocessing +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.tf_policy import LearningRateSchedule, \ + EntropyCoeffSchedule, ACTION_LOGP +from ray.rllib.models.modelv2 import ModelV2 +from ray.rllib.models.tf.tf_modelv2 import TFModelV2 +from ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2 +from ray.rllib.models.model import restore_original_dimensions +from ray.rllib.utils.annotations import override +from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork +from ray.rllib.utils.explained_variance import explained_variance +from ray.rllib.utils import try_import_tf + + +tf = try_import_tf() + +# Frozen logits of the policy that computed the action +BEHAVIOUR_LOGITS = "behaviour_logits" + +CENTRAL_OBS = "central_obs" +OPPONENT_ACTION = "opponent_action" + +parser = argparse.ArgumentParser() +parser.add_argument("--stop", type=int, default=100000) + +#TODOy + +class CentralizedCriticModel(TFModelV2): + """Multi-agent model that implements a 
centralized VF.""" + # TODO(@evinitsky) make this work with more than boxes + + def __init__(self, obs_space, action_space, num_outputs, model_config, + name): + super(CentralizedCriticModel, self).__init__( + obs_space, action_space, num_outputs, model_config, name) + # Base of the model + self.model = FullyConnectedNetwork(obs_space, action_space, + num_outputs, model_config, name) + self.register_variables(self.model.variables()) + + # Central VF maps (obs, opp_ops, opp_act) -> vf_pred + self.max_num_agents = model_config['custom_options']['max_num_agents'] + self.obs_space_shape = obs_space.shape[0] + self.obs_space = obs_space + other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents, ), name="central_obs") + central_vf_dense = tf.keras.layers.Dense( + model_config['custom_options']['central_vf_size'], activation=tf.nn.tanh, name="c_vf_dense")(other_obs) + central_vf_out = tf.keras.layers.Dense( + 1, activation=None, name="c_vf_out")(central_vf_dense) + self.central_vf = tf.keras.Model( + inputs=[other_obs], outputs=central_vf_out) + self.register_variables(self.central_vf.variables) + + def forward(self, input_dict, state, seq_lens): + return self.model.forward(input_dict, state, seq_lens) + + def central_value_function(self, central_obs): + return tf.reshape( + self.central_vf( + [central_obs]), [-1]) + + def value_function(self): + return self.model.value_function() # not used + + +# TODO(@evinitsky) support recurrence +class CentralizedCriticModelRNN(RecurrentTFModelV2): + """Example of using the Keras functional API to define a RNN model.""" + + def __init__(self, + obs_space, + action_space, + num_outputs, + model_config, + name, + hiddens_size=64, + cell_size=64): + super(CentralizedCriticModelRNN, self).__init__(obs_space, action_space, num_outputs, + model_config, name) + self.cell_size = cell_size + + # Define input layers + input_layer = tf.keras.layers.Input( + shape=(None, obs_space.shape[0]), name="inputs") + state_in_h 
= tf.keras.layers.Input(shape=(cell_size, ), name="h") + state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c") + seq_in = tf.keras.layers.Input(shape=(), name="seq_in") + + # Preprocess observation with a hidden layer and send to LSTM cell + dense1 = tf.keras.layers.Dense( + hiddens_size, activation=tf.nn.relu, name="dense1")(input_layer) + lstm_out, state_h, state_c = tf.keras.layers.LSTM( + cell_size, return_sequences=True, return_state=True, name="lstm")( + inputs=dense1, + mask=tf.sequence_mask(seq_in), + initial_state=[state_in_h, state_in_c]) + + # Postprocess LSTM output with another hidden layer and compute values + logits = tf.keras.layers.Dense( + self.num_outputs, + activation=tf.keras.activations.linear, + name="logits")(lstm_out) + values = tf.keras.layers.Dense( + 1, activation=None, name="values")(lstm_out) + + # Create the RNN model + self.model = tf.keras.Model( + inputs=[input_layer, seq_in, state_in_h, state_in_c], + outputs=[logits, values, state_h, state_c]) + self.register_variables(self.model.variables) + self.model.summary() + + #TODO(@evinitsky) add layer sharing to the VF + # Create the centralized VF + # Central VF maps (obs, opp_ops, opp_act) -> vf_pred + self.max_num_agents = model_config.get("max_num_agents", 120) + self.obs_space_shape = obs_space.shape[0] + other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents,), name="all_agent_obs") + central_vf_dense = tf.keras.layers.Dense( + model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(other_obs) + central_vf_dense2 = tf.keras.layers.Dense( + model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(central_vf_dense) + central_vf_out = tf.keras.layers.Dense( + 1, activation=None, name="c_vf_out")(central_vf_dense2) + self.central_vf = tf.keras.Model( + inputs=[other_obs], outputs=central_vf_out) + self.register_variables(self.central_vf.variables) + + @override(RecurrentTFModelV2) + def 
forward_rnn(self, inputs, state, seq_lens): + model_out, self._value_out, h, c = self.model([inputs, seq_lens] + + state) + return model_out, [h, c] + + @override(ModelV2) + def get_initial_state(self): + return [ + np.zeros(self.cell_size, np.float32), + np.zeros(self.cell_size, np.float32), + ] + + def central_value_function(self, central_obs): + return tf.reshape( + self.central_vf( + [central_obs]), [-1]) + + def value_function(self): + return tf.reshape(self._value_out, [-1]) # not used + + +class CentralizedValueMixin(object): + """Add methods to evaluate the central value function from the model.""" + + def __init__(self): + # TODO(@evinitsky) clean up naming + self.central_value_function = self.model.central_value_function( + self.get_placeholder(CENTRAL_OBS) + ) + + def compute_central_vf(self, central_obs): + feed_dict = { + self.get_placeholder(CENTRAL_OBS): central_obs, + } + return self.get_session().run(self.central_value_function, feed_dict) + + +# Grabs the opponent obs/act and includes it in the experience train_batch, +# and computes GAE using the central vf predictions. 
+def centralized_critic_postprocessing(policy, + sample_batch, + other_agent_batches=None, + episode=None): + if policy.loss_initialized(): + assert other_agent_batches is not None + + # time_span = (sample_batch['t'][0], sample_batch['t'][-1]) + # # there's a new problem here, namely that a segment might not be continuous due to the rerouting + # other_agent_timespans = {agent_id: + # (other_agent_batches[agent_id][1]["t"][0], + # other_agent_batches[agent_id][1]["t"][-1]) + # for agent_id in other_agent_batches.keys()} + other_agent_times = {agent_id: other_agent_batches[agent_id][1]["t"] + for agent_id in other_agent_batches.keys()} + agent_time = sample_batch['t'] + # # find agents whose time overlaps with the current agent + rel_agents = {agent_id: other_agent_time for agent_id, other_agent_time in other_agent_times.items()} + # if len(rel_agents) > 0: + other_obs = {agent_id: + other_agent_batches[agent_id][1]["obs"].copy() + for agent_id in other_agent_batches.keys()} + # padded_agent_obs = {agent_id: + # overlap_and_pad_agent( + # time_span, + # rel_agent_time, + # other_obs[agent_id]) + # for agent_id, + # rel_agent_time in rel_agents.items()} + padded_agent_obs = {agent_id: + fill_missing( + agent_time, + other_agent_times[agent_id], + other_obs[agent_id]) + for agent_id, + rel_agent_time in rel_agents.items()} + # okay, now we need to stack and sort + central_obs_list = [padded_obs for padded_obs in padded_agent_obs.values()] + try: + central_obs_batch = np.hstack((sample_batch["obs"], np.hstack(central_obs_list))) + except: + # TODO(@ev) this is a bug and needs to be fixed + central_obs_batch = sample_batch["obs"] + max_vf_agents = policy.model.max_num_agents + num_agents = len(rel_agents) + 1 + if num_agents < max_vf_agents: + diff = max_vf_agents - num_agents + zero_pad = np.zeros((central_obs_batch.shape[0], + policy.model.obs_space_shape * diff)) + central_obs_batch = np.hstack((central_obs_batch, + zero_pad)) + elif num_agents > max_vf_agents: + 
print("Too many agents!") + + # also record the opponent obs and actions in the trajectory + sample_batch[CENTRAL_OBS] = central_obs_batch + + # overwrite default VF prediction with the central VF + sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(sample_batch[CENTRAL_OBS]) + else: + # policy hasn't initialized yet, use zeros + #TODO(evinitsky) put in the right shape + obs_shape = sample_batch[SampleBatch.CUR_OBS].shape[1] + obs_shape = (1, obs_shape * (policy.model.max_num_agents)) + sample_batch[CENTRAL_OBS] = np.zeros(obs_shape) + # TODO(evinitsky) put in the right shape. Will break if actions aren't 1 + sample_batch[SampleBatch.VF_PREDS] = np.zeros(1, dtype=np.float32) + + completed = sample_batch["dones"][-1] + + # if not completed and policy.loss_initialized(): + # last_r = 0.0 + # else: + # next_state = [] + # for i in range(policy.num_state_tensors()): + # next_state.append([sample_batch["state_out_{}".format(i)][-1]]) + # last_r = policy.compute_central_vf(sample_batch[CENTRAL_OBS][-1][np.newaxis, ...])[0] + + batch = compute_advantages( + sample_batch, + 0.0, + policy.config["gamma"], + policy.config["lambda"], + use_gae=policy.config["use_gae"]) + return batch + + + +def time_overlap(time_span, agent_time): + """Check if agent_time overlaps with time_span""" + if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]: + return True + else: + return False + + +def fill_missing(agent_time, other_agent_time, obs): + # shortcut, the two overlap perfectly + if np.sum(agent_time == other_agent_time) == agent_time.shape[0]: + return obs + new_obs = np.zeros((agent_time.shape[0], obs.shape[1])) + other_agent_time_set = set(other_agent_time) + for i, time in enumerate(agent_time): + if time in other_agent_time_set: + new_obs[i] = obs[np.where(other_agent_time == time)] + return new_obs + + +def overlap_and_pad_agent(time_span, agent_time, obs): + """take the part of obs that overlaps, pad to length time_span + Arguments: + time_span 
(tuple): tuple of the first and last time that the agent + of interest is in the system + agent_time (tuple): tuple of the first and last time that the + agent whose obs we are padding is in the system + obs (np.ndarray): observations of the agent whose time is + agent_time + """ + assert time_overlap(time_span, agent_time) + print(time_span) + print(agent_time) + if time_span[0] == 7 or agent_time[0] == 7: + import ipdb; ipdb.set_trace() + # FIXME(ev) some of these conditions can be combined + # no padding needed + if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]: + if obs.shape[0] < 200: + import ipdb; ipdb.set_trace() + return obs + # agent enters before time_span starts and exits before time_span end + if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]: + non_overlap_time = time_span[0] - agent_time[0] + missing_time = time_span[1] - agent_time[1] + overlap_obs = obs[non_overlap_time:] + padding = np.zeros((missing_time, obs.shape[1])) + obs_concat = np.concatenate((overlap_obs, padding)) + if obs_concat.shape[0] < 200: + import ipdb; ipdb.set_trace() + return obs_concat + # agent enters after time_span starts and exits after time_span ends + elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]: + non_overlap_time = agent_time[1] - time_span[1] + overlap_obs = obs[:-non_overlap_time] + missing_time = agent_time[0] - time_span[0] + padding = np.zeros((missing_time, obs.shape[1])) + obs_concat = np.concatenate((padding, overlap_obs)) + if obs_concat.shape[0] < 200: + import ipdb; ipdb.set_trace() + return obs_concat + # agent time is entirely contained in time_span + elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]: + missing_left = agent_time[0] - time_span[0] + missing_right = time_span[1] - agent_time[1] + obs_concat = obs + if missing_left > 0: + padding = np.zeros((missing_left, obs.shape[1])) + obs_concat = np.concatenate((padding, obs_concat)) + if missing_right > 0: + padding = 
np.zeros((missing_right, obs.shape[1])) + obs_concat = np.concatenate((obs_concat, padding)) + if obs_concat.shape[0] < 200: + import ipdb; ipdb.set_trace() + return obs_concat + # agent time totally contains time_span + elif agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]: + non_overlap_left = time_span[0] - agent_time[0] + non_overlap_right = agent_time[1] - time_span[1] + overlap_obs = obs + if non_overlap_left > 0: + overlap_obs = overlap_obs[non_overlap_left:] + if non_overlap_right > 0: + overlap_obs = overlap_obs[:-non_overlap_right] + if overlap_obs.shape[0] < 200: + import ipdb; ipdb.set_trace() + return overlap_obs + + +# Copied from PPO but optimizing the central value function +def loss_with_central_critic(policy, model, dist_class, train_batch): + CentralizedValueMixin.__init__(policy) + + logits, state = model.from_batch(train_batch) + action_dist = dist_class(logits, model) + + policy.loss_obj = PPOLoss( + policy.action_space, + dist_class, + model, + train_batch[Postprocessing.VALUE_TARGETS], + train_batch[Postprocessing.ADVANTAGES], + train_batch[SampleBatch.ACTIONS], + train_batch[BEHAVIOUR_LOGITS], + train_batch[ACTION_LOGP], + train_batch[SampleBatch.VF_PREDS], + action_dist, + policy.central_value_function, + policy.kl_coeff, + tf.ones_like(train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool), + entropy_coeff=policy.entropy_coeff, + clip_param=policy.config["clip_param"], + vf_clip_param=policy.config["vf_clip_param"], + vf_loss_coeff=policy.config["vf_loss_coeff"], + use_gae=policy.config["use_gae"], + model_config=policy.config["model"]) + + return policy.loss_obj.loss + + +class PPOLoss(object): + def __init__(self, + action_space, + dist_class, + model, + value_targets, + advantages, + actions, + prev_logits, + prev_actions_logp, + vf_preds, + curr_action_dist, + value_fn, + cur_kl_coeff, + valid_mask, + entropy_coeff=0, + clip_param=0.1, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True, + model_config=None): + 
"""Constructs the loss for Proximal Policy Objective. + + Arguments: + action_space: Environment observation space specification. + dist_class: action distribution class for logits. + value_targets (Placeholder): Placeholder for target values; used + for GAE. + actions (Placeholder): Placeholder for actions taken + from previous model evaluation. + advantages (Placeholder): Placeholder for calculated advantages + from previous model evaluation. + prev_logits (Placeholder): Placeholder for logits output from + previous model evaluation. + prev_actions_logp (Placeholder): Placeholder for prob output from + previous model evaluation. + vf_preds (Placeholder): Placeholder for value function output + from previous model evaluation. + curr_action_dist (ActionDistribution): ActionDistribution + of the current model. + value_fn (Tensor): Current value function output Tensor. + cur_kl_coeff (Variable): Variable holding the current PPO KL + coefficient. + valid_mask (Tensor): A bool mask of valid input elements (#2992). + entropy_coeff (float): Coefficient of the entropy regularizer. + clip_param (float): Clip parameter + vf_clip_param (float): Clip parameter for the value function + vf_loss_coeff (float): Coefficient of the value function loss + use_gae (bool): If true, use the Generalized Advantage Estimator. + model_config (dict): (Optional) model config for use in specifying + action distributions. + """ + + def reduce_mean_valid(t): + return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) + + prev_dist = dist_class(prev_logits, model) + # Make loss functions. 
+ logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) + action_kl = prev_dist.kl(curr_action_dist) + self.mean_kl = reduce_mean_valid(action_kl) + + curr_entropy = curr_action_dist.entropy() + self.mean_entropy = reduce_mean_valid(curr_entropy) + + surrogate_loss = tf.minimum( + advantages * logp_ratio, + advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, + 1 + clip_param)) + self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) + + if use_gae: + vf_loss1 = tf.square(value_fn - value_targets) + vf_clipped = vf_preds + tf.clip_by_value( + value_fn - vf_preds, -vf_clip_param, vf_clip_param) + vf_loss2 = tf.square(vf_clipped - value_targets) + vf_loss = tf.maximum(vf_loss1, vf_loss2) + self.mean_vf_loss = reduce_mean_valid(vf_loss) + loss = reduce_mean_valid( + -surrogate_loss + + vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) + else: + self.mean_vf_loss = tf.constant(0.0) + loss = reduce_mean_valid(-surrogate_loss - + entropy_coeff * curr_entropy) + self.loss = loss + + +def new_ppo_surrogate_loss(policy, model, dist_class, train_batch): + loss = loss_with_central_critic(policy, model, dist_class, train_batch) + return loss + + +class KLCoeffMixin(object): + def __init__(self, config): + # KL Coefficient + self.kl_coeff_val = config["kl_coeff"] + self.kl_target = config["kl_target"] + self.kl_coeff = tf.get_variable( + initializer=tf.constant_initializer(self.kl_coeff_val), + name="kl_coeff", + shape=(), + trainable=False, + dtype=tf.float32) + def update_kl(self, blah): + pass + + +def setup_mixins(policy, obs_space, action_space, config): + # copied from PPO + KLCoeffMixin.__init__(policy, config) + + EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], + config["entropy_coeff_schedule"]) + LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) + # hack: put in a noop VF so some of the inherited PPO code runs + policy.value_function = tf.zeros( + 
tf.shape(policy.get_placeholder(SampleBatch.CUR_OBS))[0]) + + +def central_vf_stats(policy, train_batch, grads): + # Report the explained variance of the central value function. + return { + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + policy.central_value_function), + } + +def kl_and_loss_stats(policy, train_batch): + return { + "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), + "cur_lr": tf.cast(policy.cur_lr, tf.float64), + "total_loss": policy.loss_obj.loss, + "policy_loss": policy.loss_obj.mean_policy_loss, + "vf_loss": policy.loss_obj.mean_vf_loss, + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + policy.model.value_function()), + "vf_preds": train_batch[Postprocessing.VALUE_TARGETS], + "kl": policy.loss_obj.mean_kl, + "entropy": policy.loss_obj.mean_entropy, + "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), + } + +CCPPO = CustomPPOTFPolicy.with_updates( + name="CCPPO", + postprocess_fn=centralized_critic_postprocessing, + loss_fn=new_ppo_surrogate_loss, + stats_fn=kl_and_loss_stats, + before_loss_init=setup_mixins, + grad_stats_fn=central_vf_stats, + mixins=[ + LearningRateSchedule, EntropyCoeffSchedule, + CentralizedValueMixin, KLCoeffMixin + ]) + +CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO) \ No newline at end of file diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py new file mode 100644 index 000000000..a98af6c2d --- /dev/null +++ b/flow/algorithms/custom_ppo.py @@ -0,0 +1,318 @@ +"""PPO but we add in the outflow after the reward to the final reward""" +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import numpy as np +import ray +from ray.rllib.agents.ppo.ppo import PPOTrainer +from ray.rllib.evaluation.postprocessing import compute_advantages, \ + Postprocessing +from ray.rllib.policy.sample_batch import 
# NOTE(review): reconstructed from a diff-mangled paste of a custom PPO TF
# policy module.  ``import logging`` and the first two imports below sit
# above this span in the original file; they are repeated here so the span
# stands on its own (re-importing is harmless in Python).
import logging

from ray.rllib.evaluation.postprocessing import compute_advantages, \
    Postprocessing
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.policy.tf_policy import LearningRateSchedule, \
    EntropyCoeffSchedule, ACTION_LOGP
from ray.rllib.policy.tf_policy_template import build_tf_policy
from ray.rllib.utils.explained_variance import explained_variance
from ray.rllib.utils.tf_ops import make_tf_callable
from ray.rllib.utils import try_import_tf

tf = try_import_tf()

logger = logging.getLogger(__name__)

# Frozen logits of the policy that computed the action.
BEHAVIOUR_LOGITS = "behaviour_logits"


class PPOLoss(object):
    """Builds the PPO surrogate loss graph.

    After construction the following tensors are exposed as attributes:
    ``loss`` (the total loss), ``mean_policy_loss``, ``mean_vf_loss``,
    ``mean_kl`` and ``mean_entropy``.

    NOTE(review): unlike stock PPO, ``cur_kl_coeff * action_kl`` is *not*
    added to the loss here -- presumably intentional for this custom
    trainer (its ``update_kl`` is also a no-op); confirm before reusing.
    """

    def __init__(self,
                 action_space,
                 dist_class,
                 model,
                 value_targets,
                 advantages,
                 actions,
                 prev_logits,
                 prev_actions_logp,
                 vf_preds,
                 curr_action_dist,
                 value_fn,
                 cur_kl_coeff,
                 valid_mask,
                 entropy_coeff=0,
                 clip_param=0.1,
                 vf_clip_param=0.1,
                 vf_loss_coeff=1.0,
                 use_gae=True,
                 model_config=None):
        """Construct the loss for Proximal Policy Objective.

        Arguments:
            action_space: Action space of the policy (kept for interface
                compatibility; not used directly in the loss).
            dist_class: action distribution class for logits.
            model: the policy model.
            value_targets (Placeholder): Placeholder for target values;
                used for GAE.
            advantages (Placeholder): Placeholder for calculated
                advantages from previous model evaluation.
            actions (Placeholder): Placeholder for actions taken from
                previous model evaluation.
            prev_logits (Placeholder): Placeholder for logits output from
                previous model evaluation.
            prev_actions_logp (Placeholder): Placeholder for action log
                probability output from previous model evaluation.
            vf_preds (Placeholder): Placeholder for value function output
                from previous model evaluation.
            curr_action_dist (ActionDistribution): ActionDistribution
                of the current model.
            value_fn (Tensor): Current value function output Tensor.
            cur_kl_coeff (Variable): Variable holding the current PPO KL
                coefficient.
            valid_mask (Tensor): A bool mask of valid input elements
                (#2992).
            entropy_coeff (float): Coefficient of the entropy regularizer.
            clip_param (float): Clip parameter.
            vf_clip_param (float): Clip parameter for the value function.
            vf_loss_coeff (float): Coefficient of the value function loss.
            use_gae (bool): If true, use the Generalized Advantage
                Estimator.
            model_config (dict): (Optional) model config for use in
                specifying action distributions.
        """

        def reduce_mean_valid(t):
            # Mean over the unpadded (valid) elements only.
            return tf.reduce_mean(tf.boolean_mask(t, valid_mask))

        prev_dist = dist_class(prev_logits, model)
        # Ratio pi_new(a|s) / pi_old(a|s), computed in log space.
        logp_ratio = tf.exp(
            curr_action_dist.logp(actions) - prev_actions_logp)
        action_kl = prev_dist.kl(curr_action_dist)
        self.mean_kl = reduce_mean_valid(action_kl)

        curr_entropy = curr_action_dist.entropy()
        self.mean_entropy = reduce_mean_valid(curr_entropy)

        # Clipped PPO surrogate objective (pessimistic bound).
        surrogate_loss = tf.minimum(
            advantages * logp_ratio,
            advantages * tf.clip_by_value(logp_ratio, 1 - clip_param,
                                          1 + clip_param))
        self.mean_policy_loss = reduce_mean_valid(-surrogate_loss)

        if use_gae:
            vf_loss1 = tf.square(value_fn - value_targets)
            # Clip the value update to stay near the old prediction, then
            # take the worse (larger) of the clipped/unclipped losses.
            vf_clipped = vf_preds + tf.clip_by_value(
                value_fn - vf_preds, -vf_clip_param, vf_clip_param)
            vf_loss2 = tf.square(vf_clipped - value_targets)
            vf_loss = tf.maximum(vf_loss1, vf_loss2)
            self.mean_vf_loss = reduce_mean_valid(vf_loss)
            loss = reduce_mean_valid(
                -surrogate_loss + vf_loss_coeff * vf_loss -
                entropy_coeff * curr_entropy)
        else:
            self.mean_vf_loss = tf.constant(0.0)
            loss = reduce_mean_valid(
                -surrogate_loss - entropy_coeff * curr_entropy)
        self.loss = loss


def ppo_surrogate_loss(policy, model, dist_class, train_batch):
    """Build the PPO loss for ``policy`` from a train batch.

    Stores the ``PPOLoss`` object on ``policy.loss_obj`` (the stats
    function reads it back) and returns the total loss tensor.
    """
    logits, state = model.from_batch(train_batch)
    action_dist = dist_class(logits, model)

    if state:
        # RNN case: mask the zero-padded tail of each sequence out of the
        # loss.
        max_seq_len = tf.reduce_max(train_batch["seq_lens"])
        mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len)
        mask = tf.reshape(mask, [-1])
    else:
        mask = tf.ones_like(
            train_batch[Postprocessing.ADVANTAGES], dtype=tf.bool)

    policy.loss_obj = PPOLoss(
        policy.action_space,
        dist_class,
        model,
        train_batch[Postprocessing.VALUE_TARGETS],
        train_batch[Postprocessing.ADVANTAGES],
        train_batch[SampleBatch.ACTIONS],
        train_batch[BEHAVIOUR_LOGITS],
        train_batch[ACTION_LOGP],
        train_batch[SampleBatch.VF_PREDS],
        action_dist,
        model.value_function(),
        policy.kl_coeff,
        mask,
        entropy_coeff=policy.entropy_coeff,
        clip_param=policy.config["clip_param"],
        vf_clip_param=policy.config["vf_clip_param"],
        vf_loss_coeff=policy.config["vf_loss_coeff"],
        use_gae=policy.config["use_gae"],
        model_config=policy.config["model"])

    return policy.loss_obj.loss


def kl_and_loss_stats(policy, train_batch):
    """Return the stats dict that is logged every training iteration."""
    return {
        "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64),
        "cur_lr": tf.cast(policy.cur_lr, tf.float64),
        "total_loss": policy.loss_obj.loss,
        "policy_loss": policy.loss_obj.mean_policy_loss,
        "vf_loss": policy.loss_obj.mean_vf_loss,
        "vf_explained_var": explained_variance(
            train_batch[Postprocessing.VALUE_TARGETS],
            policy.model.value_function()),
        # BUG FIX: this entry previously reported the value *targets*
        # (Postprocessing.VALUE_TARGETS) under the "vf_preds" key; report
        # the value-function predictions instead.
        "vf_preds": train_batch[SampleBatch.VF_PREDS],
        "kl": policy.loss_obj.mean_kl,
        "entropy": policy.loss_obj.mean_entropy,
        "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64),
        "advantages": train_batch[Postprocessing.ADVANTAGES],
        "rewards": train_batch["rewards"]
    }


def vf_preds_and_logits_fetches(policy):
    """Add value function and logits outputs to experience train_batches."""
    return {
        SampleBatch.VF_PREDS: policy.model.value_function(),
        BEHAVIOUR_LOGITS: policy.model.last_output(),
    }


def postprocess_ppo_gae(policy,
                        sample_batch,
                        other_agent_batches=None,
                        episode=None):
    """Add the policy logits, VF preds, and advantages to the trajectory."""
    completed = sample_batch["dones"][-1]
    if completed:
        # Terminal state: no value beyond the end of the episode.
        last_r = 0.0
    else:
        # Bootstrap the value of the truncated trajectory from the critic.
        next_state = []
        for i in range(policy.num_state_tensors()):
            next_state.append([sample_batch["state_out_{}".format(i)][-1]])
        last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1],
                               sample_batch[SampleBatch.ACTIONS][-1],
                               sample_batch[SampleBatch.REWARDS][-1],
                               *next_state)

    batch = compute_advantages(
        sample_batch,
        last_r,
        policy.config["gamma"],
        policy.config["lambda"],
        use_gae=policy.config["use_gae"])
    return batch


def clip_gradients(policy, optimizer, loss):
    """Compute gradients, clipped by global norm when grad_clip is set."""
    variables = policy.model.trainable_variables()
    if policy.config["grad_clip"] is not None:
        grads_and_vars = optimizer.compute_gradients(loss, variables)
        grads = [g for (g, v) in grads_and_vars]
        policy.grads, _ = tf.clip_by_global_norm(grads,
                                                 policy.config["grad_clip"])
        clipped_grads = list(zip(policy.grads, variables))
        return clipped_grads
    else:
        return optimizer.compute_gradients(loss, variables)


class ValueNetworkMixin(object):
    """Attaches ``self._value`` (a single-obs critic call) to the policy."""

    def __init__(self, obs_space, action_space, config):
        if config["use_gae"]:

            @make_tf_callable(self.get_session())
            def value(ob, prev_action, prev_reward, *state):
                # Run a batch of size 1 through the model and read the
                # critic head.
                model_out, _ = self.model({
                    SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]),
                    SampleBatch.PREV_ACTIONS: tf.convert_to_tensor(
                        [prev_action]),
                    SampleBatch.PREV_REWARDS: tf.convert_to_tensor(
                        [prev_reward]),
                    "is_training": tf.convert_to_tensor(False),
                }, [tf.convert_to_tensor([s]) for s in state],
                    tf.convert_to_tensor([1]))
                return self.model.value_function()[0]

        else:

            # Without GAE no bootstrap value is needed.
            @make_tf_callable(self.get_session())
            def value(ob, prev_action, prev_reward, *state):
                return tf.constant(0.0)

        self._value = value


def setup_config(policy, obs_space, action_space, config):
    """Auto-set the model option for value-function layer sharing."""
    config["model"]["vf_share_layers"] = config["vf_share_layers"]


def setup_mixins(policy, obs_space, action_space, config):
    """Initialize all mixins before the loss is built."""
    KLCoeffMixin.__init__(policy, config)
    ValueNetworkMixin.__init__(policy, obs_space, action_space, config)
    EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"],
                                  config["entropy_coeff_schedule"])
    LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"])
class KLCoeffMixin(object):
    """Adds a *fixed* KL coefficient variable to the policy.

    Unlike the stock PPO mixin, ``update_kl`` here is deliberately a
    no-op, so the coefficient keeps its initial ``kl_coeff`` value for
    the entire run.
    """

    def __init__(self, config):
        # KL coefficient (never updated -- see update_kl below).
        self.kl_coeff_val = config["kl_coeff"]
        self.kl_target = config["kl_target"]
        self.kl_coeff = tf.get_variable(
            initializer=tf.constant_initializer(self.kl_coeff_val),
            name="kl_coeff",
            shape=(),
            trainable=False,
            dtype=tf.float32)

    def update_kl(self, sampled_kl):
        """Intentionally do nothing: the KL coefficient is held constant."""
        pass


CustomPPOTFPolicy = build_tf_policy(
    name="CustomPPOTFPolicy",
    get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG,
    loss_fn=ppo_surrogate_loss,
    stats_fn=kl_and_loss_stats,
    extra_action_fetches_fn=vf_preds_and_logits_fetches,
    postprocess_fn=postprocess_ppo_gae,
    gradients_fn=clip_gradients,
    before_init=setup_config,
    before_loss_init=setup_mixins,
    mixins=[
        LearningRateSchedule, EntropyCoeffSchedule,
        ValueNetworkMixin, KLCoeffMixin
    ])


def validate_config(config):
    """Sanity-check the trainer config (mirrors RLlib's stock PPO checks).

    Raises
    ------
    DeprecationWarning
        If ``entropy_coeff`` is negative.
    ValueError
        If episode truncation is requested without a value function.
    """
    if config["entropy_coeff"] < 0:
        raise DeprecationWarning("entropy_coeff must be >= 0")
    if isinstance(config["entropy_coeff"], int):
        config["entropy_coeff"] = float(config["entropy_coeff"])
    if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]:
        raise ValueError(
            "Episode truncation is not supported without a value "
            "function. Consider setting batch_mode=complete_episodes.")
    if config["multiagent"]["policies"] and not config["simple_optimizer"]:
        logger.info(
            "In multi-agent mode, policies will be optimized sequentially "
            "by the multi-GPU optimizer. Consider setting "
            "simple_optimizer=True if this doesn't work for you.")
    if config["simple_optimizer"]:
        logger.warning(
            "Using the simple minibatch optimizer. This will significantly "
            "reduce performance, consider simple_optimizer=False.")
    elif tf and tf.executing_eagerly():
        config["simple_optimizer"] = True  # multi-gpu not supported


# Imported late (after the policy is defined) to avoid a circular import
# through ray.rllib.agents.ppo.
from ray.rllib.agents.trainer_template import build_trainer
from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, \
    DEFAULT_CONFIG, update_kl, warn_about_bad_reward_scales

CustomPPOTrainer = build_trainer(
    name="CustomPPOTrainer",
    default_config=DEFAULT_CONFIG,
    default_policy=CustomPPOTFPolicy,
    make_policy_optimizer=choose_policy_optimizer,
    validate_config=validate_config,
    after_train_result=warn_about_bad_reward_scales)
@@ -72,41 +79,54 @@ def find_intersection_dist(self, env): def get_accel(self, env): """See parent class.""" - lead_id = env.k.vehicle.get_leader(self.veh_id) - this_vel = env.k.vehicle.get_speed(self.veh_id) - lead_vel = env.k.vehicle.get_speed(lead_id) - - if self.v_des is None: + if env.time_counter < env.env_params.warmup_steps * env.env_params.sims_per_step: return None - - if lead_id is None: - v_cmd = self.v_des else: - dx = env.k.vehicle.get_headway(self.veh_id) - dv_minus = min(lead_vel - this_vel, 0) + lead_id = env.k.vehicle.get_leader(self.veh_id) + this_vel = env.k.vehicle.get_speed(self.veh_id) + lead_vel = env.k.vehicle.get_speed(lead_id) - dx_1 = self.dx_1_0 + 1 / (2 * self.d_1) * dv_minus**2 - dx_2 = self.dx_2_0 + 1 / (2 * self.d_2) * dv_minus**2 - dx_3 = self.dx_3_0 + 1 / (2 * self.d_3) * dv_minus**2 - v = min(max(lead_vel, 0), self.v_des) - # compute the desired velocity - if dx <= dx_1: - v_cmd = 0 - elif dx <= dx_2: - v_cmd = v * (dx - dx_1) / (dx_2 - dx_1) - elif dx <= dx_3: - v_cmd = v + (self.v_des - this_vel) * (dx - dx_2) \ - / (dx_3 - dx_2) - else: - v_cmd = self.v_des - - edge = env.k.vehicle.get_edge(self.veh_id) + if self.v_des is None: + return None - if edge == "": - return None - else: - # compute the acceleration from the desired velocity - return np.clip((v_cmd - this_vel) / env.sim_step, -np.abs(self.max_deaccel), self.max_accel) + if lead_id is None: + v_cmd = self.v_des + else: + dx = env.k.vehicle.get_headway(self.veh_id) + dv_minus = min(lead_vel - this_vel, 0) + + dx_1 = self.dx_1_0 + 1 / (2 * self.d_1) * dv_minus**2 + dx_2 = self.dx_2_0 + 1 / (2 * self.d_2) * dv_minus**2 + dx_3 = self.dx_3_0 + 1 / (2 * self.d_3) * dv_minus**2 + v = min(max(lead_vel, 0), self.v_des) + # compute the desired velocity + if dx <= dx_1: + v_cmd = 0 + elif dx <= dx_2: + v_cmd = v * (dx - dx_1) / (dx_2 - dx_1) + elif dx <= dx_3: + v_cmd = v + (self.v_des - this_vel) * (dx - dx_2) \ + / (dx_3 - dx_2) + else: + v_cmd = self.v_des + + edge = 
env.k.vehicle.get_edge(self.veh_id) + + if edge == "": + return None + + if (self.find_intersection_dist(env) <= 10 and \ + env.k.vehicle.get_edge(self.veh_id) in self.danger_edges) or \ + env.k.vehicle.get_edge(self.veh_id)[0] == ":"\ + or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] + or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\ + or edge in self.no_control_edges: + # TODO(@evinitsky) put back + # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: + return None + else: + # compute the acceleration from the desired velocity + return np.clip((v_cmd - this_vel) / env.sim_step, -np.abs(self.max_deaccel), self.max_accel) class NonLocalFollowerStopper(FollowerStopper): diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 9ca83ab40..1c5ed271a 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -323,6 +323,7 @@ def get_fuel_consumption(selfself, veh_id, error=-1001): vehicle id, or list of vehicle ids error : any, optional value that is returned if the vehicle is not found + Returns ------- float diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 1434636e6..5aada2d8e 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -333,7 +333,6 @@ def energy_consumption(env, gain=.001): return -gain * power - def vehicle_energy_consumption(env, veh_id, gain=.001): """Calculate power consumption of a vehicle. 
@@ -352,6 +351,7 @@ def vehicle_energy_consumption(env, veh_id, gain=.001): if veh_id not in env.k.vehicle.previous_speeds: return 0 + speed = env.k.vehicle.get_speed(veh_id) prev_speed = env.k.vehicle.get_previous_speed(veh_id) @@ -389,7 +389,7 @@ def miles_per_megajoule(env, veh_ids=None, gain=.001): speed = env.k.vehicle.get_speed(veh_id) # convert to be positive since the function called is a penalty power = -vehicle_energy_consumption(env, veh_id, gain=1.0) - if power > 0 and speed >= 0.0: + if power > 0 and speed >= 0.1: counter += 1 # meters / joule is (v * \delta t) / (power * \delta t) mpj += speed / power diff --git a/flow/envs/base.py b/flow/envs/base.py index cf1674355..fbc57f33b 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -148,6 +148,8 @@ def __init__(self, self.state = None self.obs_var_labels = [] + self.num_training_iters = 0 + # track IDs that have ever been observed in the system self.observed_ids = set() self.observed_rl_ids = set() diff --git a/flow/envs/multiagent/__init__.py b/flow/envs/multiagent/__init__.py index 818d6662b..8c5552580 100644 --- a/flow/envs/multiagent/__init__.py +++ b/flow/envs/multiagent/__init__.py @@ -12,6 +12,7 @@ from flow.envs.multiagent.merge import MultiAgentMergePOEnv from flow.envs.multiagent.i210 import I210MultiEnv, MultiStraightRoad + __all__ = [ 'MultiEnv', 'AdversarialAccelEnv', diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 594fb2fdb..7104138de 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -322,3 +322,6 @@ def apply_rl_actions(self, rl_actions=None): # clip according to the action space requirements clipped_actions = self.clip_actions(rl_actions) self._apply_rl_actions(clipped_actions) + + def set_iteration_num(self): + self.num_training_iters += 1 diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index a6e39cdec..c9b63b23a 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -1,8 
+1,13 @@ """Environment for training vehicles to reduce congestion in the I210.""" -from gym.spaces import Box +from collections import OrderedDict +from copy import deepcopy +from time import time + +from gym.spaces import Box, Discrete, Dict import numpy as np +from flow.core.rewards import miles_per_gallon, miles_per_megajoule from flow.envs.multiagent.base import MultiEnv # largest number of lanes on any given edge in the network @@ -19,6 +24,7 @@ "lead_obs": True, # whether the reward should come from local vehicles instead of global rewards "local_reward": True, + # desired velocity "target_velocity": 25 } @@ -66,10 +72,35 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): self.reroute_on_exit = env_params.additional_params.get("reroute_on_exit") self.max_lanes = MAX_LANES self.num_enter_lanes = 5 - self.entrance_edge = "119257914" - self.exit_edge = "119257908#3" + self.entrance_edge = "ghost0" + self.exit_edge = "119257908#2" + self.control_range = env_params.additional_params.get('control_range', None) + self.no_control_edges = env_params.additional_params.get('no_control_edges', []) + self.mpg_reward = env_params.additional_params["mpg_reward"] + self.mpj_reward = env_params.additional_params["mpj_reward"] + self.look_back_length = env_params.additional_params["look_back_length"] + + # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + self.headway_curriculum = env_params.additional_params["headway_curriculum"] + # how many timesteps to anneal the headway curriculum over + self.headway_curriculum_iters = env_params.additional_params["headway_curriculum_iters"] + self.headway_reward_gain = env_params.additional_params["headway_reward_gain"] + self.min_time_headway = env_params.additional_params["min_time_headway"] + + # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + self.speed_curriculum = env_params.additional_params["speed_curriculum"] + 
# how many timesteps to anneal the headway curriculum over + self.speed_curriculum_iters = env_params.additional_params["speed_curriculum_iters"] + self.speed_reward_gain = env_params.additional_params["speed_reward_gain"] + self.num_training_iters = 0 self.leader = [] + # penalize stops + self.penalize_stops = env_params.additional_params["penalize_stops"] + + # penalize accel + self.penalize_accel = env_params.additional_params.get("penalize_accel", False) + @property def observation_space(self): """See class definition.""" @@ -109,6 +140,8 @@ def action_space(self): def _apply_rl_actions(self, rl_actions): """See class definition.""" # in the warmup steps, rl_actions is None + id_list = [] + accel_list = [] if rl_actions: for rl_id, actions in rl_actions.items(): accel = actions[0] @@ -117,15 +150,28 @@ def _apply_rl_actions(self, rl_actions): # lane_change_softmax /= np.sum(lane_change_softmax) # lane_change_action = np.random.choice([-1, 0, 1], # p=lane_change_softmax) + id_list.append(rl_id) + accel_list.append(accel) + self.k.vehicle.apply_acceleration(id_list, accel_list) + # self.k.vehicle.apply_lane_change(rl_id, lane_change_action) + # print('time to apply actions is ', time() - t) - self.k.vehicle.apply_acceleration(rl_id, accel) - # self.k.vehicle.apply_lane_change(rl_id, lane_change_action) + def in_control_range(self, veh_id): + """Return if a veh_id is on an edge that is allowed to be controlled. 
+ + If control range is defined it uses control range, otherwise it searches over a set of edges + """ + return (self.control_range and self.k.vehicle.get_x_by_id(veh_id) < self.control_range[1] \ + and self.k.vehicle.get_x_by_id(veh_id) > self.control_range[0]) or \ + (len(self.no_control_edges) > 0 and self.k.vehicle.get_edge(veh_id) not in + self.no_control_edges) def get_state(self): """See class definition.""" + valid_ids = [rl_id for rl_id in self.k.vehicle.get_rl_ids() if self.in_control_range(rl_id)] if self.lead_obs: veh_info = {} - for rl_id in self.k.vehicle.get_rl_ids(): + for rl_id in valid_ids: speed = self.k.vehicle.get_speed(rl_id) lead_id = self.k.vehicle.get_leader(rl_id) if lead_id in ["", None]: @@ -140,7 +186,7 @@ def get_state(self): else: veh_info = {rl_id: np.concatenate((self.state_util(rl_id), self.veh_statistics(rl_id))) - for rl_id in self.k.vehicle.get_rl_ids()} + for rl_id in valid_ids} return veh_info def compute_reward(self, rl_actions, **kwargs): @@ -150,27 +196,107 @@ def compute_reward(self, rl_actions, **kwargs): return {} rewards = {} + valid_ids = [rl_id for rl_id in self.k.vehicle.get_rl_ids() if self.in_control_range(rl_id)] + if self.env_params.additional_params["local_reward"]: des_speed = self.env_params.additional_params["target_velocity"] - for rl_id in self.k.vehicle.get_rl_ids(): + for rl_id in valid_ids: rewards[rl_id] = 0 - speeds = [] - follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - if follow_speed >= 0: - speeds.append(follow_speed) - if self.k.vehicle.get_speed(rl_id) >= 0: - speeds.append(self.k.vehicle.get_speed(rl_id)) - if len(speeds) > 0: - # rescale so the critic can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2) + if self.mpg_reward: + rewards[rl_id] = miles_per_gallon(self, rl_id, gain=1.0) / 100.0 + follow_id = rl_id + for i in range(self.look_back_length): + follow_id = 
self.k.vehicle.get_follower(follow_id) + if follow_id not in ["", None]: + rewards[rl_id] += miles_per_gallon(self, follow_id, gain=1.0) / 100.0 + else: + break + elif self.mpj_reward: + rewards[rl_id] = miles_per_megajoule(self, rl_id, gain=1.0) / 100.0 + follow_id = rl_id + for i in range(self.look_back_length): + follow_id = self.k.vehicle.get_follower(follow_id) + if follow_id not in ["", None]: + # if self.time_counter > 700 and miles_per_megajoule(self, follow_id, gain=1.0) > 1.0: + # import ipdb; ipdb.set_trace() + rewards[rl_id] += miles_per_megajoule(self, follow_id, gain=1.0) / 100.0 + else: + break + else: + speeds = [] + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) + if follow_speed >= 0: + speeds.append(follow_speed) + if self.k.vehicle.get_speed(rl_id) >= 0: + speeds.append(self.k.vehicle.get_speed(rl_id)) + if len(speeds) > 0: + # rescale so the critic can estimate it quickly + rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2) else: - speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) + if self.mpg_reward: + reward = np.nan_to_num(miles_per_gallon(self, self.k.vehicle.get_ids(), gain=1.0)) / 100.0 + else: + speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) + des_speed = self.env_params.additional_params["target_velocity"] + # rescale so the critic can estimate it quickly + if self.reroute_on_exit: + reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) + for speed in speeds]) / (des_speed)) + else: + reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 + for speed in speeds]) / (des_speed ** 2)) + rewards = {rl_id: reward for rl_id in valid_ids} + + # curriculum over time-gaps + if self.headway_curriculum and self.num_training_iters <= self.headway_curriculum_iters: + t_min = self.min_time_headway # smallest acceptable time headway + for veh_id, rew in rewards.items(): + lead_id = 
self.k.vehicle.get_leader(veh_id) + penalty = 0 + if lead_id not in ["", None] \ + and self.k.vehicle.get_speed(veh_id) > 0: + t_headway = max( + self.k.vehicle.get_headway(veh_id) / + self.k.vehicle.get_speed(veh_id), 0) + # print('time headway is {}, headway is {}'.format(t_headway, self.k.vehicle.get_headway(veh_id))) + scaling_factor = max(0, 1 - self.num_training_iters / self.headway_curriculum_iters) + penalty += scaling_factor * self.headway_reward_gain * min((t_headway - t_min) / t_min, 0) + # print('penalty is ', penalty) + + rewards[veh_id] += penalty + + if self.speed_curriculum and self.num_training_iters <= self.speed_curriculum_iters: des_speed = self.env_params.additional_params["target_velocity"] - # rescale so the critic can estimate it quickly - reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2)) - rewards = {rl_id: reward for rl_id in self.k.vehicle.get_rl_ids()} + + for veh_id, rew in rewards.items(): + speed = self.k.vehicle.get_speed(veh_id) + speed_reward = 0.0 + follow_id = veh_id + for i in range(self.look_back_length): + follow_id = self.k.vehicle.get_follower(follow_id) + if follow_id not in ["", None]: + if self.reroute_on_exit: + speed_reward += ((des_speed - np.abs(speed - des_speed))) / (des_speed) + else: + speed_reward += ((des_speed - np.abs(speed - des_speed)) ** 2) / (des_speed ** 2) + else: + break + scaling_factor = max(0, 1 - self.num_training_iters / self.speed_curriculum_iters) + + rewards[veh_id] += speed_reward * scaling_factor * self.speed_reward_gain + + for veh_id in rewards.keys(): + speed = self.k.vehicle.get_speed(veh_id) + if self.penalize_stops: + if speed < 1.0: + rewards[veh_id] -= .01 + if self.penalize_accel and veh_id in self.k.vehicle.previous_speeds: + prev_speed = self.k.vehicle.get_previous_speed(veh_id) + abs_accel = abs(speed - prev_speed) / self.sim_step + rewards[veh_id] -= abs_accel / 400.0 + + # print('time to get reward is ', 
time() - t) return rewards def additional_command(self): @@ -191,6 +317,7 @@ def additional_command(self): and not self.env_params.evaluate: veh_ids = self.k.vehicle.get_ids() edges = self.k.vehicle.get_edge(veh_ids) + valid_lanes = list(range(self.num_enter_lanes)) for veh_id, edge in zip(veh_ids, edges): if edge == "": continue @@ -200,28 +327,38 @@ def additional_command(self): if edge == self.exit_edge and \ (self.k.vehicle.get_position(veh_id) > self.k.network.edge_length(self.exit_edge) - 100) \ and self.k.vehicle.get_leader(veh_id) is None: + # if self.step_counter > 6000: + # import ipdb; ipdb.set_trace() type_id = self.k.vehicle.get_type(veh_id) # remove the vehicle self.k.vehicle.remove(veh_id) - lane = np.random.randint(low=0, high=self.num_enter_lanes) + index = np.random.randint(low=0, high=len(valid_lanes)) + lane = valid_lanes[index] + del valid_lanes[index] # reintroduce it at the start of the network # TODO(@evinitsky) select the lane and speed a bit more cleanly # Note, the position is 10 so you are not overlapping with the inflow car that is being removed. # this allows the vehicle to be immediately inserted. - self.k.vehicle.add( - veh_id=veh_id, - edge=self.entrance_edge, - type_id=str(type_id), - lane=str(lane), - pos="10.0", - speed="23.0") + try: + self.k.vehicle.add( + veh_id=veh_id, + edge=self.entrance_edge, + type_id=str(type_id), + lane=str(lane), + pos="20.0", + speed="23.0") + except Exception as e: + print(e) + if len(valid_lanes) == 0: + break departed_ids = self.k.vehicle.get_departed_ids() - if len(departed_ids) > 0: + if isinstance(departed_ids, tuple) and len(departed_ids) > 0: for veh_id in departed_ids: if veh_id not in self.observed_ids: self.k.vehicle.remove(veh_id) + def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. 
"""Contains the I-210 sub-network class with an upstream ghost cell."""

from flow.networks.base import Network

# Edges on which vehicles may initially be distributed.
EDGES_DISTRIBUTION = [
    # Main highway
    "ghost0",
    "119257914",
    "119257908#0",
    "119257908#1-AddedOnRampEdge",
    "119257908#1",
    "119257908#1-AddedOffRampEdge",
    "119257908#2",
    "119257908#3",

    # On-ramp
    "27414345",
    "27414342#0",
    "27414342#1-AddedOnRampEdge",

    # Off-ramp
    "173381935",
]


class I210SubNetworkGhostCell(Network):
    """A network used to simulate the I-210 sub-network with a ghost cell.

    Usage
    -----
    >>> from flow.core.params import NetParams
    >>> from flow.core.params import VehicleParams
    >>> from flow.core.params import InitialConfig
    >>> from flow.networks.i210_subnetwork_ghost_cell import \
    >>>     I210SubNetworkGhostCell
    >>>
    >>> network = I210SubNetworkGhostCell(
    >>>     name='I-210_subnetwork',
    >>>     vehicles=VehicleParams(),
    >>>     net_params=NetParams()
    >>> )
    """

    def specify_routes(self, net_params):
        """See parent class.

        Routes for vehicles moving through the I210.  Off-ramp fractions
        (e.g. ``17 / 8378``) come from observed counts -- HOV: 1509
        (on ramp: 57), Non HOV: 6869 (onramp: 16).
        """
        if net_params.additional_params["on_ramp"]:
            rts = {
                # Main highway
                "ghost0": [
                    (["ghost0", "119257914", "119257908#0",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1 - 17 / 8378),
                    # BUG FIX: the off-ramp route must also begin on
                    # "ghost0" -- a vehicle departing there cannot follow
                    # a route that starts one edge downstream.
                    (["ghost0", "119257914", "119257908#0",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "173381935"],
                     17 / 8378)
                ],
                "119257914": [
                    (["119257914", "119257908#0",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1 - 17 / 8378),
                    (["119257914", "119257908#0",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "173381935"],
                     17 / 8378)
                ],
                # Vehicles seeded downstream of the on-ramp merge are kept
                # on the mainline (off-ramp split disabled for them).
                "119257908#0": [
                    (["119257908#0", "119257908#1-AddedOnRampEdge",
                      "119257908#1", "119257908#1-AddedOffRampEdge",
                      "119257908#2", "119257908#3"],
                     1.0),
                ],
                "119257908#1-AddedOnRampEdge": [
                    (["119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1.0),
                ],
                "119257908#1": [
                    (["119257908#1", "119257908#1-AddedOffRampEdge",
                      "119257908#2", "119257908#3"],
                     1.0),
                ],
                "119257908#1-AddedOffRampEdge": [
                    (["119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1.0),
                ],
                "119257908#2": [
                    (["119257908#2", "119257908#3"], 1),
                ],
                "119257908#3": [
                    (["119257908#3"], 1),
                ],

                # On-ramp
                "27414345": [
                    (["27414345", "27414342#1-AddedOnRampEdge",
                      "27414342#1",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1 - 9 / 321),
                    (["27414345", "27414342#1-AddedOnRampEdge",
                      "27414342#1",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "173381935"],
                     9 / 321),
                ],
                "27414342#0": [
                    (["27414342#0", "27414342#1-AddedOnRampEdge",
                      "27414342#1",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1 - 20 / 421),
                    (["27414342#0", "27414342#1-AddedOnRampEdge",
                      "27414342#1",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "173381935"],
                     20 / 421),
                ],
                "27414342#1-AddedOnRampEdge": [
                    (["27414342#1-AddedOnRampEdge", "27414342#1",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     0.5),
                    (["27414342#1-AddedOnRampEdge", "27414342#1",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "173381935"],
                     0.5),
                ],

                # Off-ramp
                "173381935": [
                    (["173381935"], 1),
                ],
            }

        else:
            # No on-ramp: a single mainline route from the ghost cell.
            rts = {
                # Main highway
                "ghost0": [
                    (["ghost0", "119257914", "119257908#0",
                      "119257908#1-AddedOnRampEdge", "119257908#1",
                      "119257908#1-AddedOffRampEdge", "119257908#2",
                      "119257908#3"],
                     1),
                ],
            }

        return rts
""" - length = params['net'].additional_params['length'] - num_edges = params['net'].additional_params['num_edges'] - edge_len = length / num_edges + junction_length = 0.1 + length = params['net'].additional_params["length"] + num_edges = params['net'].additional_params.get("num_edges", 1) edge_starts = {} - for i in range(num_edges): - edge_starts.update({"highway_{}".format(i): i * edge_len, ":edge_{}_0".format(i): i * edge_len}) + # Add the main edges. + edge_starts.update({ + "highway_{}".format(i): + i * (length / num_edges + junction_length) + for i in range(num_edges) + }) + + if params['net'].additional_params["use_ghost_edge"]: + edge_starts.update({"highway_end": length + num_edges * junction_length}) + + edge_starts.update({ + ":edge_{}".format(i + 1): + (i + 1) * length / num_edges + i * junction_length + for i in range(num_edges - 1) + }) + + if params['net'].additional_params["use_ghost_edge"]: + edge_starts.update({ + ":edge_{}".format(num_edges): + length + (num_edges - 1) * junction_length + }) # compute the absolute position for veh_id in data.keys(): diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index c1dd83193..5c52e196f 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -26,6 +26,7 @@ from ray.rllib.agents.registry import get_agent_class from ray.tune.registry import register_env +from flow.core.rewards import miles_per_gallon, miles_per_megajoule from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params @@ -90,6 +91,14 @@ def visualizer_rllib(args): sys.exit(1) if args.run: agent_cls = get_agent_class(args.run) + elif config['env_config']['run'] == "": + from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel + from ray.rllib.models import ModelCatalog + agent_cls = CCTrainer + ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) + elif 
config['env_config']['run'] == "": + from flow.algorithms.custom_ppo import CustomPPOTrainer + agent_cls = CustomPPOTrainer elif config_run: agent_cls = get_agent_class(config_run) else: @@ -160,6 +169,10 @@ def visualizer_rllib(args): else: env = gym.make(env_name) + # reroute on exit is a training hack, it should be turned off at test time. + if hasattr(env, "reroute_on_exit"): + env.reroute_on_exit = False + if args.render_mode == 'sumo_gui': env.sim_params.render = True # set to True after initializing agent and env @@ -197,6 +210,8 @@ def visualizer_rllib(args): # Simulate and collect metrics final_outflows = [] final_inflows = [] + mpg = [] + mpj = [] mean_speed = [] std_speed = [] for i in range(args.num_rollouts): @@ -214,6 +229,9 @@ def visualizer_rllib(args): if speeds: vel.append(np.mean(speeds)) + mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(), gain=1.0)) + mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0)) + if multiagent: action = {} for agent_id in state.keys(): @@ -279,10 +297,11 @@ def visualizer_rllib(args): print(mean_speed) print('Average, std: {}, {}'.format(np.mean(mean_speed), np.std( mean_speed))) - print("\nSpeed, std (m/s):") - print(std_speed) - print('Average, std: {}, {}'.format(np.mean(std_speed), np.std( - std_speed))) + + print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg))) + + print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) + # Compute arrival rate of vehicles in the last 500 sec of the run print("\nOutflows (veh/hr):") diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5cf0eca96..18e25154d 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -1,4 +1,4 @@ -# cluster.yaml ========================================= +# cluster.yaml ========================================= # An unique identifier for the head node and workers of this cluster. 
cluster_name: test # @@ -39,8 +39,8 @@ auth: # For more documentation on available fields, see: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances head_node: - InstanceType: c4.4xlarge - ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu) + InstanceType: c4.8xlarge + ImageId: ami-0c047f3ddd3939b30 # Flow AMI (Ubuntu) InstanceMarketOptions: MarketType: spot #Additional options can be found in the boto docs, e.g. @@ -54,10 +54,10 @@ head_node: # For more documentation on available fields, see: # http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#EC2.ServiceResource.create_instances worker_nodes: - InstanceType: c4.4xlarge - ImageId: ami-0b489700e7f810707 # Flow AMI (Ubuntu) + InstanceType: c4.8xlarge + ImageId: ami-0c047f3ddd3939b30 # Flow AMI (Ubuntu) - #Run workers on spot by default. Comment this out to use on-demand. + #Run workers on spot by default. Comment this out to use on-demand. InstanceMarketOptions: MarketType: spot # Additional options can be found in the boto docs, e.g. @@ -67,7 +67,8 @@ worker_nodes: # Additional options in the boto docs. 
setup_commands: - - cd flow && git fetch && git checkout origin/i210_dev + - cd flow && git fetch && git checkout origin/flow_maddpg + - flow/scripts/setup_sumo_ubuntu1604.sh - pip install ray==0.8.0 - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions @@ -79,7 +80,6 @@ setup_commands: - pip install lz4 - pip install dm-tree - pip install numpy==1.18.4 - - ./flow/scripts/setup_sumo_ubuntu1604.sh head_setup_commands: [] From 4ebcc0629fe98008a4faa63159613da33e31f3ff Mon Sep 17 00:00:00 2001 From: Yasharzf Date: Thu, 28 May 2020 12:38:16 -0700 Subject: [PATCH 216/335] seperated speed limit check, modified orders --- flow/controllers/base_controller.py | 112 ++++++++++++++++++++-------- 1 file changed, 82 insertions(+), 30 deletions(-) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 95ecd1737..4a7818d11 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -33,8 +33,10 @@ class BaseController: specified to in this model are as desired. delay : int delay in applying the action (time) - fail_safe : str - Should be "instantaneous", "safe_velocity", "feasible_accel", or "all" + fail_safe : list of str or str + List of failsafes which can be "instantaneous", "safe_velocity", + "feasible_accel", or "obey_speed_limit". The order of applying the + falsafes will be based on the order in the list. noise : double variance of the gaussian from which to sample a noisy acceleration """ @@ -55,7 +57,18 @@ def __init__(self, self.delay = delay # longitudinal failsafe used by the vehicle - self.fail_safe = fail_safe + if isinstance(fail_safe, str): + self.fail_safe = [fail_safe] + elif isinstance(fail_safe, list) or fail_safe is None: + self.fail_safe = fail_safe + else: + print( + "==========================================================\n" + "WARNING: fail_safe should be string or list of strings. 
\n" + "Set fal_safe to None\n" + "==========================================================\n" + ) + self.fail_safe = None self.max_accel = car_following_params.controller_params['accel'] # max deaccel should always be a positive @@ -76,9 +89,7 @@ def get_action(self, env): This method also augments the controller with the desired level of stochastic noise, and utlizes the "instantaneous", "safe_velocity", - "feasible_accel", or "all" failsafes if requested. The "all" failsafe - performs all three failsafes with this order: 1)"safe_velocity", - 2) "feasible_accel", 3) "instantaneous". + "feasible_accel", or "obey_speed_limit" failsafes if requested. Parameters ---------- @@ -117,16 +128,17 @@ def get_action(self, env): # run fail safe if requested env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) accel_no_noise_with_failsafe = accel - if self.fail_safe == 'instantaneous': - accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel) - elif self.fail_safe == 'safe_velocity': - accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel) - elif self.fail_safe == 'feasible_accel': - accel_no_noise_with_failsafe = self.get_feasible_action(accel) - elif self.fail_safe == 'all': - accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel) - accel_no_noise_with_failsafe = self.get_feasible_action(accel_no_noise_with_failsafe) - accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel_no_noise_with_failsafe) + + if self.fail_safe is not None: + for check in self.fail_safe: + if check == 'instantaneous': + accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel_no_noise_with_failsafe) + elif check == 'safe_velocity': + accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel_no_noise_with_failsafe) + elif check == 'feasible_accel': + accel_no_noise_with_failsafe = self.get_feasible_action(accel_no_noise_with_failsafe) + elif check == 'obey_speed_limit': + 
accel_no_noise_with_failsafe = self.get_obey_speed_limit_action(env, accel_no_noise_with_failsafe) env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) @@ -136,16 +148,17 @@ def get_action(self, env): env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, accel) # run the fail-safes, if requested - if self.fail_safe == 'instantaneous': - accel = self.get_safe_action_instantaneous(env, accel) - elif self.fail_safe == 'safe_velocity': - accel = self.get_safe_velocity_action(env, accel) - elif self.fail_safe == 'feasible_accel': - accel = self.get_feasible_action(accel) - elif self.fail_safe == 'all': - accel = self.get_safe_velocity_action(env, accel) - accel = self.get_feasible_action(accel) - accel = self.get_safe_action_instantaneous(env, accel) + if self.fail_safe is not None: + for check in self.fail_safe: + if check == 'instantaneous': + accel = self.get_safe_action_instantaneous(env, accel) + elif check == 'safe_velocity': + accel = self.get_safe_velocity_action(env, accel) + elif check == 'feasible_accel': + accel = self.get_feasible_action(accel) + elif check == 'obey_speed_limit': + accel = self.get_obey_speed_limit_action(env, accel) + env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) return accel @@ -275,16 +288,55 @@ def safe_velocity(self, env): this_edge = env.k.vehicle.get_edge(self.veh_id) edge_speed_limit = env.k.network.speed_limit(this_edge) - if v_safe > edge_speed_limit: - v_safe = edge_speed_limit + if this_vel > v_safe: print( "=====================================\n" - "Speed of vehicle {} is greater than speed limit. Safe " - "velocity clipping applied.\n" + "Speed of vehicle {} is greater than safe speed. Safe velocity " + "clipping applied.\n" "=====================================".format(self.veh_id)) return v_safe + def get_obey_speed_limit_action(self, env, action): + """Perform the "obey_speed_limit" failsafe action. 
+ + Checks if the computed acceleration would put us above edge speed limit. + If it would, output the acceleration that would put at the speed limit + velocity. + + Parameters + ---------- + env : flow.envs.Env + current environment, which contains information of the state of the + network at the current time step + action : float + requested acceleration action + + Returns + ------- + float + the requested action clipped by the speed limit + """ + # check for speed limit + this_edge = env.k.vehicle.get_edge(self.veh_id) + edge_speed_limit = env.k.network.speed_limit(this_edge) + + this_vel = env.k.vehicle.get_speed(self.veh_id) + sim_step = env.sim_step + + if this_vel + action * sim_step > edge_speed_limit: + if edge_speed_limit > 0: + print( + "=====================================\n" + "Speed of vehicle {} is greater than speed limit. Obey " + "speed limit clipping applied.\n" + "=====================================".format(self.veh_id)) + return (edge_speed_limit - this_vel) / sim_step + else: + return -this_vel / sim_step + else: + return action + def get_feasible_action(self, action): """Perform the "feasible_accel" failsafe action. 
From bd7622a0d0290e68fb2e37df85dcbedd02e7ec3b Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 13:56:33 -0700 Subject: [PATCH 217/335] fix get metadata --- flow/data_pipeline/leaderboard_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index f4476b2eb..dedd5b3c6 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -145,8 +145,7 @@ def get_metadata(name, bucket="circles.data.pipeline"): """ s3 = boto3.client("s3") name_list = name.split('_') - source_id = "flow_{}".format(name_list[2]) response = s3.head_object(Bucket=bucket, Key="fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(name_list[0], - source_id)) + name_list[1])) return response["Metadata"] From e9c0438038ddf53571363812262de48f919fd466 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 28 May 2020 14:03:35 -0700 Subject: [PATCH 218/335] fix get metadata --- flow/data_pipeline/leaderboard_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index dedd5b3c6..5cef40804 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -145,7 +145,8 @@ def get_metadata(name, bucket="circles.data.pipeline"): """ s3 = boto3.client("s3") name_list = name.split('_') + source_id = name_list[1].replace('.csv', "").replace('-', '_') response = s3.head_object(Bucket=bucket, Key="fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(name_list[0], - name_list[1])) + source_id)) return response["Metadata"] From fd29e0fad7dcc67a1115b43390fc264ce8fc9740 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 28 May 2020 16:06:23 -0700 Subject: [PATCH 219/335] Code cleanup --- .../imitating_controller.py | 22 ++- .../imitation_learning/imitating_network.py | 129 +++++++++++--- 
.../imitation_learning/keras_utils.py | 63 ++++++- .../imitation_learning/ppo_model.py | 67 +++++-- .../imitation_learning/replay_buffer.py | 32 +++- flow/controllers/imitation_learning/run.py | 53 +++--- .../train_with_imitation.py | 50 +++--- .../controllers/imitation_learning/trainer.py | 45 +++-- flow/controllers/imitation_learning/utils.py | 164 ++++++++++++------ .../imitation_learning/utils_tensorflow.py | 44 +++-- 10 files changed, 487 insertions(+), 182 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index a13ce2083..53212f3ab 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -14,10 +14,14 @@ class ImitatingController(BaseController): def __init__(self, veh_id, action_network, multiagent, car_following_params=None, time_delay=0.0, noise=0, fail_safe=None): """ - Args: - veh_id: ID of vehicle to control - action_network: Instance of imitating_network class; neural net that gives action given state - multiagent: boolean indicating if env is multiagent or singleagent + Parameters + __________ + veh_id: String + ID of vehicle to control + action_network: ImitatingNetwork + Instance of imitating_network class; neural net that gives action given state + multiagent: bool + boolean indicating if env is multiagent or singleagent """ BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) @@ -25,12 +29,14 @@ def __init__(self, veh_id, action_network, multiagent, car_following_params=None self.multiagent = multiagent # whether env is multiagent or singleagent self.veh_id = veh_id # vehicle id that controller is controlling + def get_accel(self, env): """ - Args: - env: instance of environment being used - - Get acceleration for vehicle in the env, using action_network. Overrides superclass method. 
+ Get acceleration for vehicle in the environment. Overrides superclass method. + Parameters + __________ + env: Gym Env + instance of environment being used """ # observation is a dictionary for multiagent envs, list for singleagent envs if self.multiagent: diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index c2ab892cc..1db349b14 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -5,6 +5,8 @@ import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController from replay_buffer import ReplayBuffer +from time import time +from tensorflow.python.keras.callbacks import TensorBoard class ImitatingNetwork(): @@ -12,21 +14,29 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_existing=False, load_path=''): - - """ - Initializes and constructs neural network - - Args: - sess: Tensorflow session variable - action_dim: dimension of action space (determines size of network output) - obs_dim: dimension of observation space (size of network input) - num_layers: number of hidden layers (for an MLP) - size: size of each layer in network - replay_buffer_size: maximum size of replay buffer used to hold data for training - stochastic: boolean indicating if the network outputs a stochastic (multivariate Gaussian) or deterministic policy - load_existing: boolean, whether to load an existing tensorflow model - load_path: path to directory containing an existing tensorflow model + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path=''): + + """Initializes and constructs neural network. 
+ Parameters + ---------- + sess : tf.Session + Tensorflow session variable + action_dim : int + action_space dimension + obs_dim : int + dimension of observation space (size of network input) + fcnet_hiddens : list + list of hidden layer sizes for fully connected network (length of list is number of hidden layers) + replay_buffer_size: int + maximum size of replay buffer used to hold data for training + stochastic: bool + indicates if network outputs a stochastic (MV Gaussian) or deterministic policy + variance_regularizer: float + regularization hyperparameter to penalize high variance policies + load_model: bool + if True, load model from path specified in load_path + load_path: String + path to h5 file containing model to load. """ @@ -38,7 +48,7 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.variance_regularizer = variance_regularizer # load network if specified, or construct network - if load_existing: + if load_model: self.load_network(load_path) else: @@ -47,8 +57,6 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.replay_buffer = ReplayBuffer(replay_buffer_size) - - def build_network(self): """ Defines neural network for choosing actions. 
Defines placeholders and forward pass @@ -61,22 +69,43 @@ def build_network(self): def compile_network(self): + """ + Compiles Keras network with appropriate loss and optimizer + """ loss = get_loss(self.stochastic, self.variance_regularizer) self.model.compile(loss=loss, optimizer='adam') def train(self, observation_batch, action_batch): """ - Executes one training step for the given batch of observation and action data + Executes one training (gradient) step for the given batch of observation and action data + + Parameters + ---------- + observation_batch : numpy array + numpy array containing batch of observations (inputs) + action_batch : numpy array + numpy array containing batch of actions (labels) """ + # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) - batch_size = action_batch.shape[0] - self.model.fit(observation_batch, action_batch, batch_size=batch_size, epochs=1, steps_per_epoch=1, verbose=0) + # one gradient step on batch + self.model.train_on_batch(observation_batch, action_batch) def get_accel_from_observation(self, observation): """ Gets the network's acceleration prediction based on given observation/state + + Parameters + ---------- + observation : numpy array + numpy array containing a single observation + + Returns + ------- + numpy array + one element numpy array containing accleeration """ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays @@ -95,24 +124,56 @@ def get_accel_from_observation(self, observation): def get_accel(self, env): """ Get network's acceleration prediction(s) based on given env + + Parameters + ---------- + env : + environment object + + Returns + ------- + numpy array + one element numpy array containing accleeration + """ observation = env.get_state() return self.get_accel_from_observation(observation) def add_to_replay_buffer(self, rollout_list): - """ Add rollouts to 
replay buffer """ + """ + Add data to a replay buffer + + Parameters + ---------- + rollout_list : list + list of rollout dictionaries + """ self.replay_buffer.add_rollouts(rollout_list) def sample_data(self, batch_size): - """ Sample a batch of data from replay buffer """ + """ + Sample a batch of data from replay buffer. + + Parameters + ---------- + batch_size : int + size of batch to sample + """ return self.replay_buffer.sample_batch(batch_size) def save_network(self, save_path): - """ Save network to given path and to tensorboard """ + """ + Save imitation network as a h5 file in save_path + + Parameters + ---------- + save_path : String + path to h5 file to save to + """ self.model.save(save_path) # tensorboard @@ -120,18 +181,30 @@ def save_network(self, save_path): # writer = tf.summary.FileWriter('./graphs2', tf.get_default_graph()) def load_network(self, load_path): + """ + Load imitation network from a h5 file in load_path + + Parameters + ---------- + load_path : String + path to h5 file containing model to load from + """ if self.stochastic: self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + else: + self.model = tf.keras.models.load_model(load_path) def save_network_PPO(self, save_path): """ - Builds and saves keras model for training PPO using policy weights learned from imitation. - - Args: - save_path: path (including h5 format filename) where the PPO model should be saved + Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, and save this model. 
+ Parameters + ---------- + load_path : save_path + path to h5 file to save to """ + input = tf.keras.layers.Input(self.model.input.shape[1].value) curr_layer = input diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index e8dbaf458..87679a005 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -4,6 +4,21 @@ from tensorflow.keras.layers import Dense def build_neural_net_deterministic(input_dim, action_dim, fcnet_hiddens): + """Build a keras model to output a deterministic policy. + Parameters + ---------- + input_dim : int + dimension of input layer + action_dim : int + action_space dimension + fcnet_hiddens : list + list containing size of each hidden layer (length of list is number of hidden layers) + + Returns + ------- + Keras model (untrained) + """ + input_layer = Input(shape=(input_dim, )) curr_layer = input_layer @@ -17,6 +32,20 @@ def build_neural_net_deterministic(input_dim, action_dim, fcnet_hiddens): return model def build_neural_net_stochastic(input_dim, action_dim, fcnet_hiddens): + """Build a keras model to output a stochastic policy. + Parameters + ---------- + input_dim : int + dimension of input layer + action_dim : int + action_space dimension + fcnet_hiddens : list + list containing size of each hidden layer (length of list is number of hidden layers) + + Returns + ------- + Keras model (untrained) + """ input_layer = Input(shape=(input_dim, )) curr_layer = input_layer @@ -31,20 +60,46 @@ def build_neural_net_stochastic(input_dim, action_dim, fcnet_hiddens): return model def get_loss(stochastic, variance_regularizer): + """Get appropriate loss function for training. 
+ Parameters + ---------- + stochastic : bool + determines if policy to be learned is deterministic or stochastic + variance_regularizer : float + regularization hyperparameter to penalize high variance policies + + Returns + ------- + Keras loss function to use for imitation learning. + """ if stochastic: return negative_log_likelihood_loss(variance_regularizer) else: return tf.keras.losses.mean_squared_error def negative_log_likelihood_loss(variance_regularizer): + """Negative log likelihood loss for learning stochastic policies. - def nll_loss(y, distribution_params): - assert distribution_params.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + Parameters + ---------- + variance_regularizer : float + regularization hyperparameter to penalize high variance policies + Returns + ------- + Negative log likelihood loss function with variance regularization. + """ - action_dim = distribution_params.shape[1]//2 - means, log_stds = distribution_params[:, :action_dim], distribution_params[:, action_dim:] + def nll_loss(y, network_output): + assert network_output.shape[1] % 2 == 0, "Stochastic policies must output vectors of even length" + + action_dim = network_output.shape[1] // 2 + + # first half of network_output is mean, second half is log_std + means, log_stds = network_output[:, :action_dim], network_output[:, action_dim:] stds = tf.math.exp(log_stds) variances = tf.math.square(stds) + + # Multivariate Gaussian distribution dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) loss = dist.log_prob(y) loss = tf.negative(loss) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 5ad97a75d..df8648afa 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -8,11 +8,26 @@ class PPONetwork(TFModelV2): """ - Custom RLLib PPOModel (using tensorflow keras) to load weights from a pretained 
policy model (e.g. from imitation learning) and start RL training with loaded weights. - Subclass of TFModelV2 + Custom RLLib PPOModel (using tensorflow keras) to load weights from a pre-trained policy model (e.g. from imitation learning) and start RL training with loaded weights. + Subclass of TFModelV2. See https://docs.ray.io/en/master/rllib-models.html. """ def __init__(self, obs_space, action_space, num_outputs, model_config, name): + """ + Parameters + __________ + obs_space: gym.Space + observation space of gym environment + action_space: gym.Space + action_space of gym environment + num_outputs: int + number of outputs for policy network. For deterministic policies, this is dimension of the action space. For continuous stochastic policies, this is 2 * dimension of the action space + model_config: dict + configuration of model + name: str + name of model + + """ super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) @@ -28,12 +43,19 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): """ Loads/builds model for both policy and value function - Args: - obs_space: observation space of env - action_space: action space of env - model_config: configuration parameters for model - num_outputs: number of outputs expected for policy - imitation_h5_path: path to h5 file containing weights of a pretrained network (empty string if no such file) + Parameters + __________ + + obs_space: gym.Space + observation space of env + action_space: gym.Space + action space of env + model_config: dict + configuration parameters for model + num_outputs: int + number of outputs expected for policy + imitation_h5_path: str + path to h5 file containing weights of a pretrained network (empty string if no such file) """ if imitation_h5_path: @@ -77,7 +99,20 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, 
imitat def forward(self, input_dict, state, seq_lens): """ - Overrides parent class's method. Used to pass a input through model and get policy/vf output. + Overrides parent class's method. Used to pass a input through model and get policy/vf output. + Parameters + __________ + input_dict: dict + dictionary of input tensors, including “obs”, “obs_flat”, “prev_action”, “prev_reward”, “is_training” + state: list + list of state tensors with sizes matching those returned by get_initial_state + the batch dimension + seq_lens: tensor + 1d tensor holding input sequence lengths + + Returns + _______ + (outputs, state) + Tuple, first element is policy output, second element state """ policy_out, value_out = self.base_model(input_dict["obs_flat"]) @@ -86,9 +121,21 @@ def forward(self, input_dict, state, seq_lens): def value_function(self): """ - Overrides parent class's method. Get value function method. + Returns the value function output for the most recent forward pass. + + Returns + _______ + tensor + value estimate tensor of shape [BATCH]. """ return tf.reshape(self.value_out, [-1]) def import_from_h5(self, import_file): + """ + Overrides parent class method. Import base_model from h5 import_file. 
+ Parameters: + __________ + import_file: str + filepath to h5 file + """ self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 58bdd2cd7..4e02a52c8 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -26,6 +26,12 @@ def __init__(self, max_size=100000): def add_rollouts(self, rollouts_list): """ Add a list of rollouts to the replay buffer + + Parameters + __________ + rollouts_list: list + list of rollout dictionaries + """ for rollout in rollouts_list: @@ -53,10 +59,18 @@ def add_rollouts(self, rollouts_list): def sample_batch(self, batch_size): """ - Sample a batch of data (with size batch_size) from replay buffer. - Returns data in separate numpy arrays of observations, actions, rewards, next_observations, terminals + Sample a batch of data (with size batch_size) from replay buffer. 
+ + Parameters + ---------- + batch_size: int + size of batch to sample + + Returns + _______ + Data in separate numpy arrays of observations, actions, and expert actionis """ - assert self.observations is not None and self.actions is not None and self.expert_actions is not None and self.rewards is not None and self.next_observations is not None and self.terminals is not None + assert self.observations is not None and self.actions is not None and self.expert_actions is not None size = len(self.observations) rand_inds = np.random.randint(0, size, batch_size) @@ -66,9 +80,15 @@ def sample_batch(self, batch_size): def unpack_rollouts(self, rollouts_list): """ - Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays - rollouts: list of rollout dictionaries, rollout dictionary: dictionary with keys "observations", "actions", "rewards", "next_observations", "is_terminals" - return separate np arrays of observations, actions, rewards, next_observations, and is_terminals + Convert list of rollout dictionaries to individual observation, action, rewards, next observation, terminal arrays + Parameters + ---------- + rollouts: list + list of rollout dictionaries + + Returns + ---------- + separate numpy arrays of observations, actions, rewards, next_observations, and is_terminals """ observations = np.concatenate([rollout["observations"] for rollout in rollouts_list]) actions = np.concatenate([rollout["actions"] for rollout in rollouts_list]) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 439b5e5d0..40a1b08a3 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -9,9 +9,17 @@ class Runner(object): """ Class to run imitation learning (training and evaluation) """ def __init__(self, params): + """ + Parameters + __________ + params: dict + dictionary of parameters relevent to running imitation learning. 
+ """ # initialize trainer class instance and params self.params = params + + # import appropriate exp_config module if self.params['multiagent']: module = __import__("examples.exp_configs.rl.multiagent", fromlist=[self.params['exp_config']]) else: @@ -22,7 +30,7 @@ def __init__(self, params): def run_training_loop(self): """ - Runs training for imitation learning for specified number of iterations + Runs training for imitation learning for number of iterations specified in params. """ self.trainer.run_training_loop(n_iter=self.params['n_iter']) @@ -34,18 +42,22 @@ def evaluate(self): def save_controller_network(self): """ - Saves a tensorflow checkpoint to path specified in params (and writes to tensorboard) + Saves the tensorflow keras model of the imitation policy to a h5 file, whose path is specified in params """ self.trainer.save_controller_network() def save_controller_for_PPO(self): + """ + Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning + """ self.trainer.save_controller_for_PPO() def main(): """ - Parse args, run training, and evalutation + Parse args, run training, and evaluate. """ + import argparse parser = argparse.ArgumentParser() @@ -55,26 +67,25 @@ def main(): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument('--ep_len', type=int, default=5000) + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts. 
') - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5) + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=5, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') - parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=4000) + parser.add_argument('--batch_size', type=int, default=3000, help='Number of environment steps to collect in iteration of DAgger') + parser.add_argument('--init_batch_size', type=int, default=4000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled data points to be used per gradient/train step + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') - - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--save_path', type=str, default='') - parser.add_argument('--save_model', type=int, default=0) - parser.add_argument('--num_eval_episodes', type=int, default=0) - parser.add_argument('--stochastic', type=bool, default=False) - parser.add_argument('--multiagent', type=bool, default=False) - parser.add_argument('--v_des', type=float, default=15) - parser.add_argument('--variance_regularizer', type=float, default=0.5) + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + 
parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') + parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent. ') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') args = parser.parse_args() # convert args to dictionary @@ -94,10 +105,8 @@ def main(): if params['save_model'] == 1: train.save_controller_network() - # evaluate + # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout train.evaluate() - print("DONE") - if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index 3dfbb1265..416ae7048 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -8,11 +8,12 @@ def parse_args(args): ------- argparse.Namespace the output parser object + dict_args + dictionary version of the argparse """ # train.py args - parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="Parse argument used when running a Flow simulation.", @@ -24,6 +25,7 @@ def parse_args(args): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or 
exp_configs/rl/multiagent.') + parser.add_argument( 'exp_title', type=str, help='Title to give the run.') @@ -71,26 +73,26 @@ def parse_args(args): '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + # Imitation Learning args + parser.add_argument('--ep_len', type=int, default=5000, help="Maximum length of episode for imitation learning") - parser.add_argument('--ep_len', type=int, default=5000) + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help="Number of gradient steps to take per iteration") # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=5, help="Number of iterations of DAgger to perform (1st iteration is behavioral cloning)") - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000) # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5) - - parser.add_argument('--batch_size', type=int, default=3000) # training data collected (in the env) during each iteration + parser.add_argument('--batch_size', type=int, default=3000, help="") # training data collected (in the env) during each iteration parser.add_argument('--init_batch_size', type=int, default=4000) - parser.add_argument('--train_batch_size', type=int, - default=100) # number of sampled data points to be used per gradient/train step - + parser.add_argument('--train_batch_size', type=int, default=100, help="Batch size for training") # number of sampled data points to be used per gradient/train step + parser.add_argument('--tensorboard_path', type=str, help='Path to tensorboard log dir for imitation') parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--num_eval_episodes', type=int, default=0) - parser.add_argument('--stochastic', type=bool, default=False) - parser.add_argument('--multiagent', type=bool, default=False) - parser.add_argument('--v_des', 
type=float, default=15) - parser.add_argument('--variance_regularizer', type=float, default=0.5) + parser.add_argument('--num_eval_episodes', type=int, default=0, help="Number of episodes to evaluate imitation controller.") + parser.add_argument('--stochastic', type=bool, default=False, help="If true, controller learns stochastic policy (multivariate gaussian)") + parser.add_argument('--multiagent', type=bool, default=False, help="Whether the env is multiagent") + parser.add_argument('--v_des', type=float, default=15, help="v_des for FollowerStopper") + parser.add_argument('--variance_regularizer', type=float, default=0.5, help="Regularization parameter to penalize high variance in negative log likelihood loss") + parsed_args = parser.parse_known_args(args)[0] dict_args = vars(parsed_args) @@ -102,28 +104,24 @@ def parse_args(args): def main(args): - """ - Parse args, run training, and evalutation - """ - flags, params = parse_args(args) - params["fcnet_hiddens"] = [32, 32, 32] - # change this to determine number and size of hidden layers + # Parse args, train imitation learning + + flags, params = parse_args(args) params["fcnet_hiddens"] = [32, 32, 32] assert flags.n_iter>1, ('DAgger needs >1 iteration') - + print("\n\n********** IMITATION LEARNING ************ \n") # run training imitation_runner = Runner(params) imitation_runner.run_training_loop() - # save model after training + # convert model to work for PPO and save for training imitation_runner.save_controller_for_PPO() - ### IMITATION DONE - - + # Imitation Done, start RL + print("\n\n********** RL ************ \n") # Import relevant information from the exp_config script. 
module = __import__( @@ -158,7 +156,5 @@ def main(args): raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " "or 'stable-baselines'.") - - if __name__ == "__main__": main(sys.argv[1:]) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index fc055ccda..84a2ed15d 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -20,6 +20,14 @@ class Trainer(object): """ def __init__(self, params, submodule): + """ + Parameters + __________ + params: dict + Dictionary of parameters used to run imitation learning + submodule: Module + Python module for file containing flow_params + """ # get flow params self.flow_params = submodule.flow_params @@ -72,8 +80,10 @@ def run_training_loop(self, n_iter): """ Trains imitator for n_iter iterations (each iteration collects new trajectories to put in replay buffer) - Args: - param n_iter: number of iterations to execute training + Parameters + __________ + n_iter : + intnumber of iterations to execute training """ # init vars at beginning of training @@ -104,12 +114,18 @@ def collect_training_trajectories(self, itr, batch_size): """ Collect (state, action, reward, next_state, terminal) tuples for training - Args: - itr: iteration of training during which function is called. Used to determine whether to run behavioral cloning or DAgger - batch_size: number of tuples to collect - Returns: - paths: list of trajectories - envsteps_this_batch: the sum over the numbers of environment steps in paths + Parameters + __________ + itr: int + iteration of training during which function is called. 
Used to determine whether to run behavioral cloning or DAgger + batch_size: int + number of tuples to collect + Returns + _______ + paths: list + list of trajectories + envsteps_this_batch: int + the sum over the numbers of environment steps in paths (total number of env transitions in trajectories collected) """ print("\nCollecting data to be used for training...") @@ -132,10 +148,12 @@ def train_controller(self): def evaluate_controller(self, num_trajs = 10): """ - Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout + Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout. - Args: - num_trajs: number of trajectories to evaluate performance on + Parameters + __________ + num_trajs: int + number of trajectories to evaluate performance on """ print("\n\n********** Evaluation ************ \n") @@ -211,10 +229,13 @@ def evaluate_controller(self, num_trajs = 10): def save_controller_network(self): """ - Saves a tensorflow model to the specified path given in the command line params. Path must end with .ckpt + Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) def save_controller_for_PPO(self): + """ + Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
+ """ self.action_network.save_network_PPO(self.params['save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 3be12f849..483b76e7d 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -13,14 +13,26 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des, max_decel): """ - Samples a trajectory for a given vehicle using the actions prescribed by specified controller. - Args: - env: environment - vehicle_id: id of the vehicle that is being controlled/tracked during trajectory - controller: subclass of BaseController, decides actions taken by vehicle - expert_controller: subclass of BaseController, "expert" for imitation learning - max_trajectory_length: maximum steps in a trajectory - Returns: + Samples a single trajectory from a singleagent environment. + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + max_trajectory_length: int + maximum steps in a trajectory + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + max_decel: float + maximum deceleration of environment. Used to determine dummy values to put as labels when environment has less vehicles than the maximum amount. 
+ Returns + _______ + dict Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ @@ -85,7 +97,7 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto if use_expert: if traj_length == 0 and i == 0: - print("Controller collecing trajectory: ", type(expert)) + print("Controller collecting trajectory: ", type(expert)) rl_actions.append(expert_action) else: if traj_length == 0 and i == 0: @@ -130,16 +142,25 @@ def sample_trajectory_singleagent(env, controllers, action_network, max_trajecto def sample_trajectory_multiagent(env, controllers, action_network, max_trajectory_length, use_expert, v_des): """ - Samples a trajectory for a given set of vehicles using the actions prescribed by specified controller. - - Args: - env: environment - vehicle_ids: id of the vehicle that is being controlled/tracked during trajectory - controllers: subclass of BaseController, decides actions taken by vehicle - expert_controllers: subclass of BaseController, "expert" for imitation learning - max_trajectory_length: maximum steps in a trajectory - - Returns: + Samples a single trajectory from a multiagent environment. 
+ + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + max_trajectory_length: int + maximum steps in a trajectory + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + Returns + _______ + dict Dictionary of numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ @@ -150,8 +171,6 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector while True: - - # vehicle_ids = env.k.vehicle.get_rl_ids() **this doesn't work now due to control range restriction** vehicle_ids = list(observation_dict.keys()) # add nothing to replay buffer if no vehicles if len(vehicle_ids) == 0: @@ -230,19 +249,34 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector def sample_trajectories(env, controllers, action_network, min_batch_timesteps, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ - Samples trajectories to collect at least min_batch_timesteps steps in the environment - - Args: - env: environment - vehicle_id: id of vehicle being tracked/controlled - controller: subclass of BaseController, decides actions taken by vehicle - expert_controller: subclass of BaseController, "expert" for imitation learning - min_batch_timesteps: minimum number of environment steps to collect - max_trajectory_length: maximum steps in a trajectory - v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) - - Returns: - List of rollout dictionaries, total steps taken by environment + Samples trajectories from environment. 
+ + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + min_batch_timesteps: int + minimum number of env transitions to collect + max_trajectory_length: int + maximum steps in a trajectory + multiagent: bool + if True, env is a multiagent env + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + max_decel: float + maximum deceleration of environment. Used to determine dummy values to put as labels when environment has less vehicles than the maximum amount. + + Returns + _______ + dict, int + Dictionary of trajectory numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples + Total number of env transitions seen over trajectories """ total_envsteps = 0 trajectories = [] @@ -262,22 +296,35 @@ def sample_trajectories(env, controllers, action_network, min_batch_timesteps, m def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_length, multiagent, use_expert, v_des=15, max_decel=4.5): """ - Collects a fixed number of trajectories. - - Args: - env: environment - vehicle_id: id of vehicle being tracked/controlled - controller: subclass of BaseController, decides actions taken by vehicle - expert_controller: subclass of BaseController, "expert" for imitation learning - n: number of trajectories to collect - max_trajectory_length: maximum steps in a trajectory - v_des: parameter used for follower-stopper (applies if Expert controller is follower-stopper) - - - Returns: - List of rollouts (tuple of rollout dictionary, length of rollout) - + Samples n trajectories from environment. 
+ + Parameters + __________ + env: gym.Env + environment + controllers: dict + Dictionary of 2-tuples (Imitating_Controller, Expert_Controller), with keys of vehicle_ids + action_network: ImitatingNetwork + ImitatingNetwork class containing neural net for action prediction + n: int + number of trajectories to collect + max_trajectory_length: int + maximum steps in a trajectory + multiagent: bool + if True, env is a multiagent env + use_expert: bool + if True, trajectory is collected using expert policy (for behavioral cloning) + v_des: float + v_des parameter for follower-stopper + max_decel: float + maximum deceleration of environment. Used to determine dummy values to put as labels when environment has less vehicles than the maximum amount. + + Returns + _______ + dict + Dictionary of trajectory numpy arrays, where matching indeces of each array given (state, action, expert_action, reward, next_state, terminal) tuples """ + trajectories = [] for _ in range(n): @@ -293,7 +340,24 @@ def sample_n_trajectories(env, controllers, action_network, n, max_trajectory_le def traj_dict(observations, actions, expert_actions, rewards, next_observations, terminals): """ - Collects individual observation, action, expert_action, rewards, next observation, terminal arrays into a single rollout dictionary + Collects observation, action, expert_action, rewards, next observation, terminal lists (collected over a rollout) into a single rollout dictionary. 
+ Parameters + __________ + observations: list + list of observations; ith entry is ith observation + actions: list + list of actions; ith entry is action taken at ith timestep + rewards: list + list of rewards; ith entry is reward received at ith timestep + next_observations: list + list of next observations; ith entry is the observation transitioned to due to state and action at ith timestep + terminals: list + list of booleans indicating if rollout ended at that timestep + + Returns + _______ + dict + dictionary containing above lists in numpy array form. """ return {"observations" : np.array(observations), "actions" : np.array(actions), diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/controllers/imitation_learning/utils_tensorflow.py index 7be44cf60..cbbfa633d 100644 --- a/flow/controllers/imitation_learning/utils_tensorflow.py +++ b/flow/controllers/imitation_learning/utils_tensorflow.py @@ -6,21 +6,28 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, activation=tf.tanh, output_activation=None): """ - Builds a feedfoward neural network for action prediction - - arguments: - input_placeholder: placeholder variable for the state (batch_size, input_size) - scope: variable scope of the network - - n_layers: number of hidden layers - size: dimension of each hidden layer - activation: activation of each hidden layer - - output_size: size of the output layer - output_activation: activation of the output layer - - returns: - output_placeholder: the result of pass through Neural Network + Builds a feedfoward neural network for action prediction + Parameters + __________ + input_placeholder: tensor + placeholder variable for the state (batch_size, input_size) + scope: str + variable scope of the network + n_layers: int + number of hidden layers + size: int + dimension of each hidden layer + activation: str + activation function of each hidden layer + output_size: int + size of the output layer + output_activation: 
str + activation function of the output layer + + Returns + _______ + output_placeholder: tensor + the result of pass through Neural Network """ output_placeholder = input_placeholder with tf.variable_scope(scope, reuse=tf.AUTO_REUSE): @@ -30,6 +37,13 @@ def build_neural_net(input_placeholder, output_size, scope, n_layers, size, acti return output_placeholder def create_tf_session(): + """ + Creates a tf session + Returns + _______ + tf.Session + new tensorflow session + """ config = tf.compat.v1.ConfigProto(device_count={'GPU': 0}) sess = tf.compat.v1.Session(config=config) return sess From 2b6cc0838758aad2017715dd706d0bc3a98fff82 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 28 May 2020 16:08:19 -0700 Subject: [PATCH 220/335] test with cluster --- scripts/ray_autoscale.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/scripts/ray_autoscale.yaml b/scripts/ray_autoscale.yaml index 5cf0eca96..a216a3747 100644 --- a/scripts/ray_autoscale.yaml +++ b/scripts/ray_autoscale.yaml @@ -1,4 +1,4 @@ -# cluster.yaml ========================================= +# cluster.yaml ========================================= # An unique identifier for the head node and workers of this cluster. cluster_name: test # @@ -67,7 +67,7 @@ worker_nodes: # Additional options in the boto docs. 
setup_commands: - - cd flow && git fetch && git checkout origin/i210_dev + - cd flow && git fetch && git checkout origin/akash-dagger - pip install ray==0.8.0 - pip install tabulate - pip install boto3==1.10.45 # 1.4.8 adds InstanceMarketOptions From 39e4bc4436f894754f766c5878137ee2d0675ec6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 09:23:43 -0700 Subject: [PATCH 221/335] fix pathname --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 82cdcd943..de61d691c 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -213,7 +213,7 @@ def rl_actions(*_): # Delete the .xml version of the emission file. os.remove(emission_path) - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) if partition_name: From d6b6c182893abef5ce19671b03f053105244f118 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 09:25:31 -0700 Subject: [PATCH 222/335] fix pathname --- flow/visualize/i210_replay.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index af19111dc..b3e41b6c1 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -329,7 +329,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= emission_to_csv(emission_path, output_path=output_path) # generate the trajectory output file - trajectory_table_path = dir_path + source_id + ".csv" + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) # upload to s3 if asked From 3dfafe157093adecef9640d58425e5c210ecf065 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 29 May 2020 
16:39:34 -0700 Subject: [PATCH 223/335] Bug fix --- .../imitation_learning/imitating_network.py | 2 +- .../imitation_learning/keras_utils.py | 35 ++++++++++++++++--- .../imitation_learning/ppo_model.py | 12 +++++-- flow/controllers/imitation_learning/run.py | 7 ++-- .../controllers/imitation_learning/trainer.py | 5 +-- 5 files changed, 48 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 1db349b14..569ec6fd1 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -190,7 +190,7 @@ def load_network(self, load_path): path to h5 file containing model to load from """ if self.stochastic: - self.model = tf.keras.models.load_model(load_path, custom_objects={'negative_log_likelihood_loss': negative_log_likelihood_loss}) + self.model = tf.keras.models.load_model(load_path, custom_objects={'nll_loss': negative_log_likelihood_loss(self.variance_regularizer)}) else: self.model = tf.keras.models.load_model(load_path) diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 87679a005..59928affc 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -95,15 +95,42 @@ def nll_loss(y, network_output): action_dim = network_output.shape[1] // 2 # first half of network_output is mean, second half is log_std - means, log_stds = network_output[:, :action_dim], network_output[:, action_dim:] + means, log_stds = tf.split(network_output, 2, axis=1) stds = tf.math.exp(log_stds) - variances = tf.math.square(stds) + # variances = tf.math.square(stds) # Multivariate Gaussian distribution - dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=variances) + dist = tfp.distributions.MultivariateNormalDiag(loc=means, scale_diag=stds) loss = dist.log_prob(y) 
loss = tf.negative(loss) - loss = tf.reduce_mean(loss) + (variance_regularizer * tf.norm(variances)) + loss = tf.reduce_mean(loss) + (variance_regularizer * tf.norm(stds)) return loss return nll_loss + +def compare_weights(ppo_model, imitation_path): + imitation_model = tf.keras.models.load_model(imitation_path, custom_objects={'nll_loss': negative_log_likelihood_loss(0.5)}) + + for i in range(len(imitation_model.layers) - 2): + ppo_name = 'policy_hidden_layer_' + str(i + 1) + ppo_layer = ppo_model.get_layer(ppo_name) + im_layer = imitation_model.layers[i + 1] + + ppo_weights = ppo_layer.get_weights() + im_weights = im_layer.get_weights() + for i in range(len(ppo_weights)): + print("\n\n") + print(type((ppo_weights[i] == im_weights[i]))) + print("\n\n") + assert (ppo_weights[i] == im_weights[i]).all(), "Weights don't match!" + + ppo_layer = ppo_model.get_layer('policy_output_layer') + im_layer = imitation_model.layers[-1] + ppo_weights = ppo_layer.get_weights() + im_weights = im_layer.get_weights() + for i in range(len(ppo_weights)): + assert (ppo_weights[i] == im_weights[i]).all(), "Weights don't match!" + + print("\n\nWeights properly loaded\n\n") + + diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index df8648afa..55f2fafc5 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -4,6 +4,7 @@ from ray.rllib.models.tf.misc import normc_initializer from ray.rllib.models.tf.tf_modelv2 import TFModelV2 import tensorflow as tf +from flow.controllers.imitation_learning.keras_utils import * class PPONetwork(TFModelV2): @@ -12,7 +13,7 @@ class PPONetwork(TFModelV2): Subclass of TFModelV2. See https://docs.ray.io/en/master/rllib-models.html. 
""" - def __init__(self, obs_space, action_space, num_outputs, model_config, name): + def __init__(self, obs_space, action_space, num_outputs, model_config, name, **kwargs): """ Parameters __________ @@ -31,13 +32,15 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name): super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) - h5_path = model_config.get("custom_options").get("h5_load_path", "") + h5_path = kwargs.get("h5_load_path", "") + # print("\n\nH5 PATH: ", h5_path + "\n\n") # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) # register variables for base model self.register_variables(self.base_model.variables) + # compare_weights(self.base_model, "/Users/akashvelu/Desktop/latest_run/imitation_model.h5") def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): @@ -60,6 +63,8 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat if imitation_h5_path: # set base model to be loaded model + print("\n\nLOAAAADING IMMMMITATIONNNNN MODELLLLLL\n\n") + self.base_model = tf.keras.models.load_model(imitation_h5_path) else: @@ -130,6 +135,7 @@ def value_function(self): """ return tf.reshape(self.value_out, [-1]) + def import_from_h5(self, import_file): """ Overrides parent class method. Import base_model from h5 import_file. 
@@ -138,4 +144,4 @@ def import_from_h5(self, import_file): import_file: str filepath to h5 file """ - self.setup_model(self, self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) + self.setup_model(self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 40a1b08a3..8ebdfb899 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -70,10 +70,10 @@ def main(): parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts. ') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') + parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') - parser.add_argument('--batch_size', type=int, default=3000, help='Number of environment steps to collect in iteration of DAgger') - parser.add_argument('--init_batch_size', type=int, default=4000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') + parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') + parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') @@ -104,6 +104,7 @@ def main(): # save model after training if params['save_model'] == 1: train.save_controller_network() + train.save_controller_for_PPO() # 
evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout train.evaluate() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 84a2ed15d..d45c30bc7 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -63,7 +63,8 @@ def __init__(self, params, submodule): # initialize neural network class and tf variables self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer']) - # tf.global_variables_initializer().run(session=self.sess) + # tf.global_variab + # les_initializer().run(session=self.sess) # controllers setup v_des = self.params['v_des'] # for FollowerStopper @@ -238,4 +239,4 @@ def save_controller_for_PPO(self): """ Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
""" - self.action_network.save_network_PPO(self.params['save_path']) + self.action_network.save_network_PPO(self.params['PPO_save_path']) From ba6796160242464877ee06afe40a0d9054420105 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 29 May 2020 16:50:44 -0700 Subject: [PATCH 224/335] Minor cleanup --- flow/controllers/imitation_learning/ppo_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index 55f2fafc5..a15eb6cc5 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -33,7 +33,6 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name, **k super(PPONetwork, self).__init__(obs_space, action_space, num_outputs, model_config, name) h5_path = kwargs.get("h5_load_path", "") - # print("\n\nH5 PATH: ", h5_path + "\n\n") # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) @@ -63,8 +62,6 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat if imitation_h5_path: # set base model to be loaded model - print("\n\nLOAAAADING IMMMMITATIONNNNN MODELLLLLL\n\n") - self.base_model = tf.keras.models.load_model(imitation_h5_path) else: From 82b252eb14c460ed2411cdbfb450d280b2d01e6a Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 29 May 2020 19:33:26 -0700 Subject: [PATCH 225/335] added edge_id, lane_id, and distance --- flow/core/experiment.py | 6 +-- flow/core/kernel/vehicle/traci.py | 7 +++- flow/data_pipeline/data_pipeline.py | 3 ++ flow/data_pipeline/query.py | 62 +++++++++++------------------ 4 files changed, 36 insertions(+), 42 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index e9668d9db..6a4dafdca 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -152,8 +152,8 @@ def rl_actions(*_): # data pipeline extra_info 
= defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadate = defaultdict(lambda: "") - metadate['network'] = self.env.network.name.split('_')[0] + metadata = defaultdict(lambda: "") + metadata['network'] = self.env.network.name.split('_')[0] for i in range(num_runs): ret = 0 @@ -225,7 +225,7 @@ def rl_actions(*_): cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, source_id), - trajectory_table_path, metadate) + trajectory_table_path, metadata) # delete the S3-only version of the trajectory file # os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9485572b2..9ca112345 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -336,7 +336,8 @@ def _add_departed(self, veh_id, veh_type): tc.VAR_POSITION, tc.VAR_ANGLE, tc.VAR_SPEED_WITHOUT_TRACI, - tc.VAR_FUELCONSUMPTION + tc.VAR_FUELCONSUMPTION, + tc.VAR_DISTANCE ]) self.kernel_api.vehicle.subscribeLeader(veh_id, 2000) @@ -1191,6 +1192,10 @@ def get_2d_position(self, veh_id, error=-1001): """See parent class.""" return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_POSITION, error) + def get_distance(self, veh_id, error=-1001): + """See parent class.""" + return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_DISTANCE, error) + def get_road_grade(self, veh_id): """See parent class.""" # TODO : Brent diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 5c9346c40..b1b9d1fef 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -108,6 +108,9 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): veh_kernel.get_accel_no_noise_with_failsafe(vid)) extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) + 
extra_info["edge_id"].append(veh_kernel.get_edge(vid)) + extra_info["lane_id"].append(veh_kernel.get_lane(vid)) + extra_info["distance"].append(veh_kernel.get_distance(vid)) def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 04793cc73..f591aba26 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -160,57 +160,41 @@ class QueryStrings(Enum): FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ WITH sub_fact_vehicle_trace AS ( SELECT - id, - time_step, - x, - source_id - FROM fact_vehicle_trace - WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}\' - ), distance AS ( - SELECT - id, - source_id, - MAX(x)-MIN(x) AS distance_meters - FROM sub_fact_vehicle_trace - WHERE 1 = 1 - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 - GROUP BY 1, 2 - ), energy AS ( - SELECT - e.id, - e.source_id, + v.id, + v.source_id, e.energy_model_id, + MAX(x) - MIN(x) AS distance_meters, (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, SUM(e.power) AS power_watts - FROM fact_energy_trace AS e - JOIN sub_fact_vehicle_trace AS v ON 1 = 1 + FROM fact_vehicle_trace v + JOIN fact_energy_trace AS e ON 1 = 1 AND e.id = v.id AND e.time_step = v.time_step AND e.source_id = v.source_id - WHERE 1 = 1 AND e.date = \'{date}\' AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND v.x BETWEEN 500 AND 2300 AND e.time_step >= 600 + WHERE 1 = 1 + AND v.date = \'{date}\' + AND v.partition_name = \'{partition}' + AND v.x BETWEEN 500 AND 2300 GROUP BY 1, 2, 3 - HAVING COUNT(DISTINCT e.time_step) > 1 + HAVING 1 = 1 + AND MIN(x) - MIN(x) > 10 + AND COUNT(DISTINCT e.time_step) > 10 ) SELECT - d.id, - d.source_id, - e.energy_model_id, + id, + source_id, + energy_model_id, distance_meters, power_watts * time_step_size_seconds AS 
energy_joules, distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, - 74564 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon - FROM distance d - JOIN energy e ON 1=1 - AND d.id = e.id - AND d.source_id = e.source_id + 19972 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + FROM sub_fact_vehicle_trace + WHERE 1 = 1 + AND ABS(power_watts * time_step_size_seconds) > 0 ; """ @@ -221,12 +205,13 @@ class QueryStrings(Enum): SUM(distance_meters) AS distance_meters, SUM(energy_joules) AS energy_joules, SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, - 74564 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + 19972 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND ABS(SUM(energy_joules)) != 0 GROUP BY 1, 2 ;""" @@ -235,7 +220,7 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 74564 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, + 19972 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour FROM fact_network_throughput_agg AS t JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 @@ -513,11 +498,12 @@ class QueryStrings(Enum): LEADERBOARD_CHART_AGG = """ SELECT + date AS query_date, source_id, energy_model_id, efficiency_meters_per_joules, efficiency_miles_per_gallon, throughput_per_hour FROM leaderboard_chart - ORDER BY date, source_id ASC + ORDER BY query_date, source_id ASC ;""" From 65791df1e82deddf9c01456ce8ae1275315bb8ac Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Fri, 29 May 2020 19:59:18 -0700 Subject: [PATCH 226/335] added netwokr name translation --- 
flow/core/experiment.py | 3 ++- flow/data_pipeline/leaderboard_utils.py | 10 ++++++++++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 6a4dafdca..80126a306 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -2,6 +2,7 @@ from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict import datetime import logging @@ -153,7 +154,7 @@ def rl_actions(*_): extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) metadata = defaultdict(lambda: "") - metadata['network'] = self.env.network.name.split('_')[0] + metadata['network'] = network_name_translate(self.env.network.name.split('_')[0]) for i in range(num_runs): ret = 0 diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 5cef40804..86f3fed07 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,6 +5,16 @@ from io import StringIO +network_name_map = {"highway": "Single-Lane Straight Road", + "ring": "Single-Lane Ring Road"} + +def network_name_translate(network_name): + """Translate network name to a human friendly name for the leaderboard.""" + if network_name in network_name_map: + return network_name_map[network_name] + return network_name + + def key_to_name(key): """Return the standard formatted file name from object key.""" k_list = key.split("/") From f1ef8e2b703594f63a7950a871ce0e5caf3840dc Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:29:54 -0700 Subject: [PATCH 227/335] fix some query bugs --- flow/data_pipeline/query.py | 180 ++++++++++++++++-------------------- 1 file changed, 78 insertions(+), 102 deletions(-) diff --git 
a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index f591aba26..15914264f 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -163,7 +163,7 @@ class QueryStrings(Enum): v.id, v.source_id, e.energy_model_id, - MAX(x) - MIN(x) AS distance_meters, + MAX(distance) - MIN(distance) AS distance_meters, (MAX(e.time_step) - MIN(e.time_step)) / (COUNT(DISTINCT e.time_step) - 1) AS time_step_size_seconds, SUM(e.power) AS power_watts FROM fact_vehicle_trace v @@ -177,11 +177,11 @@ class QueryStrings(Enum): AND e.time_step >= 600 WHERE 1 = 1 AND v.date = \'{date}\' - AND v.partition_name = \'{partition}' + AND v.partition_name = \'{partition}\' AND v.x BETWEEN 500 AND 2300 GROUP BY 1, 2, 3 HAVING 1 = 1 - AND MIN(x) - MIN(x) > 10 + AND MIN(distance) - MIN(distance) > 10 AND COUNT(DISTINCT e.time_step) > 10 ) SELECT @@ -211,7 +211,7 @@ class QueryStrings(Enum): AND date = \'{date}\' AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND ABS(SUM(energy_joules)) != 0 + AND SUM(energy_joules) != 0 GROUP BY 1, 2 ;""" @@ -245,7 +245,6 @@ class QueryStrings(Enum): AND date = \'{date}\' AND partition_name = \'{partition}\' AND x BETWEEN 500 AND 2300 - AND time_step >= 600 GROUP BY 1, 2 ), inflows AS ( SELECT @@ -253,6 +252,8 @@ class QueryStrings(Enum): source_id, 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step + WHERE 1 = 1 + AND min_time_step >= 600 GROUP BY 1, 2 ), outflows AS ( SELECT @@ -260,6 +261,8 @@ class QueryStrings(Enum): source_id, 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step + WHERE 1 = 1 + AND max_time_step < 1000 GROUP BY 1, 2 ) SELECT @@ -280,73 +283,66 @@ class QueryStrings(Enum): vt.id, vt.source_id, vt.time_step, - vt.x, + vt.distance - FIRST_VALUE(vt.distance) + OVER (PARITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS distance_meters, energy_model_id, + et.speed, + et.acceleration, vt.time_step - 
LAG(vt.time_step, 1) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) AS sim_step, + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS sim_step, SUM(power) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS - cumulative_power + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC + ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{date}\' - AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step - AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 AND et.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 ), cumulative_energy AS ( SELECT id, source_id, time_step, - x, + distance_meters, energy_model_id, + speed, + acceleration, cumulative_power * sim_step AS energy_joules FROM joined_trace - WHERE 1 = 1 - AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 ), binned_cumulative_energy AS ( SELECT source_id, - CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, - AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound - FROM cumulative_energy - GROUP BY 1, 2 - HAVING COUNT(DISTINCT time_step) > 1 - ), binned_speed_accel AS ( - SELECT - source_id, - CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, + CAST(distance_meters/10 AS INTEGER) * 10 AS distance_meters_bin, AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS 
speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(target_accel_no_noise_with_failsafe) AS accel_avg, AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound - FROM fact_vehicle_trace - WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy GROUP BY 1, 2 + HAVING 1 = 1 + AND COUNT(DISTINCT time_step) > 1 ), binned_energy_start_end AS ( SELECT DISTINCT source_id, id, - CAST(x/10 AS INTEGER) * 10 AS distance_meters_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end + CAST(distance_meters/10 AS INTEGER) * 10 AS distance_meters_bin, + FIRST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -359,27 +355,24 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, - COALESCE(bce.distance_meters_bin, bsa.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + COALESCE(bce.source_id, be.source_id) AS source_id, + COALESCE(bce.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, 
bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, - bsa.speed_avg, - bsa.speed_upper_bound, - bsa.speed_lower_bound, - bsa.accel_avg, - bsa.accel_upper_bound, - bsa.accel_lower_bound, + bce.speed_avg, + bce.speed_upper_bound, + bce.speed_lower_bound, + bce.accel_avg, + bce.accel_upper_bound, + bce.accel_lower_bound, be.instantaneous_energy_avg, be.instantaneous_energy_upper_bound, be.instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 - AND bce.source_id = bsa.source_id - AND bce.distance_meters_bin = bsa.distance_meters_bin FULL OUTER JOIN binned_energy be ON 1 = 1 - AND COALESCE(bce.source_id, bsa.source_id) = be.source_id - AND COALESCE(bce.distance_meters_bin, bce.distance_meters_bin) = be.distance_meters_bin + AND bce.source_id = be.source_id + AND bce.distance_meters_bin = be.distance_meters_bin ORDER BY distance_meters_bin ASC ;""" @@ -389,50 +382,38 @@ class QueryStrings(Enum): vt.id, vt.source_id, vt.time_step, - vt.x, energy_model_id, + et.speed, + et.acceleration, vt.time_step - LAG(vt.time_step, 1) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN 1 PRECEDING and CURRENT ROW) - AS sim_step, + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS sim_step, SUM(power) - OVER (PARTITION BY vt.id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) - AS cumulative_power + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC + ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 - AND vt.date = \'{date}\' - AND vt.partition_name = \'{partition}\' AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND vt.id = et.id AND vt.source_id = et.source_id AND vt.time_step = et.time_step - AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 AND et.energy_model_id = 
'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.x BETWEEN 500 AND 2300 + AND vt.time_step >= 600 ), cumulative_energy AS ( SELECT id, source_id, time_step, - x, energy_model_id, + speed, + acceleration, cumulative_power * sim_step AS energy_joules FROM joined_trace - WHERE 1 = 1 - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 ), binned_cumulative_energy AS ( - SELECT - source_id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, - AVG(energy_joules) AS cumulative_energy_avg, - AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, - AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound - FROM cumulative_energy - GROUP BY 1, 2 - HAVING COUNT(DISTINCT time_step) > 1 - ), binned_speed_accel AS ( SELECT source_id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, @@ -440,26 +421,24 @@ class QueryStrings(Enum): AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, AVG(target_accel_no_noise_with_failsafe) AS accel_avg, - AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) - AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) - AS accel_lower_bound - FROM fact_vehicle_trace - WHERE 1 = 1 - AND date = \'{date}\' - AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 - AND time_step >= 600 + AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, + AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(energy_joules) AS cumulative_energy_avg, + AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, + AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound + FROM cumulative_energy GROUP BY 1, 2 + HAVING 1 = 1 + AND COUNT(DISTINCT 
time_step) > 1 ), binned_energy_start_end AS ( SELECT DISTINCT source_id, id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, - FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) - AS energy_start, - LAST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(x/10 AS INTEGER) * 10 ORDER BY x ASC) - AS energy_end + FIRST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_start, + LAST_VALUE(energy_joules) + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -472,27 +451,24 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, bsa.source_id, be.source_id) AS source_id, - COALESCE(bce.time_seconds_bin, bsa.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + COALESCE(bce.source_id, be.source_id) AS source_id, + COALESCE(bce.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, - bsa.speed_avg, - bsa.speed_upper_bound, - bsa.speed_lower_bound, - bsa.accel_avg, - bsa.accel_upper_bound, - bsa.accel_lower_bound, + bce.speed_avg, + bce.speed_upper_bound, + bce.speed_lower_bound, + bce.accel_avg, + bce.accel_upper_bound, + bce.accel_lower_bound, be.instantaneous_energy_avg, be.instantaneous_energy_upper_bound, be.instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_speed_accel bsa ON 1 = 1 - AND bce.source_id = bsa.source_id - AND bce.time_seconds_bin = bsa.time_seconds_bin FULL OUTER JOIN binned_energy be ON 1 = 1 - AND COALESCE(bce.source_id, bsa.source_id) = be.source_id - AND COALESCE(bce.time_seconds_bin, bce.time_seconds_bin) = be.time_seconds_bin + AND bce.source_id = be.source_id + AND bce.time_seconds_bin = be.time_seconds_bin ORDER BY time_seconds_bin ASC ;""" From 
4b5cb4156dc3de8bd2a5bdf787645b3dadd6a0e8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:35:06 -0700 Subject: [PATCH 228/335] update values for warm-up time and horizon --- flow/data_pipeline/query.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 15914264f..ac2770af2 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -148,7 +148,7 @@ class QueryStrings(Enum): MAX(enter_time) - MIN(enter_time) AS total_time_seconds FROM min_time WHERE 1 = 1 - AND enter_time >= 600 + AND enter_time >= 720 GROUP BY 1 ) SELECT @@ -174,7 +174,7 @@ class QueryStrings(Enum): AND e.date = \'{date}\' AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND e.time_step >= 600 + AND e.time_step >= 720 WHERE 1 = 1 AND v.date = \'{date}\' AND v.partition_name = \'{partition}\' @@ -253,7 +253,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step WHERE 1 = 1 - AND min_time_step >= 600 + AND min_time_step >= 720 GROUP BY 1, 2 ), outflows AS ( SELECT @@ -262,7 +262,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step WHERE 1 = 1 - AND max_time_step < 1000 + AND max_time_step < 1200 GROUP BY 1, 2 ) SELECT @@ -305,7 +305,7 @@ class QueryStrings(Enum): AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 + AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT id, @@ -402,7 +402,7 @@ class QueryStrings(Enum): AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' AND vt.x BETWEEN 500 AND 2300 - AND vt.time_step >= 600 + AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT id, From 4c0358a6e626583f605ad146fc77b8b700f4b724 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:40:57 -0700 Subject: 
[PATCH 229/335] leaderboard chart agg query fixes --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index ac2770af2..5fa7937ec 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -474,12 +474,12 @@ class QueryStrings(Enum): LEADERBOARD_CHART_AGG = """ SELECT - date AS query_date, + date AS submission_date, source_id, energy_model_id, efficiency_meters_per_joules, efficiency_miles_per_gallon, throughput_per_hour - FROM leaderboard_chart + FROM leaderboard_chart ORDER BY query_date, source_id ASC ;""" From ce18a3677f23ebca0d724dc025f159c1bd55c2cf Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 29 May 2020 23:46:46 -0700 Subject: [PATCH 230/335] remove unnecessary references to "x" --- flow/data_pipeline/query.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 5fa7937ec..a664fd100 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -340,9 +340,11 @@ class QueryStrings(Enum): id, CAST(distance_meters/10 AS INTEGER) * 10 AS distance_meters_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_start, + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 + ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 ORDER BY x ASC) AS energy_end + OVER (PARTITION BY id, CAST(distance_meters/10 AS INTEGER) * 10 + ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT @@ -436,9 +438,11 @@ class QueryStrings(Enum): id, CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_start, + OVER (PARTITION BY id, 
CAST(time_step/60 AS INTEGER) * 60 + ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY x ASC) AS energy_end + OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( SELECT From 6c7c3c0d5a54b0487ae3626825a7a1b5591d5698 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sat, 30 May 2020 02:54:23 -0700 Subject: [PATCH 231/335] fix some error in query --- flow/core/experiment.py | 2 +- flow/data_pipeline/leaderboard_utils.py | 4 +++- flow/data_pipeline/query.py | 6 +++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 80126a306..64b46fe7d 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -154,7 +154,7 @@ def rl_actions(*_): extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) metadata = defaultdict(lambda: "") - metadata['network'] = network_name_translate(self.env.network.name.split('_')[0]) + metadata['network'] = network_name_translate(self.env.network.name.split('_20')[0]) for i in range(num_runs): ret = 0 diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 86f3fed07..0ab8dc6cd 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -6,7 +6,9 @@ network_name_map = {"highway": "Single-Lane Straight Road", - "ring": "Single-Lane Ring Road"} + "ring": "Single-Lane Ring Road", + "I-210_subnetwork": "I-210 without Ramps"} + def network_name_translate(network_name): """Translate network name to a human friendly name for the leaderboard.""" diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index f591aba26..7f309fb84 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -11,6 +11,8 @@ "fact_network_metrics_by_distance_agg": 
["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, + "POWER_DEMAND_MODEL": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]}, "leaderboard_chart": {"leaderboard_chart_agg": ["LEADERBOARD_CHART_AGG"]} @@ -211,8 +213,10 @@ class QueryStrings(Enum): AND date = \'{date}\' AND partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND ABS(SUM(energy_joules)) != 0 + AND ABS(energy_joules) != 0 GROUP BY 1, 2 + HAVING 1=1 + AND ABS(SUM(energy_joules)) != 0 ;""" LEADERBOARD_CHART = """ From f0aa7b4954283c47b988cb02790e8c98b867d102 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 31 May 2020 03:27:13 -0700 Subject: [PATCH 232/335] added metadata as a table, update realized_accel at timestep 0, fixed some error with previous query, add different edge constraint for different network --- examples/simulate.py | 7 ++- flow/core/experiment.py | 25 +++++++---- flow/core/kernel/vehicle/traci.py | 2 + flow/data_pipeline/data_pipeline.py | 8 ++-- flow/data_pipeline/lambda_function.py | 11 +++-- flow/data_pipeline/query.py | 64 ++++++++++++++++----------- flow/visualize/i210_replay.py | 2 +- 7 files changed, 74 insertions(+), 45 deletions(-) diff --git a/examples/simulate.py b/examples/simulate.py index 0b183649b..26ed916c0 100644 --- a/examples/simulate.py +++ b/examples/simulate.py @@ -60,6 +60,11 @@ def parse_args(args): help='specify which query should be run by lambda' 'for detail, see upload_to_s3 in data_pipeline.py' ) + parser.add_argument( + '--is_baseline', + action='store_true', + help='specifies whether this is a baseline run' + ) return parser.parse_known_args(args)[0] @@ -98,4 +103,4 @@ def parse_args(args): # Run for the specified number of 
rollouts. exp.run(flags.num_runs, convert_to_csv=flags.gen_emission, to_aws=flags.to_aws, - only_query=flags.only_query) + only_query=flags.only_query, is_baseline=flags.is_baseline) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index c2f6ed44a..0be1be176 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,7 +1,7 @@ """Contains an experiment class for running simulations.""" from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict import datetime @@ -90,7 +90,7 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query=""): + def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query="", is_baseline=False): """Run the given network for a set number of runs. Parameters @@ -111,6 +111,8 @@ def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only Specifies which queries should be automatically run when the simulation data gets uploaded to S3. If an empty str is passed in, then it implies no queries should be run on this. + is_baseline: bool + Specifies whether this is a baseline run. 
Returns ------- @@ -153,8 +155,10 @@ def rl_actions(*_): # data pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadata = defaultdict(lambda: "") - metadata['network'] = network_name_translate(self.env.network.name.split('_20')[0]) + metadata = defaultdict(lambda: []) + metadata['source_id'].append(source_id) + metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) + metadata['is_baseline'].append(str(is_baseline)) for i in range(num_runs): ret = 0 @@ -220,13 +224,18 @@ def rl_actions(*_): os.remove(emission_path) trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - generate_trajectory_from_flow(trajectory_table_path, extra_info, partition_name) + write_dict_to_csv(trajectory_table_path, extra_info) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + write_dict_to_csv(metadata_table_path, metadata) if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, source_id), - trajectory_table_path, metadata) + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + cur_date, source_id), + trajectory_table_path, {'network': metadata['network']}) + upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' + '{1}_METADATA.csv'.format(cur_date, source_id), + metadata_table_path) # delete the S3-only version of the trajectory file # os.remove(upload_file_path) diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 9ca112345..f59fa9ba0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -1186,6 +1186,8 @@ def get_accel_with_noise_with_failsafe(self, veh_id): def get_realized_accel(self, veh_id): """See parent class.""" + if self.get_distance(veh_id) == 0: + return 0 return 
(self.get_speed(veh_id) - self.get_previous_speed(veh_id)) / self.sim_step def get_2d_position(self, veh_id, error=-1001): diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index b1b9d1fef..f98c1fb60 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): +def write_dict_to_csv(data_path, extra_info, partition_name=None): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -65,7 +65,7 @@ def generate_trajectory_from_flow(data_path, extra_info, partition_name=None): return -def upload_to_s3(bucket_name, bucket_key, file_path, metadata): +def upload_to_s3(bucket_name, bucket_key, file_path, metadata={}): """Upload a file to S3 bucket. Parameters @@ -247,7 +247,7 @@ def repair_partition(self, table, query_date, partition): self.update_partition(table, query_date, partition) def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default"): + query_date="today", partition="default", **kwargs): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -277,7 +277,7 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu source_id = "flow_{}".format(partition.split('_')[1]) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id), + QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id, **kwargs), QueryExecutionContext={ 'Database': 'circles' }, diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 3a9f55ded..0985b0cdc 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,7 +2,7 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data -from flow.data_pipeline.query import tags, tables +from flow.data_pipeline.query import tags, tables, network_using_edge, X_CONSTRAINT, EDGE_CONSTRAINT s3 = boto3.client('s3') queryEngine = AthenaQuery() @@ -40,8 +40,11 @@ def lambda_handler(event, context): # initialize the queries for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) - # response = s3.head_object(Bucket=bucket, Key=key) - # required_query = response["Metadata"]["run-query"] + response = s3.head_object(Bucket=bucket, Key=key) + network_constraint = X_CONSTRAINT + if 'network' in response["Metadata"]: + if response["Metadata"]['network'] in network_using_edge: + network_constraint = EDGE_CONSTRAINT query_dict = tags[table] @@ -57,4 +60,4 @@ def lambda_handler(event, context): query_date, source_id, query_name) - queryEngine.run_query(query_name, result_location, query_date, partition) + queryEngine.run_query(query_name, result_location, query_date, partition, constraint=network_constraint) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 4bcc78b5f..31eea85fd 100644 --- a/flow/data_pipeline/query.py +++ 
b/flow/data_pipeline/query.py @@ -23,6 +23,12 @@ "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", "leaderboard_chart_agg"] +network_using_edge = ["I-210 without Ramps"] + +X_CONSTRAINT = "x BETWEEN 500 AND 2300" + +EDGE_CONSTRAINT = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" + VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT id, @@ -141,7 +147,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 + AND {constraint} GROUP BY 1, 2 ), agg AS ( SELECT @@ -180,10 +186,10 @@ class QueryStrings(Enum): WHERE 1 = 1 AND v.date = \'{date}\' AND v.partition_name = \'{partition}\' - AND v.x BETWEEN 500 AND 2300 + AND v.{constraint} GROUP BY 1, 2, 3 HAVING 1 = 1 - AND MIN(distance) - MIN(distance) > 10 + AND MAX(distance) - MIN(distance) > 10 AND COUNT(DISTINCT e.time_step) > 10 ) SELECT @@ -247,7 +253,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND x BETWEEN 500 AND 2300 + AND {constraint} GROUP BY 1, 2 ), inflows AS ( SELECT @@ -287,7 +293,7 @@ class QueryStrings(Enum): vt.source_id, vt.time_step, vt.distance - FIRST_VALUE(vt.distance) - OVER (PARITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS distance_meters, + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS distance_meters, energy_model_id, et.speed, et.acceleration, @@ -307,7 +313,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.x BETWEEN 500 AND 2300 + AND vt.{constraint} AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT @@ -327,9 +333,9 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(target_accel_no_noise_with_failsafe) AS accel_avg, - AVG(target_accel_no_noise_with_failsafe) + 
STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(acceleration) AS accel_avg, + AVG(acceleration) + STDDEV(acceleration) AS accel_upper_bound, + AVG(acceleration) - STDDEV(acceleration) AS accel_lower_bound, AVG(energy_joules) AS cumulative_energy_avg, AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound @@ -371,9 +377,9 @@ class QueryStrings(Enum): bce.accel_avg, bce.accel_upper_bound, bce.accel_lower_bound, - be.instantaneous_energy_avg, - be.instantaneous_energy_upper_bound, - be.instantaneous_energy_lower_bound + COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, + COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, + COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce FULL OUTER JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id @@ -406,7 +412,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.x BETWEEN 500 AND 2300 + AND vt.{constraint} AND vt.time_step >= 720 ), cumulative_energy AS ( SELECT @@ -425,9 +431,9 @@ class QueryStrings(Enum): AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, - AVG(target_accel_no_noise_with_failsafe) AS accel_avg, - AVG(target_accel_no_noise_with_failsafe) + STDDEV(target_accel_no_noise_with_failsafe) AS accel_upper_bound, - AVG(target_accel_no_noise_with_failsafe) - STDDEV(target_accel_no_noise_with_failsafe) AS accel_lower_bound, + AVG(acceleration) AS accel_avg, + AVG(acceleration) + STDDEV(acceleration) AS accel_upper_bound, + AVG(acceleration) - STDDEV(acceleration) AS accel_lower_bound, AVG(energy_joules) AS 
cumulative_energy_avg, AVG(energy_joules) + STDDEV(energy_joules) AS cumulative_energy_upper_bound, AVG(energy_joules) - STDDEV(energy_joules) AS cumulative_energy_lower_bound @@ -469,9 +475,9 @@ class QueryStrings(Enum): bce.accel_avg, bce.accel_upper_bound, bce.accel_lower_bound, - be.instantaneous_energy_avg, - be.instantaneous_energy_upper_bound, - be.instantaneous_energy_lower_bound + COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, + COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, + COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce FULL OUTER JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id @@ -481,12 +487,16 @@ class QueryStrings(Enum): LEADERBOARD_CHART_AGG = """ SELECT - date AS submission_date, - source_id, - energy_model_id, - efficiency_meters_per_joules, - efficiency_miles_per_gallon, - throughput_per_hour - FROM leaderboard_chart - ORDER BY query_date, source_id ASC + l.date AS submission_date, + l.source_id, + m.network, + m.is_baseline, + l.energy_model_id, + l.efficiency_meters_per_joules, + l.efficiency_miles_per_gallon, + l.throughput_per_hour + FROM leaderboard_chart AS l, metadata_table AS m + WHERE 1 = 1 + AND l.source_id = m.source_id + ORDER BY l.date, source_id ASC ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index cf5565442..faadd87b4 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -337,7 +337,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= cur_date = date.today().isoformat() upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path, str(args.only_query)[2:-2]) + upload_file_path) # print the location of the emission csv file print("\nGenerated emission file at 
" + output_path) From b46d5f933e2560e7c83f18ffa369dcc3081c9ab4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:21:00 -0700 Subject: [PATCH 233/335] minor re-formats --- flow/core/experiment.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 0be1be176..3589f7d36 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -230,14 +230,13 @@ def rl_actions(*_): if to_aws: cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - cur_date, source_id), - trajectory_table_path, {'network': metadata['network']}) - upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' - '{1}_METADATA.csv'.format(cur_date, source_id), + upload_to_s3('circles.data.pipeline', + 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), + trajectory_table_path, + {'network': metadata['network']}) + upload_to_s3('circles.data.pipeline', + 'metadata_table/date={0}/partition_name={1}_METADATA/', + '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) - # delete the S3-only version of the trajectory file - # os.remove(upload_file_path) - return info_dict From 15b646be3e8505db028712bcb905766ecf460c61 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:28:16 -0700 Subject: [PATCH 234/335] update docstring for write_dict_to_csv --- flow/data_pipeline/data_pipeline.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index f98c1fb60..4b2861a22 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -49,19 +49,9 @@ def write_dict_to_csv(data_path, extra_info, partition_name=None): extra information needed in the trajectory table, collected from flow partition_name: str the name 
of the partition to put this output to - Returns - ------- - output_file_path: str - the local path of the outputted csv file that should be used for - upload to s3 only, it does not the human readable column names and - will be deleted after uploading to s3. A copy of this file with all - the column name will remain in the ./data folder """ extra_info = pd.DataFrame.from_dict(extra_info) - # extra_info["partition"] = partition_name extra_info.to_csv(data_path, index=False) - # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" - # extra_info.to_csv(upload_only_file_path, index=False, header=False) return From 17802fdc3d7ce11edec7536852ac859af9c6f498 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:33:28 -0700 Subject: [PATCH 235/335] simplify network_name_translate --- flow/data_pipeline/leaderboard_utils.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 0ab8dc6cd..1b3cb64c3 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -12,9 +12,7 @@ def network_name_translate(network_name): """Translate network name to a human friendly name for the leaderboard.""" - if network_name in network_name_map: - return network_name_map[network_name] - return network_name + return network_name_map.get(network_name, network_name) def key_to_name(key): From a0b60c5c7743c725afa2c42bd183ffd44cee3cae Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:47:52 -0700 Subject: [PATCH 236/335] modify constraint specifications for run_query --- flow/data_pipeline/lambda_function.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 0985b0cdc..d296d99d1 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,7 +2,8 @@ import 
boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data -from flow.data_pipeline.query import tags, tables, network_using_edge, X_CONSTRAINT, EDGE_CONSTRAINT +from flow.data_pipeline.query import tags, tables, network_using_edge +from flow.data_pipeline.query import X_CONSTRAINT, EDGE_CONSTRAINT, WARMUP_STEPS, HORIZON_STEPS s3 = boto3.client('s3') queryEngine = AthenaQuery() @@ -38,13 +39,15 @@ def lambda_handler(event, context): records.append((bucket, key, table, query_date, partition)) # initialize the queries + start_constraint = WARMUP_STEPS + stop_constraint = WARMUP_STEPS + HORIZON_STEPS for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) response = s3.head_object(Bucket=bucket, Key=key) - network_constraint = X_CONSTRAINT + loc_constraint = X_CONSTRAINT if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: - network_constraint = EDGE_CONSTRAINT + loc_constraint = EDGE_CONSTRAINT query_dict = tags[table] @@ -60,4 +63,10 @@ def lambda_handler(event, context): query_date, source_id, query_name) - queryEngine.run_query(query_name, result_location, query_date, partition, constraint=network_constraint) + queryEngine.run_query(query_name, + result_location, + query_date, + partition, + loc_constraint=loc_constraint, + start_constraint=start_constraint, + stop_constraint=stop_constraint) From 79afdae957f06115f0efbb4c465803b878f27772 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:53:26 -0700 Subject: [PATCH 237/335] rename loc_filter, add time filters --- flow/data_pipeline/query.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 31eea85fd..2d70daed7 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -25,9 +25,13 @@ network_using_edge = 
["I-210 without Ramps"] -X_CONSTRAINT = "x BETWEEN 500 AND 2300" +X_FILTER = "x BETWEEN 500 AND 2300" -EDGE_CONSTRAINT = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" +EDGE_FILTER = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" + +WARMUP_STEPS = 600 * 3 * 0.4 + +HORIZON_STEPS = 1000 * 3 * 0.4 VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ SELECT @@ -147,7 +151,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND {constraint} + AND {loc_filter} GROUP BY 1, 2 ), agg AS ( SELECT @@ -156,7 +160,7 @@ class QueryStrings(Enum): MAX(enter_time) - MIN(enter_time) AS total_time_seconds FROM min_time WHERE 1 = 1 - AND enter_time >= 720 + AND enter_time >= {start_filter} GROUP BY 1 ) SELECT @@ -182,11 +186,11 @@ class QueryStrings(Enum): AND e.date = \'{date}\' AND e.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' - AND e.time_step >= 720 + AND e.time_step >= {start_filter} WHERE 1 = 1 AND v.date = \'{date}\' AND v.partition_name = \'{partition}\' - AND v.{constraint} + AND v.{loc_filter} GROUP BY 1, 2, 3 HAVING 1 = 1 AND MAX(distance) - MIN(distance) > 10 @@ -253,7 +257,7 @@ class QueryStrings(Enum): WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}\' - AND {constraint} + AND {loc_filter} GROUP BY 1, 2 ), inflows AS ( SELECT @@ -262,7 +266,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS inflow_rate FROM min_max_time_step WHERE 1 = 1 - AND min_time_step >= 720 + AND min_time_step >= {start_filter} GROUP BY 1, 2 ), outflows AS ( SELECT @@ -271,7 +275,7 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step WHERE 1 = 1 - AND max_time_step < 1200 + AND max_time_step < {stop_filter} GROUP BY 1, 2 ) SELECT @@ -313,8 +317,8 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.{constraint} - AND vt.time_step >= 720 
+ AND vt.{loc_filter} + AND vt.time_step >= {start_filter} ), cumulative_energy AS ( SELECT id, @@ -412,8 +416,8 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{date}\' AND vt.partition_name = \'{partition}\' - AND vt.{constraint} - AND vt.time_step >= 720 + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} ), cumulative_energy AS ( SELECT id, From 09a26a3cbf393d59e0ba6b5a3d7678ce96991dff Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:54:18 -0700 Subject: [PATCH 238/335] rename constraints to filters --- flow/data_pipeline/lambda_function.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index d296d99d1..dfcd95f43 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -39,15 +39,15 @@ def lambda_handler(event, context): records.append((bucket, key, table, query_date, partition)) # initialize the queries - start_constraint = WARMUP_STEPS - stop_constraint = WARMUP_STEPS + HORIZON_STEPS + start_filter = WARMUP_STEPS + stop_filter = WARMUP_STEPS + HORIZON_STEPS for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) response = s3.head_object(Bucket=bucket, Key=key) - loc_constraint = X_CONSTRAINT + loc_filter = X_FILTER if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: - loc_constraint = EDGE_CONSTRAINT + loc_filter = EDGE_FILTER query_dict = tags[table] @@ -67,6 +67,6 @@ def lambda_handler(event, context): result_location, query_date, partition, - loc_constraint=loc_constraint, - start_constraint=start_constraint, - stop_constraint=stop_constraint) + loc_filter=loc_filter, + start_filter=start_filter, + stop_filter=stop_filter) From f7a4d6ed4d22065585b97a0a7333636e439d1604 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:54:53 -0700 Subject: [PATCH 
239/335] rename constraints to filters --- flow/data_pipeline/lambda_function.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index dfcd95f43..00cf0fba5 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -3,7 +3,7 @@ from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data from flow.data_pipeline.query import tags, tables, network_using_edge -from flow.data_pipeline.query import X_CONSTRAINT, EDGE_CONSTRAINT, WARMUP_STEPS, HORIZON_STEPS +from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS s3 = boto3.client('s3') queryEngine = AthenaQuery() From 830516557fc9fda2e113a5025dd2d843bfd3f577 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 21:57:15 -0700 Subject: [PATCH 240/335] tweak queries for styling --- flow/data_pipeline/query.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 2d70daed7..845609494 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -206,7 +206,7 @@ class QueryStrings(Enum): 19972 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon FROM sub_fact_vehicle_trace WHERE 1 = 1 - AND ABS(power_watts * time_step_size_seconds) > 0 + AND power_watts * time_step_size_seconds != 0 ; """ @@ -225,7 +225,7 @@ class QueryStrings(Enum): AND energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' GROUP BY 1, 2 HAVING 1=1 - AND ABS(SUM(energy_joules)) != 0 + AND SUM(energy_joules) != 0 ;""" LEADERBOARD_CHART = """ @@ -237,13 +237,13 @@ class QueryStrings(Enum): t.throughput_per_hour FROM fact_network_throughput_agg AS t JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 - AND t.date = \'{date}\' - AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' 
AND e.date = \'{date}\' AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' WHERE 1 = 1 + AND t.date = \'{date}\' + AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' ;""" FACT_NETWORK_INFLOWS_OUTFLOWS = """ From ac8c5457506715c9e8beb4ceafc6771a174c1910 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 22:00:32 -0700 Subject: [PATCH 241/335] remove outer joins to avoid edge cases --- flow/data_pipeline/query.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 845609494..a2ef711cc 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -370,8 +370,8 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, be.source_id) AS source_id, - COALESCE(bce.distance_meters_bin, be.distance_meters_bin) AS distance_meters_bin, + bce.source_id AS source_id, + bce.distance_meters_bin AS distance_meters_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, @@ -385,7 +385,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_energy be ON 1 = 1 + JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.distance_meters_bin = be.distance_meters_bin ORDER BY distance_meters_bin ASC @@ -468,8 +468,8 @@ class QueryStrings(Enum): GROUP BY 1, 2 ) SELECT - COALESCE(bce.source_id, be.source_id) AS source_id, - COALESCE(bce.time_seconds_bin, be.time_seconds_bin) AS time_seconds_bin, + bce.source_id AS source_id, + bce.time_seconds_bin AS time_seconds_bin, bce.cumulative_energy_avg, bce.cumulative_energy_lower_bound, bce.cumulative_energy_upper_bound, 
@@ -483,7 +483,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound FROM binned_cumulative_energy bce - FULL OUTER JOIN binned_energy be ON 1 = 1 + JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.time_seconds_bin = be.time_seconds_bin ORDER BY time_seconds_bin ASC From f9f75af78ca28cfba3360476ebec26f44c931e85 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 31 May 2020 22:04:50 -0700 Subject: [PATCH 242/335] rename query_date to submission-date --- flow/data_pipeline/data_pipeline.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 4b2861a22..5669fb6a8 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -203,20 +203,20 @@ def wait_for_execution(self, execution_id): return False return True - def update_partition(self, table, query_date, partition): + def update_partition(self, table, submission_date, partition): """Load the given partition to the trajectory_table on Athena. 
Parameters ---------- table : str the name of the table to update - query_date : str + submission_date : str the new partition date that needs to be loaded partition : str the new partition that needs to be loaded """ response = self.client.start_query_execution( - QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=query_date, + QueryString=QueryStrings['UPDATE_PARTITION'].value.format(table=table, date=submission_date, partition=partition), QueryExecutionContext={ 'Database': 'circles' @@ -225,19 +225,19 @@ def update_partition(self, table, query_date, partition): ) if self.wait_for_execution(response['QueryExecutionId']): raise RuntimeError("update partition timed out") - self.existing_partitions[table].append("date={}/partition_name={}".format(query_date, partition)) + self.existing_partitions[table].append("date={}/partition_name={}".format(submission_date, partition)) return - def repair_partition(self, table, query_date, partition): + def repair_partition(self, table, submission_date, partition): """Load the missing partitions.""" if table not in self.existing_partitions.keys(): self.existing_partitions[table] = self.get_existing_partitions(table) - if "date={}/partition_name={}".format(query_date, partition) not in \ + if "date={}/partition_name={}".format(submission_date, partition) not in \ self.existing_partitions[table]: - self.update_partition(table, query_date, partition) + self.update_partition(table, submission_date, partition) def run_query(self, query_name, result_location="s3://circles.data.pipeline/result/", - query_date="today", partition="default", **kwargs): + submission_date="today", partition="default", **kwargs): """Start the execution of a query, does not wait for it to finish. 
Parameters @@ -246,7 +246,7 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu name of the query in QueryStrings enum that will be run result_location: str, optional location on the S3 bucket where the result will be stored - query_date : str + submission_date : str name of the partition date to run this query on partition: str, optional name of the partition to run this query on @@ -261,13 +261,13 @@ def run_query(self, query_name, result_location="s3://circles.data.pipeline/resu if query_name not in QueryStrings.__members__: raise ValueError("query not existed: please add it to query.py") - if query_date == "today": - query_date = date.today().isoformat() + if submission_date == "today": + submission_date = date.today().isoformat() source_id = "flow_{}".format(partition.split('_')[1]) response = self.client.start_query_execution( - QueryString=QueryStrings[query_name].value.format(date=query_date, partition=source_id, **kwargs), + QueryString=QueryStrings[query_name].value.format(date=submission_date, partition=source_id, **kwargs), QueryExecutionContext={ 'Database': 'circles' }, From 660891a540a8e1a0c567659ab05333b7ce86d349 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Sun, 31 May 2020 23:19:34 -0700 Subject: [PATCH 243/335] write simulation result to disk every 100 time step --- flow/controllers/velocity_controllers.py | 3 +- flow/core/experiment.py | 37 ++++++++++++++---------- flow/core/kernel/vehicle/traci.py | 2 ++ flow/data_pipeline/data_pipeline.py | 4 +-- flow/data_pipeline/query.py | 2 +- flow/visualize/i210_replay.py | 17 +++++++++-- 6 files changed, 43 insertions(+), 22 deletions(-) diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 62ce15beb..e1f69de98 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -120,7 +120,8 @@ def get_accel(self, env): env.k.vehicle.get_edge(self.veh_id)[0] == ":"\ or 
(self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\ - or edge in self.no_control_edges: + or (self.no_control_edges is not None and len(self.no_control_edges) > 0 + and edge in self.no_control_edges): # TODO(@evinitsky) put back # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 0be1be176..e5fe0cb4a 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -4,10 +4,9 @@ from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict -import datetime +from datetime import datetime, timezone import logging import time -from datetime import date import os import numpy as np import uuid @@ -86,7 +85,7 @@ def __init__(self, flow_params, custom_callables=None): self.env = create_env() logging.info(" Starting experiment {} at {}".format( - self.env.network.name, str(datetime.datetime.utcnow()))) + self.env.network.name, str(datetime.utcnow()))) logging.info("Initializing environment.") @@ -156,10 +155,19 @@ def rl_actions(*_): extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) metadata = defaultdict(lambda: []) + # collect current time + cur_datetime = datetime.now(timezone.utc) + cur_date = cur_datetime.date().isoformat() + cur_time = cur_datetime.time().isoformat() metadata['source_id'].append(source_id) + metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(is_baseline)) + dir_path = self.env.sim_params.emission_path + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) 
+ for i in range(num_runs): ret = 0 vel = [] @@ -181,6 +189,11 @@ def rl_actions(*_): extra_info["source_id"].extend([source_id] * len(veh_ids)) extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) + # write to disk every 100 steps + if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: + write_dict_to_csv(trajectory_table_path, extra_info, not j) + extra_info.clear() + # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) @@ -212,32 +225,26 @@ def rl_actions(*_): time.sleep(0.1) # collect the location of the emission file - dir_path = self.env.sim_params.emission_path emission_filename = \ "{0}-emission.xml".format(self.env.network.name) emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv - emission_to_csv(emission_path) + # FIXME: Brent: produce seg fault with large CSV + # emission_to_csv(emission_path) # Delete the .xml version of the emission file. 
os.remove(emission_path) - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) write_dict_to_csv(trajectory_table_path, extra_info) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) - write_dict_to_csv(metadata_table_path, metadata) + write_dict_to_csv(metadata_table_path, metadata, True) if to_aws: - cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - cur_date, source_id), - trajectory_table_path, {'network': metadata['network']}) upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) - - # delete the S3-only version of the trajectory file - # os.remove(upload_file_path) + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + cur_date, source_id), + trajectory_table_path, {'network': metadata['network'][0]}) return info_dict diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index f59fa9ba0..2a6a4da12 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -87,6 +87,8 @@ def __init__(self, # old speeds used to compute accelerations self.previous_speeds = {} + # The time that previous speed is recorded, used to calculate realized_accel + self.previous_time = 0 def initialize(self, vehicles): """Initialize vehicle state information. 
diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index f98c1fb60..366dc9bd1 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -38,7 +38,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): return output_file_path -def write_dict_to_csv(data_path, extra_info, partition_name=None): +def write_dict_to_csv(data_path, extra_info, include_header=False): """Generate desired output for the trajectory_table based only on flow output. Parameters @@ -59,7 +59,7 @@ def write_dict_to_csv(data_path, extra_info, partition_name=None): """ extra_info = pd.DataFrame.from_dict(extra_info) # extra_info["partition"] = partition_name - extra_info.to_csv(data_path, index=False) + extra_info.to_csv(data_path, mode='a+', index=False, header=include_header) # upload_only_file_path = data_path[:-4] + "_upload" + ".csv" # extra_info.to_csv(upload_only_file_path, index=False, header=False) return diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 31eea85fd..feb597143 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -21,7 +21,7 @@ tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", - "leaderboard_chart_agg"] + "leaderboard_chart_agg", "metadata_table"] network_using_edge = ["I-210 without Ramps"] diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index faadd87b4..a70fd72e3 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -1,6 +1,6 @@ """Transfer and replay for i210 environment.""" import argparse -from datetime import datetime +from datetime import datetime, timezone from collections import defaultdict from copy import deepcopy import numpy as np @@ 
-32,7 +32,8 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables -from flow.data_pipeline.data_pipeline import generate_trajectory_from_flow, upload_to_s3, get_extra_info +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info +from flow.data_pipeline.leaderboard_utils import network_name_translate import uuid EXAMPLE_USAGE = """ @@ -208,8 +209,18 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) + # date pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) + metadata = defaultdict(lambda: []) + # collect current time + cur_datetime = datetime.now(timezone.utc) + cur_date = cur_datetime.date().isoformat() + cur_time = cur_datetime.time().isoformat() + metadata['source_id'].append(source_id) + metadata['submission_time'].append(cur_time) + metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) + metadata['is_baseline'].append(str(is_baseline)) i = 0 while i < args.num_rollouts: @@ -330,7 +341,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # generate the trajectory output file trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - upload_file_path = generate_trajectory_from_flow(trajectory_table_path, extra_info) + upload_file_path = write_dict_to_csv(trajectory_table_path, extra_info) # upload to s3 if asked if args.use_s3: From d617c2f452bc924290145d1b59c2abb17e4d9ed8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 1 Jun 2020 10:19:19 -0700 Subject: [PATCH 244/335] reformat upload_to_s3, add missing comma --- flow/core/experiment.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 
d849def74..64674dfda 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -240,11 +240,13 @@ def rl_actions(*_): write_dict_to_csv(metadata_table_path, metadata, True) if to_aws: - upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' - '{1}_METADATA.csv'.format(cur_date, source_id), + upload_to_s3('circles.data.pipeline', + 'metadata_table/date={0}/partition_name={1}_METADATA/', + '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( - cur_date, source_id), - trajectory_table_path, {'network': metadata['network'][0]}) + upload_to_s3('circles.data.pipeline', + 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), + trajectory_table_path, + {'network': metadata['network'][0]}) return info_dict From a7eda70c3b69a3a183faa5228a45a88c4751085c Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 10:38:08 -0700 Subject: [PATCH 245/335] fix i210 replay data collection --- flow/data_pipeline/data_pipeline.py | 4 +++- flow/data_pipeline/query.py | 2 +- flow/visualize/i210_replay.py | 21 +++++++++++++++------ 3 files changed, 19 insertions(+), 8 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 3b6f953d9..ea5307dad 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -48,7 +48,9 @@ def write_dict_to_csv(data_path, extra_info, include_header=False): extra_info: dict extra information needed in the trajectory table, collected from flow include_header: bool - whether or not to include the header in the output + whether or not to include the header in the output, this should be set to + True for the first write to the a empty or newly created CSV, and set to + False for subsequent appends. 
""" extra_info = pd.DataFrame.from_dict(extra_info) extra_info.to_csv(data_path, mode='a+', index=False, header=include_header) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 29828e685..89432e260 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -502,5 +502,5 @@ class QueryStrings(Enum): FROM leaderboard_chart AS l, metadata_table AS m WHERE 1 = 1 AND l.source_id = m.source_id - ORDER BY l.date, source_id ASC + ORDER BY l.date, m.submission_time, l.source_id ASC ;""" diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index a70fd72e3..b3b4fcb78 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -220,7 +220,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['source_id'].append(source_id) metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) - metadata['is_baseline'].append(str(is_baseline)) + metadata['is_baseline'].append(str(args.is_baseline)) i = 0 while i < args.num_rollouts: @@ -341,14 +341,18 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # generate the trajectory output file trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - upload_file_path = write_dict_to_csv(trajectory_table_path, extra_info) + write_dict_to_csv(trajectory_table_path, extra_info, True) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + write_dict_to_csv(metadata_table_path, metadata, True) # upload to s3 if asked if args.use_s3: - cur_date = date.today().isoformat() - upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={}/partition_name={}/{}.csv'.format( - cur_date, source_id, upload_file_path.split('/')[-1].split('_upload')[0]), - upload_file_path) + upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' + 
'{1}_METADATA.csv'.format(cur_date, source_id), + metadata_table_path) + upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format( + cur_date, source_id), + trajectory_table_path, {'network': metadata['network'][0]}) # print the location of the emission csv file print("\nGenerated emission file at " + output_path) @@ -465,6 +469,11 @@ def create_parser(): help='specify which query should be run by lambda' 'for detail, see upload_to_s3 in data_pipeline.py' ) + parser.add_argument( + '--is_baseline', + action='store_true', + help='specifies whether this is a baseline run' + ) return parser From f5f000ee58122c7a994b00e4a9e049b88f84dac4 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 10:41:38 -0700 Subject: [PATCH 246/335] remove extra comma --- flow/core/experiment.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 64674dfda..8ac9c3699 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -241,7 +241,7 @@ def rl_actions(*_): if to_aws: upload_to_s3('circles.data.pipeline', - 'metadata_table/date={0}/partition_name={1}_METADATA/', + 'metadata_table/date={0}/partition_name={1}_METADATA/' '{1}_METADATA.csv'.format(cur_date, source_id), metadata_table_path) upload_to_s3('circles.data.pipeline', From 1e42556d58c418311e39e3896304f00291ae510e Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 14:35:30 -0700 Subject: [PATCH 247/335] fix network name mapping for highway-single --- flow/data_pipeline/leaderboard_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 1b3cb64c3..f0c4178df 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,7 +5,7 @@ from io import StringIO -network_name_map = {"highway": "Single-Lane Straight Road", +network_name_map = 
{"highway-single": "Single-Lane Straight Road", "ring": "Single-Lane Ring Road", "I-210_subnetwork": "I-210 without Ramps"} From bff9e47d01e0b48e7cb446d267f1ad165476d4f9 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 1 Jun 2020 20:58:14 -0700 Subject: [PATCH 248/335] Load weights into rllib --- examples/train.py | 27 +++-- .../imitation_learning/imitating_network.py | 5 +- .../imitation_learning/imitation_trainer.py | 49 +++++++++ .../imitation_learning/keras_utils.py | 1 + .../imitation_learning/ppo_model.py | 55 +++++----- flow/controllers/imitation_learning/run.py | 17 +-- .../controllers/imitation_learning/trainer.py | 100 +++++++++++++++++- flow/controllers/imitation_learning/utils.py | 5 +- flow/controllers/velocity_controllers.py | 8 +- flow/visualize/visualizer_rllib.py | 17 ++- 10 files changed, 229 insertions(+), 55 deletions(-) create mode 100644 flow/controllers/imitation_learning/imitation_trainer.py diff --git a/examples/train.py b/examples/train.py index d80f5a1e2..083593548 100644 --- a/examples/train.py +++ b/examples/train.py @@ -185,32 +185,46 @@ def setup_exps_rllib(flow_params, alg_run = flags.algorithm.upper() if alg_run == "PPO": - from flow.algorithms.custom_ppo import CustomPPOTrainer - from ray.rllib.agents.ppo import DEFAULT_CONFIG - alg_run = CustomPPOTrainer - config = deepcopy(DEFAULT_CONFIG) + from ray import tune + from ray.tune.registry import register_env + try: + from ray.rllib.agents.agent import get_agent_class + except ImportError: + from ray.rllib.agents.registry import get_agent_class + + horizon = flow_params['env'].horizon + + alg_run = "PPO" + + agent_cls = get_agent_class(alg_run) + config = deepcopy(agent_cls._default_config) config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32]}) + config["model"].update({"fcnet_hiddens": [32, 32, 32]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.995 # discount rate config["use_gae"] = True 
config["lambda"] = 0.97 config["kl_target"] = 0.02 - config["num_sgd_iter"] = 10 + # TODO: restore this to 10 + config["num_sgd_iter"] = 1 + # config["num_sgd_iter"] = 10 if flags.grid_search: config["lambda"] = tune.grid_search([0.5, 0.9]) config["lr"] = tune.grid_search([5e-4, 5e-5]) if flags.load_weights_path: from flow.controllers.imitation_learning.ppo_model import PPONetwork + from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog # Register custom model ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) # set model to the custom model for run config['model']['custom_model'] = "PPO_loaded_weights" config['model']['custom_options'] = {"h5_load_path": flags.load_weights_path} + config['observation_filter'] = 'NoFilter' + alg_run = Imitation_PPO_Trainable elif alg_run == "CENTRALIZEDPPO": from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel @@ -402,6 +416,7 @@ def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) if flags.local_mode: + print("LOCAL MODE") ray.init(local_mode=True) else: ray.init() diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 569ec6fd1..47cdf3064 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -105,7 +105,7 @@ def get_accel_from_observation(self, observation): Returns ------- numpy array - one element numpy array containing accleeration + one element numpy array containing acceleration """ # network expects an array of arrays (matrix); if single observation (no batch), convert to array of arrays @@ -116,7 +116,8 @@ def get_accel_from_observation(self, observation): if self.stochastic: mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] var = np.exp(2 * log_std) - action = 
np.random.multivariate_normal(mean[0], var) + cov_matrix = np.diag(var[0]) + action = np.random.multivariate_normal(mean[0], cov_matrix) return action else: return network_output diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py new file mode 100644 index 000000000..503bb6d07 --- /dev/null +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -0,0 +1,49 @@ +from ray import tune +from flow.controllers.imitation_learning.ppo_model import * +from ray.rllib.agents import ppo +try: + from ray.rllib.agents.agent import get_agent_class +except ImportError: + from ray.rllib.agents.registry import get_agent_class + + +class Imitation_PPO_Trainable(tune.Trainable): + def _setup(self, config): + env_name = config['env'] + # agent_cls = get_agent_class(config['env_config']['run']) + self.trainer = ppo.PPOTrainer(env=env_name, config=config) + print("\n\n\nPOLICY_NAME") + policy_id = list(self.trainer.get_weights().keys())[0] + print(policy_id) + print("\n\n\n") + self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) + print("here") + + def _train(self): + print("TRAIN CALLED") + # return self.trainer.train() + return self.trainer.train() + + # def train(self): + # print("TRAIN CALLED") + # return self.trainer.train() + + def _save(self, tmp_checkpoint_dir): + return self.trainer._save(tmp_checkpoint_dir) + + def _restore(self, checkpoint): + self.trainer.restore(checkpoint) + + def _log_result(self, result): + self.trainer._log_result(result) + + def _stop(self): + self.trainer.stop() + + def _export_model(self, export_formats, export_dir): + return self.trainer.export_model(export_formats, export_dir=export_dir) + + + + + diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 59928affc..34b32f692 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ 
b/flow/controllers/imitation_learning/keras_utils.py @@ -108,6 +108,7 @@ def nll_loss(y, network_output): return nll_loss + def compare_weights(ppo_model, imitation_path): imitation_model = tf.keras.models.load_model(imitation_path, custom_objects={'nll_loss': negative_log_likelihood_loss(0.5)}) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index a15eb6cc5..f7490d180 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -36,9 +36,9 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name, **k # setup model with weights loaded in from model in h5 path self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) + self.register_variables(self.base_model.variables) # register variables for base model - self.register_variables(self.base_model.variables) # compare_weights(self.base_model, "/Users/akashvelu/Desktop/latest_run/imitation_model.h5") @@ -60,42 +60,38 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat path to h5 file containing weights of a pretrained network (empty string if no such file) """ - if imitation_h5_path: - # set base model to be loaded model - self.base_model = tf.keras.models.load_model(imitation_h5_path) + activation = model_config.get("fcnet_activation") + hiddens = model_config.get("fcnet_hiddens", []) + vf_share_layers = model_config.get("vf_share_layers") - else: - activation = model_config.get("fcnet_activation") - hiddens = model_config.get("fcnet_hiddens", []) - vf_share_layers = model_config.get("vf_share_layers") + # set up model + inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") + curr_layer = inp_layer - # set up model - inp_layer = tf.keras.layers.Input(shape=obs_space.shape, name="input_layer") - curr_layer = inp_layer + # hidden layers and output for policy + i = 1 + for size in hiddens: + curr_layer = 
tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), + activation=activation)(curr_layer) + i += 1 + + output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)( + curr_layer) - # hidden layers and output for policy + # set up value function + if not vf_share_layers: + curr_layer = inp_layer i = 1 for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="policy_hidden_layer_{}".format(i), + curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), activation=activation)(curr_layer) i += 1 - output_layer_policy = tf.keras.layers.Dense(num_outputs, name="policy_output_layer", activation=None)( - curr_layer) - - # set up value function - if not vf_share_layers: - curr_layer = inp_layer - i = 1 - for size in hiddens: - curr_layer = tf.keras.layers.Dense(size, name="vf_hidden_layer_{}".format(i), - activation=activation)(curr_layer) - i += 1 + output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) - output_layer_vf = tf.keras.layers.Dense(1, name="vf_output_layer", activation=None)(curr_layer) + # build model from layers + self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - # build model from layers - self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) @@ -116,7 +112,7 @@ def forward(self, input_dict, state, seq_lens): (outputs, state) Tuple, first element is policy output, second element state """ - + # print(self.base_model.get_weights()) policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out return policy_out, state @@ -141,4 +137,5 @@ def import_from_h5(self, import_file): import_file: str filepath to h5 file """ - self.setup_model(self.obs_space, self.action_space, self.model_config, self.num_outputs, import_file) + print("LOADING WEIGHTS FROM H6") + self.base_model.load_weights(import_file) diff --git 
a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 8ebdfb899..aea231be7 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -74,9 +74,13 @@ def main(): parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') + parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') + parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') + parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') @@ -86,6 +90,7 @@ def main(): parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent. 
') parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') + args = parser.parse_args() # convert args to dictionary @@ -94,20 +99,18 @@ def main(): # change this to determine number and size of hidden layers params["fcnet_hiddens"] = [32, 32, 32] - assert args.n_iter>1, ('DAgger needs >1 iteration') - # run training - train = Runner(params) - train.run_training_loop() + runner = Runner(params) + runner.run_training_loop() # save model after training if params['save_model'] == 1: - train.save_controller_network() - train.save_controller_for_PPO() + runner.save_controller_network() + runner.save_controller_for_PPO() # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout - train.evaluate() + runner.evaluate() if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index d45c30bc7..67e00eb25 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -13,6 +13,7 @@ import tensorflow as tf from utils import * from utils_tensorflow import * +from flow.controllers.imitation_learning.keras_utils import * class Trainer(object): """ @@ -227,6 +228,93 @@ def evaluate_controller(self, num_trajs = 10): print("Total imitator steps: ", total_imitator_steps) print("Total expert steps: ", total_expert_steps) + def learn_value_function(self, num_samples, num_iterations, num_grad_steps): + # init value function neural net + vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) + vf_net.compile(loss='mean_squared_error', optimizer = 'adam') + + max_decel = self.flow_params['env'].additional_params['max_decel'] + # 
collect trajectory samples to train on + trajectories, envsteps_this_batch = sample_trajectories(self.env, self.controllers, self.action_network, + num_samples, self.params['ep_len'], self.multiagent, + use_expert=False, v_des=self.params['v_des'], + max_decel=max_decel) + observations = np.array([]) + rewards = np.array([]) + next_observations = np.array([]) + + # accumulate trajectory data + for traj in trajectories: + observations = np.append(observations, traj['observations']) + rewards = np.append(rewards, traj['rewards']) + next_observations = np.append(next_observations, traj['next_observations']) + + # iterate over data multiple times (labels change every iteration) + for _ in range(num_iterations): + # form labels + next_state_value_preds = vf_net.predict(next_observations).flatten() + next_state_value_preds[np.isnan(next_state_value_preds)] = 0 + labels = rewards + next_state_value_preds + for i in range(num_grad_steps): + vf_net.train_on_batch(observations, labels) + + return vf_net + + + + def save_controller_for_PPO(self): + """ + Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, and save this model. 
+ + Parameters + ---------- + load_path : save_path + path to h5 file to save to + """ + + vf_net = self.learn_value_function(self.params['vf_batch_size'], self.params['num_vf_iters'], self.params['num_agent_train_steps_per_iter']) + + input = tf.keras.layers.Input(self.action_network.model.input.shape[1].value) + curr_layer = input + + # number of hidden layers + num_layers = len(self.action_network.model.layers) - 2 + + # build layers for policy + for i in range(num_layers): + size = self.action_network.model.layers[i + 1].output.shape[1].value + activation = tf.keras.activations.serialize(self.action_network.model.layers[i + 1].activation) + curr_layer = tf.keras.layers.Dense(size, activation=activation, name="policy_hidden_layer_{}".format(i + 1))(curr_layer) + output_layer_policy = tf.keras.layers.Dense(self.action_network.model.output.shape[1].value, activation=None, name="policy_output_layer")(curr_layer) + + # build layers for value function + curr_layer = input + for i in range(num_layers): + size = self.fcnet_hiddens[i] + curr_layer = tf.keras.layers.Dense(size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) + output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) + + ppo_model = tf.keras.Model(inputs=input, outputs=[output_layer_policy, output_layer_vf], name="ppo_model") + + # set the policy weights to those learned from imitation + for i in range(num_layers): + policy_layer = ppo_model.get_layer(name="policy_hidden_layer_{}".format(i + 1)) + policy_layer.set_weights(self.action_network.model.layers[i + 1].get_weights()) + policy_output = ppo_model.get_layer("policy_output_layer") + policy_output.set_weights(self.action_network.model.layers[-1].get_weights()) + + # set value function weights to those learned + num_vf_layers = len(vf_net.layers) - 2 + for i in range(num_vf_layers): + vf_layer = ppo_model.get_layer(name-'vf_hidden_layer{}'.format(i + 1)) + vf_layer.set_weights(vf_net.layers[i 
+ 1].get_weights()) + vf_output = ppo_model.get_layer("vf_output_layer") + vf_output.set_weights(vf_net.layers[-1].get_weights()) + + + # save the model (as a h5 file) + ppo_model.save(self.params['PPO_save_path']) + def save_controller_network(self): """ @@ -235,8 +323,10 @@ def save_controller_network(self): print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) - def save_controller_for_PPO(self): - """ - Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. - """ - self.action_network.save_network_PPO(self.params['PPO_save_path']) + + + # def save_controller_for_PPO(self): + # """ + # Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. + # """ + # self.action_network.save_network_PPO(self.params['PPO_save_path']) diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 483b76e7d..5d6134500 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -205,6 +205,8 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector print("Controller collecting trajectory: ", controller) action = controller.get_action(env) + if type(action) == tuple: + mean, log_std = action[1], action[2] # action should be a scalar acceleration if type(action) == np.ndarray: @@ -235,7 +237,8 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector terminate_rollout = done_dict['__all__'] or (traj_length == max_trajectory_length) for vehicle_id in vehicle_ids: - next_observations.append(observation_dict.get(vehicle_id, None)) + # default next observation to nans + next_observations.append(observation_dict.get(vehicle_id, 
np.empty((env.observation_space.shape[0], )))) rewards.append(reward_dict.get(vehicle_id, 0)) terminals.append(terminate_rollout) diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 62ce15beb..7ad12bbe8 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -118,9 +118,11 @@ def get_accel(self, env): if (self.find_intersection_dist(env) <= 10 and \ env.k.vehicle.get_edge(self.veh_id) in self.danger_edges) or \ env.k.vehicle.get_edge(self.veh_id)[0] == ":"\ - or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] - or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\ - or edge in self.no_control_edges: + or (self.control_length and env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] \ + and env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]) or \ + (self.no_control_edges and len(self.no_control_edges) > 0 and env.k.vehicle.get_edge(self.veh_id) not in self.no_control_edges): + + # TODO(@evinitsky) put back # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 5c52e196f..d11773e4b 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -80,6 +80,8 @@ def visualizer_rllib(args): sim_params.use_ballistic = False # Determine agent and checkpoint + # TODO(akashvelu): remove this + # print("NEW CONFIGGG: ", config['env_config']['run']) config_run = config['env_config']['run'] if 'run' in config['env_config'] \ else None if args.run and config_run: @@ -91,6 +93,14 @@ def visualizer_rllib(args): sys.exit(1) if args.run: agent_cls = get_agent_class(args.run) + elif config['env_config']['run'] == "": + from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable + from flow.controllers.imitation_learning.ppo_model import 
PPONetwork + from ray.rllib.models import ModelCatalog + agent_cls = get_agent_class("PPO") + ModelCatalog.register_custom_model("imitation_ppo_trainable", Imitation_PPO_Trainable) + ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) + elif config['env_config']['run'] == "": from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel from ray.rllib.models import ModelCatalog @@ -162,6 +172,8 @@ def visualizer_rllib(args): checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) + # agent.import_model('/Users/akashvelu/Desktop/latest_run3/ppo.h5', 'av') + if hasattr(agent, "local_evaluator") and \ os.environ.get("TEST_FLAG") != 'True': @@ -169,7 +181,7 @@ def visualizer_rllib(args): else: env = gym.make(env_name) - # reroute on exit is a training hack, it should be turned off at test time. + # reroute on exit is a training hack, it should be turned off at test time. 
if hasattr(env, "reroute_on_exit"): env.reroute_on_exit = False @@ -401,5 +413,6 @@ def create_parser(): if __name__ == '__main__': parser = create_parser() args = parser.parse_args() - ray.init(num_cpus=1) + print("GEN EMISSION: ", args.gen_emission) + ray.init(local_mode=True) visualizer_rllib(args) From 1ae0081b693bd4502b601f7ab51fb5dda70987cb Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 21:08:14 -0700 Subject: [PATCH 249/335] fix data collection issue in i210_replay --- flow/visualize/i210_replay.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index b3b4fcb78..b2e22d5b3 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -262,7 +262,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= # Collect information from flow for the trajectory output get_extra_info(env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend(['{}_run_{}'.format(source_id, i)] * len(veh_ids)) + extra_info["source_id"].extend([source_id] * len(veh_ids)) + extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) # Compute the results for the custom callables. 
for (key, lambda_func) in custom_callables.items(): From f7d9ec17c6e57f8bf308e6a3f1343959db5b101b Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Mon, 1 Jun 2020 21:09:26 -0700 Subject: [PATCH 250/335] fix the network name mapping for i210 single lane --- flow/data_pipeline/leaderboard_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index f0c4178df..7553703f3 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -7,7 +7,8 @@ network_name_map = {"highway-single": "Single-Lane Straight Road", "ring": "Single-Lane Ring Road", - "I-210_subnetwork": "I-210 without Ramps"} + "I-210_subnetwork": "I-210 without Ramps", + "I_210_subnetwork": "I-210 without Ramps"} def network_name_translate(network_name): From a0c39045be0d74712fd1ab797882778f2006c528 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Tue, 2 Jun 2020 14:33:34 -0400 Subject: [PATCH 251/335] Minor changes to support cusotm PPO --- .../rl/multiagent/multiagent_i210.py | 4 +++- flow/visualize/i210_replay.py | 17 ++++++++++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index f55917e49..8c619ee88 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -318,5 +318,7 @@ def policy_mapping_fn(_): env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), "avg_outflow": lambda env: np.nan_to_num(env.k.vehicle.get_outflow_rate(120)), "avg_energy": lambda env: -1*energy_consumption(env, 0.1), - "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles, + "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles + if env.k.vehicle.num_vehicles > 0 + else 0, } diff --git 
a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index b3b4fcb78..58f8aa755 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -167,7 +167,18 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= rllib_flow_params = get_flow_params(rllib_config) agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) register_env(agent_env_name, agent_create_env) - agent_cls = get_agent_class(config_run) + # agent_cls = get_agent_class(config_run) + + if rllib_config['env_config']['run'] == "": + from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel + from ray.rllib.models import ModelCatalog + agent_cls = CCTrainer + ModelCatalog.register_custom_model("cc_model", CentralizedCriticModel) + elif rllib_config['env_config']['run'] == "": + from flow.algorithms.custom_ppo import CustomPPOTrainer + agent_cls = CustomPPOTrainer + elif config_run: + agent_cls = get_agent_class(config_run) # create the agent that will be used to compute the actions agent = agent_cls(env=agent_env_name, config=rllib_config) @@ -209,6 +220,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= key: [] for key in custom_callables.keys() }) + # reroute on exit is a training hack, it should be turned off at test time. 
+ if hasattr(env, "reroute_on_exit"): + env.reroute_on_exit = False + # date pipeline extra_info = defaultdict(lambda: []) source_id = 'flow_{}'.format(uuid.uuid4().hex) From d10f8e50f10ea7124afb6da09b1263a04cc62dd7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 3 Jun 2020 16:34:31 -0700 Subject: [PATCH 252/335] Value function learning --- .../imitating_controller.py | 3 +- .../imitation_learning/imitating_network.py | 18 +++--- .../imitation_learning/imitation_trainer.py | 32 ++++++++-- flow/controllers/imitation_learning/run.py | 9 +-- .../controllers/imitation_learning/trainer.py | 59 ++++++++----------- 5 files changed, 70 insertions(+), 51 deletions(-) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 53212f3ab..4d912179d 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -2,8 +2,7 @@ import tensorflow as tf import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer - +from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer class ImitatingController(BaseController): """ diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 47cdf3064..bf0a190a9 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,12 +1,12 @@ import numpy as np import tensorflow as tf -from utils_tensorflow import * -from keras_utils import * -import tensorflow_probability as tfp -from flow.controllers.base_controller import BaseController -from replay_buffer import ReplayBuffer from time import time from tensorflow.python.keras.callbacks import TensorBoard +import tensorflow_probability as tfp +from flow.controllers.imitation_learning.utils_tensorflow 
import * +from flow.controllers.imitation_learning.keras_utils import * +from flow.controllers.base_controller import BaseController +from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer class ImitatingNetwork(): @@ -14,7 +14,7 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path=''): + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path='', tensorboard_path=''): """Initializes and constructs neural network. Parameters @@ -47,6 +47,9 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.stochastic=stochastic self.variance_regularizer = variance_regularizer + self.train_steps = 0 + self.writer = tf.summary.FileWriter(tensorboard_path, tf.get_default_graph()) + # load network if specified, or construct network if load_model: self.load_network(load_path) @@ -91,7 +94,8 @@ def train(self, observation_batch, action_batch): # reshape action_batch to ensure a shape (batch_size, action_dim) action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) # one gradient step on batch - self.model.train_on_batch(observation_batch, action_batch) + loss = self.model.train_on_batch(observation_batch, action_batch) + self.writer.add_summary() def get_accel_from_observation(self, observation): """ diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 503bb6d07..b27b46609 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -8,7 +8,15 @@ class Imitation_PPO_Trainable(tune.Trainable): + """ + Class to train PPO with imitation, with Tune. 
Extends Trainable. + """ + def _setup(self, config): + """ + Sets up trainable. See superclass definition. + """ + env_name = config['env'] # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = ppo.PPOTrainer(env=env_name, config=config) @@ -16,31 +24,45 @@ def _setup(self, config): policy_id = list(self.trainer.get_weights().keys())[0] print(policy_id) print("\n\n\n") - self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) + self.trainer.import_model(config['model']['custom_options']['h5_load_path']) print("here") def _train(self): + """ + Executes one training iteration on trainer. See superclass definition. + """ print("TRAIN CALLED") # return self.trainer.train() return self.trainer.train() - # def train(self): - # print("TRAIN CALLED") - # return self.trainer.train() - def _save(self, tmp_checkpoint_dir): + """ + Saves trainer. See superclass definition. + """ return self.trainer._save(tmp_checkpoint_dir) def _restore(self, checkpoint): + """ + Restores trainer from checkpoint. See superclass definition. + """ self.trainer.restore(checkpoint) def _log_result(self, result): + """ + Logs results of trainer. See superclass definition. + """ self.trainer._log_result(result) def _stop(self): + """ + Stops trainer. See superclass definition. + """ self.trainer.stop() def _export_model(self, export_formats, export_dir): + """ + Exports trainer model. See superclass definition. 
+ """ return self.trainer.export_model(export_formats, export_dir=export_dir) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index aea231be7..0fbd2006b 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,7 +1,7 @@ import os import time import numpy as np -from trainer import Trainer +from flow.controllers.imitation_learning.trainer import Trainer from flow.controllers.car_following_models import IDMController @@ -67,7 +67,7 @@ def main(): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts. ') + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') @@ -87,7 +87,7 @@ def main(): parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') - parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent. 
') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') @@ -110,7 +110,8 @@ def main(): runner.save_controller_for_PPO() # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout - runner.evaluate() + if params['num_eval_episodes'] > 0: + ßrunner.evaluate() if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 67e00eb25..99a091083 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -4,16 +4,16 @@ import numpy as np import gym import os +import tensorflow as tf +from utils import * from flow.utils.registry import make_create_env -from imitating_controller import ImitatingController -from imitating_network import ImitatingNetwork +from flow.controllers.imitation_learning.imitating_controller import ImitatingController +from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork +from flow.controllers.imitation_learning.utils_tensorflow import * +from flow.controllers.imitation_learning.keras_utils import * from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams -import tensorflow as tf -from utils import * -from utils_tensorflow import * -from flow.controllers.imitation_learning.keras_utils import * class Trainer(object): """ @@ -62,10 +62,8 @@ def __init__(self, params, submodule): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - 
self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path']) - # tf.global_variab - # les_initializer().run(session=self.sess) # controllers setup v_des = self.params['v_des'] # for FollowerStopper @@ -229,6 +227,18 @@ def evaluate_controller(self, num_trajs = 10): print("Total expert steps: ", total_expert_steps) def learn_value_function(self, num_samples, num_iterations, num_grad_steps): + """ + Learn the value function under imitation policy. 
+ Parameters + __________ + num_samples: number of environment transition samples to collect to learn from + num_iterations: number of iterations to relabel data, and train + num_grad_steps: number of gradient steps per training iteration + + Returns + _______ + Value function neural net + """ # init value function neural net vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) vf_net.compile(loss='mean_squared_error', optimizer = 'adam') @@ -239,15 +249,11 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): num_samples, self.params['ep_len'], self.multiagent, use_expert=False, v_des=self.params['v_des'], max_decel=max_decel) - observations = np.array([]) - rewards = np.array([]) - next_observations = np.array([]) - # accumulate trajectory data - for traj in trajectories: - observations = np.append(observations, traj['observations']) - rewards = np.append(rewards, traj['rewards']) - next_observations = np.append(next_observations, traj['next_observations']) + # combine trajectories into one + observations = np.concatenate([traj['observations'] for traj in trajectories]) + rewards = np.concatenate([traj['rewards'] for traj in trajectories]) + next_observations = np.concatenate([traj['next_observations'] for traj in trajectories]) # iterate over data multiple times (labels change every iteration) for _ in range(num_iterations): @@ -255,8 +261,7 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): next_state_value_preds = vf_net.predict(next_observations).flatten() next_state_value_preds[np.isnan(next_state_value_preds)] = 0 labels = rewards + next_state_value_preds - for i in range(num_grad_steps): - vf_net.train_on_batch(observations, labels) + vf_net.fit(observations, labels, verbose=0) return vf_net @@ -266,10 +271,6 @@ def save_controller_for_PPO(self): """ Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, 
and save this model. - Parameters - ---------- - load_path : save_path - path to h5 file to save to """ vf_net = self.learn_value_function(self.params['vf_batch_size'], self.params['num_vf_iters'], self.params['num_agent_train_steps_per_iter']) @@ -290,7 +291,7 @@ def save_controller_for_PPO(self): # build layers for value function curr_layer = input for i in range(num_layers): - size = self.fcnet_hiddens[i] + size = self.params['fcnet_hiddens'][i] curr_layer = tf.keras.layers.Dense(size, activation="tanh", name="vf_hidden_layer_{}".format(i+1))(curr_layer) output_layer_vf = tf.keras.layers.Dense(1, activation=None, name="vf_output_layer")(curr_layer) @@ -306,7 +307,7 @@ def save_controller_for_PPO(self): # set value function weights to those learned num_vf_layers = len(vf_net.layers) - 2 for i in range(num_vf_layers): - vf_layer = ppo_model.get_layer(name-'vf_hidden_layer{}'.format(i + 1)) + vf_layer = ppo_model.get_layer('vf_hidden_layer_{}'.format(i + 1)) vf_layer.set_weights(vf_net.layers[i + 1].get_weights()) vf_output = ppo_model.get_layer("vf_output_layer") vf_output.set_weights(vf_net.layers[-1].get_weights()) @@ -322,11 +323,3 @@ def save_controller_network(self): """ print("Saving tensorflow model to: ", self.params['save_path']) self.action_network.save_network(self.params['save_path']) - - - - # def save_controller_for_PPO(self): - # """ - # Creates and saves a keras tensorflow model for training PPO with weights learned from imitation, to the specified path given in the command line params. Path must end with .h5. 
- # """ - # self.action_network.save_network_PPO(self.params['PPO_save_path']) From 7fa3e3a2a3b8f7353914cb4c50b3aeb2f760e5b0 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 3 Jun 2020 16:50:24 -0700 Subject: [PATCH 253/335] Tensorboard plotting for loss --- flow/controllers/imitation_learning/imitating_network.py | 8 +++++++- flow/controllers/imitation_learning/run.py | 1 + 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index bf0a190a9..2ee4de3f8 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -48,6 +48,8 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.variance_regularizer = variance_regularizer self.train_steps = 0 + + tensorboard_path = tensorboard_path + 'imitation_tensorboard/' self.writer = tf.summary.FileWriter(tensorboard_path, tf.get_default_graph()) # load network if specified, or construct network @@ -95,7 +97,11 @@ def train(self, observation_batch, action_batch): action_batch = action_batch.reshape(action_batch.shape[0], self.action_dim) # one gradient step on batch loss = self.model.train_on_batch(observation_batch, action_batch) - self.writer.add_summary() + + # tensorboard + summary = tf.Summary(value=[tf.Summary.Value(tag="imitation training loss", simple_value=loss), ]) + self.writer.add_summary(summary, global_step=self.train_steps) + self.train_steps += 1 def get_accel_from_observation(self, observation): """ diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 0fbd2006b..59fdeea90 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -81,6 +81,7 @@ def main(): parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural 
net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') + parser.add_argument('--tensorboad_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') From 91fab74509573cc5e0cfe661319afd4d38e624a7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 3 Jun 2020 16:51:18 -0700 Subject: [PATCH 254/335] Bug fixes --- flow/controllers/imitation_learning/trainer.py | 2 +- flow/controllers/imitation_learning/utils.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 99a091083..8ac5a3208 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -62,7 +62,7 @@ def __init__(self, params, submodule): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], 
load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path'], tensorboard_path=self.params['tensorboard_path']) # controllers setup diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 5d6134500..4708e96cb 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -3,8 +3,8 @@ import numpy as np import math from flow.core.params import SumoCarFollowingParams -from imitating_controller import ImitatingController -from imitating_network import ImitatingNetwork +from flow.controllers.imitation_learning.imitating_controller import ImitatingController +from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.rewards import * From d38839f31d87ee97d51c30e3ac2162c9638a99cb Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 4 Jun 2020 11:03:35 -0700 Subject: [PATCH 255/335] Code cleanup --- examples/train.py | 2 + .../imitation_learning/imitating_network.py | 11 +++++- .../imitation_learning/imitation_trainer.py | 6 +-- .../imitation_learning/keras_utils.py | 1 - flow/controllers/imitation_learning/run.py | 4 +- .../train_with_imitation.py | 37 +++++++++++-------- .../controllers/imitation_learning/trainer.py | 2 + flow/controllers/imitation_learning/utils.py | 3 +- 8 files changed, 40 insertions(+), 26 deletions(-) diff --git a/examples/train.py b/examples/train.py index 083593548..6d7b13879 100644 --- a/examples/train.py +++ b/examples/train.py @@ -218,12 +218,14 @@ def setup_exps_rllib(flow_params, from flow.controllers.imitation_learning.ppo_model import PPONetwork from flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog + # Register custom model 
ModelCatalog.register_custom_model("PPO_loaded_weights", PPONetwork) # set model to the custom model for run config['model']['custom_model'] = "PPO_loaded_weights" config['model']['custom_options'] = {"h5_load_path": flags.load_weights_path} config['observation_filter'] = 'NoFilter' + # alg run is the Trainable class alg_run = Imitation_PPO_Trainable elif alg_run == "CENTRALIZEDPPO": diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 2ee4de3f8..30eec3696 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -48,8 +48,8 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.variance_regularizer = variance_regularizer self.train_steps = 0 + self.action_steps = 0 - tensorboard_path = tensorboard_path + 'imitation_tensorboard/' self.writer = tf.summary.FileWriter(tensorboard_path, tf.get_default_graph()) # load network if specified, or construct network @@ -126,12 +126,21 @@ def get_accel_from_observation(self, observation): if self.stochastic: mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] var = np.exp(2 * log_std) + + variance_norm = np.linalg.norm(var) + summary = tf.Summary(value=[tf.Summary.Value(tag="Variance norm", simple_value=variance_norm), ]) + self.writer.add_summary(summary, global_step=self.action_steps) + cov_matrix = np.diag(var[0]) action = np.random.multivariate_normal(mean[0], cov_matrix) + + self.action_steps += 1 return action else: + self.action_steps += 1 return network_output + def get_accel(self, env): """ Get network's acceleration prediction(s) based on given env diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index b27b46609..a6f75ea45 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ 
b/flow/controllers/imitation_learning/imitation_trainer.py @@ -20,12 +20,8 @@ def _setup(self, config): env_name = config['env'] # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = ppo.PPOTrainer(env=env_name, config=config) - print("\n\n\nPOLICY_NAME") policy_id = list(self.trainer.get_weights().keys())[0] - print(policy_id) - print("\n\n\n") - self.trainer.import_model(config['model']['custom_options']['h5_load_path']) - print("here") + self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) def _train(self): """ diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 34b32f692..59928affc 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -108,7 +108,6 @@ def nll_loss(y, network_output): return nll_loss - def compare_weights(ppo_model, imitation_path): imitation_model = tf.keras.models.load_model(imitation_path, custom_objects={'nll_loss': negative_log_likelihood_loss(0.5)}) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 59fdeea90..25cb0f230 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -81,7 +81,7 @@ def main(): parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') - parser.add_argument('--tensorboad_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') 
parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') @@ -112,7 +112,7 @@ def main(): # evaluate controller on difference, compared to expert, in action taken and average reward accumulated per rollout if params['num_eval_episodes'] > 0: - ßrunner.evaluate() + runner.evaluate() if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index 416ae7048..78053fe2e 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -1,4 +1,4 @@ -from run import * +from flow.controllers.imitation_learning.run import * from examples.train import * def parse_args(args): @@ -75,23 +75,30 @@ def parse_args(args): # Imitation Learning args - parser.add_argument('--ep_len', type=int, default=5000, help="Maximum length of episode for imitation learning") + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help="Number of gradient steps to take per iteration") # number of gradient steps for training policy - parser.add_argument('--n_iter', type=int, default=5, help="Number of iterations of DAgger to perform (1st iteration is behavioral cloning)") + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy + parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') - parser.add_argument('--batch_size', type=int, default=3000, help="") # training data collected 
(in the env) during each iteration - parser.add_argument('--init_batch_size', type=int, default=4000) + parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') + parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') + parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') + parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this - parser.add_argument('--train_batch_size', type=int, default=100, help="Batch size for training") # number of sampled data points to be used per gradient/train step - parser.add_argument('--tensorboard_path', type=str, help='Path to tensorboard log dir for imitation') + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + + parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural net') + parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + parser.add_argument('--stochastic', 
type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') - parser.add_argument('--replay_buffer_size', type=int, default=1000000) - parser.add_argument('--num_eval_episodes', type=int, default=0, help="Number of episodes to evaluate imitation controller.") - parser.add_argument('--stochastic', type=bool, default=False, help="If true, controller learns stochastic policy (multivariate gaussian)") - parser.add_argument('--multiagent', type=bool, default=False, help="Whether the env is multiagent") - parser.add_argument('--v_des', type=float, default=15, help="v_des for FollowerStopper") - parser.add_argument('--variance_regularizer', type=float, default=0.5, help="Regularization parameter to penalize high variance in negative log likelihood loss") parsed_args = parser.parse_known_args(args)[0] @@ -109,8 +116,8 @@ def main(args): flags, params = parse_args(args) params["fcnet_hiddens"] = [32, 32, 32] + params['PPO_save_path'] = params['load_weights_path'] - assert flags.n_iter>1, ('DAgger needs >1 iteration') print("\n\n********** IMITATION LEARNING ************ \n") # run training diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 8ac5a3208..c1ff5f981 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -239,6 +239,8 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): _______ Value function neural net """ + + print("\n\n********** Learning value function of imitation policy ************ \n") # init value 
function neural net vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) vf_net.compile(loss='mean_squared_error', optimizer = 'adam') diff --git a/flow/controllers/imitation_learning/utils.py b/flow/controllers/imitation_learning/utils.py index 4708e96cb..36f7844e9 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/controllers/imitation_learning/utils.py @@ -205,8 +205,7 @@ def sample_trajectory_multiagent(env, controllers, action_network, max_trajector print("Controller collecting trajectory: ", controller) action = controller.get_action(env) - if type(action) == tuple: - mean, log_std = action[1], action[2] + # action should be a scalar acceleration if type(action) == np.ndarray: From c27bfe526fdea244757393265224b10acfde1cc0 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Wed, 10 Jun 2020 12:14:24 -0700 Subject: [PATCH 256/335] pep8 (mostly --- .../exp_configs/non_rl/i210_subnetwork.py | 2 + .../rl/multiagent/multiagent_i210.py | 2 - examples/train.py | 23 ++-- flow/algorithms/centralized_PPO.py | 108 ++++++++++-------- flow/algorithms/custom_ppo.py | 17 +-- flow/controllers/base_controller.py | 21 ++-- flow/controllers/velocity_controllers.py | 8 +- flow/core/experiment.py | 3 +- flow/data_pipeline/leaderboard_utils.py | 2 +- flow/data_pipeline/query.py | 94 ++++++++++----- flow/envs/multiagent/i210.py | 22 ++-- flow/visualize/time_space_diagram.py | 13 +-- flow/visualize/visualizer_rllib.py | 3 +- 13 files changed, 186 insertions(+), 132 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 65131a6bd..90384b207 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -241,9 +241,11 @@ edge_id = "119257908#1-AddedOnRampEdge" + def valid_ids(env, veh_ids): return [veh_id for veh_id in veh_ids if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"]] + 
custom_callables = { "avg_merge_speed": lambda env: np.nan_to_num(np.mean( env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index f55917e49..028e5bc7c 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -19,9 +19,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams -from flow.core.params import SumoCarFollowingParams from flow.core.rewards import energy_consumption -from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS from flow.utils.registry import make_create_env diff --git a/examples/train.py b/examples/train.py index e5b918ff6..1235e6241 100644 --- a/examples/train.py +++ b/examples/train.py @@ -164,6 +164,7 @@ def setup_exps_rllib(flow_params, """ from ray import tune from ray.tune.registry import register_env + from ray.rllib.env.group_agents_wrapper import _GroupAgentsWrapper try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -171,8 +172,6 @@ def setup_exps_rllib(flow_params, horizon = flow_params['env'].horizon - horizon = flow_params['env'].horizon - alg_run = flags.algorithm.upper() if alg_run == "PPO": @@ -244,19 +243,22 @@ def on_episode_start(info): episode.user_data["avg_mpg"] = [] episode.user_data["avg_mpj"] = [] - def on_episode_step(info): episode = info["episode"] env = info["env"].get_unwrapped()[0] if isinstance(env, _GroupAgentsWrapper): env = env.env if hasattr(env, 'no_control_edges'): - veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if (env.k.vehicle.get_speed(veh_id) >= 0 - and env.k.vehicle.get_edge(veh_id) - not in env.no_control_edges)] - rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if 
(env.k.vehicle.get_speed(veh_id) >= 0 - and env.k.vehicle.get_edge(veh_id) - not in env.no_control_edges)] + veh_ids = [ + veh_id for veh_id in env.k.vehicle.get_ids() + if env.k.vehicle.get_speed(veh_id) >= 0 + and env.k.vehicle.get_edge(veh_id) not in env.no_control_edges + ] + rl_ids = [ + veh_id for veh_id in env.k.vehicle.get_rl_ids() + if env.k.vehicle.get_speed(veh_id) >= 0 + and env.k.vehicle.get_edge(veh_id) not in env.no_control_edges + ] else: veh_ids = [veh_id for veh_id in env.k.vehicle.get_ids() if env.k.vehicle.get_speed(veh_id) >= 0] rl_ids = [veh_id for veh_id in env.k.vehicle.get_rl_ids() if env.k.vehicle.get_speed(veh_id) >= 0] @@ -270,7 +272,6 @@ def on_episode_step(info): episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0)) episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0)) - def on_episode_end(info): episode = info["episode"] avg_speed = np.mean(episode.user_data["avg_speed"]) @@ -316,7 +317,7 @@ def on_train_result(info): def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray - from ray.tune import run_experiments + from ray import tune flow_params = submodule.flow_params flow_params['sim'].render = flags.render diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py index 8f3b9f261..5f71f865a 100644 --- a/flow/algorithms/centralized_PPO.py +++ b/flow/algorithms/centralized_PPO.py @@ -1,14 +1,12 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function + """An example of customizing PPO to leverage a centralized critic.""" import argparse import numpy as np -from gym.spaces import Dict - -from ray import tune from ray.rllib.agents.ppo.ppo import PPOTrainer from flow.algorithms.custom_ppo import CustomPPOTFPolicy from ray.rllib.evaluation.postprocessing import compute_advantages, \ @@ -19,13 +17,11 @@ from ray.rllib.models.modelv2 import ModelV2 from 
ray.rllib.models.tf.tf_modelv2 import TFModelV2 from ray.rllib.models.tf.recurrent_tf_modelv2 import RecurrentTFModelV2 -from ray.rllib.models.model import restore_original_dimensions from ray.rllib.utils.annotations import override from ray.rllib.models.tf.fcnet_v2 import FullyConnectedNetwork from ray.rllib.utils.explained_variance import explained_variance from ray.rllib.utils import try_import_tf - tf = try_import_tf() # Frozen logits of the policy that computed the action @@ -37,10 +33,10 @@ parser = argparse.ArgumentParser() parser.add_argument("--stop", type=int, default=100000) -#TODOy class CentralizedCriticModel(TFModelV2): """Multi-agent model that implements a centralized VF.""" + # TODO(@evinitsky) make this work with more than boxes def __init__(self, obs_space, action_space, num_outputs, model_config, @@ -56,9 +52,12 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, self.max_num_agents = model_config['custom_options']['max_num_agents'] self.obs_space_shape = obs_space.shape[0] self.obs_space = obs_space - other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents, ), name="central_obs") + other_obs = tf.keras.layers.Input( + shape=(obs_space.shape[0] * self.max_num_agents,), + name="central_obs") central_vf_dense = tf.keras.layers.Dense( - model_config['custom_options']['central_vf_size'], activation=tf.nn.tanh, name="c_vf_dense")(other_obs) + model_config['custom_options']['central_vf_size'], + activation=tf.nn.tanh, name="c_vf_dense")(other_obs) central_vf_out = tf.keras.layers.Dense( 1, activation=None, name="c_vf_out")(central_vf_dense) self.central_vf = tf.keras.Model( @@ -89,15 +88,15 @@ def __init__(self, name, hiddens_size=64, cell_size=64): - super(CentralizedCriticModelRNN, self).__init__(obs_space, action_space, num_outputs, - model_config, name) + super(CentralizedCriticModelRNN, self).__init__( + obs_space, action_space, num_outputs, model_config, name) self.cell_size = cell_size # Define 
input layers input_layer = tf.keras.layers.Input( shape=(None, obs_space.shape[0]), name="inputs") - state_in_h = tf.keras.layers.Input(shape=(cell_size, ), name="h") - state_in_c = tf.keras.layers.Input(shape=(cell_size, ), name="c") + state_in_h = tf.keras.layers.Input(shape=(cell_size,), name="h") + state_in_c = tf.keras.layers.Input(shape=(cell_size,), name="c") seq_in = tf.keras.layers.Input(shape=(), name="seq_in") # Preprocess observation with a hidden layer and send to LSTM cell @@ -105,9 +104,9 @@ def __init__(self, hiddens_size, activation=tf.nn.relu, name="dense1")(input_layer) lstm_out, state_h, state_c = tf.keras.layers.LSTM( cell_size, return_sequences=True, return_state=True, name="lstm")( - inputs=dense1, - mask=tf.sequence_mask(seq_in), - initial_state=[state_in_h, state_in_c]) + inputs=dense1, + mask=tf.sequence_mask(seq_in), + initial_state=[state_in_h, state_in_c]) # Postprocess LSTM output with another hidden layer and compute values logits = tf.keras.layers.Dense( @@ -124,16 +123,20 @@ def __init__(self, self.register_variables(self.model.variables) self.model.summary() - #TODO(@evinitsky) add layer sharing to the VF - # Create the centralized VF - # Central VF maps (obs, opp_ops, opp_act) -> vf_pred + # TODO(@evinitsky) add layer sharing to the VF + # Create the centralized VF + # Central VF maps (obs, opp_ops, opp_act) -> vf_pred self.max_num_agents = model_config.get("max_num_agents", 120) self.obs_space_shape = obs_space.shape[0] - other_obs = tf.keras.layers.Input(shape=(obs_space.shape[0] * self.max_num_agents,), name="all_agent_obs") + other_obs = tf.keras.layers.Input( + shape=(obs_space.shape[0] * self.max_num_agents,), + name="all_agent_obs") central_vf_dense = tf.keras.layers.Dense( - model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(other_obs) + model_config.get("central_vf_size", 64), activation=tf.nn.tanh, + name="c_vf_dense")(other_obs) central_vf_dense2 = tf.keras.layers.Dense( - 
model_config.get("central_vf_size", 64), activation=tf.nn.tanh, name="c_vf_dense")(central_vf_dense) + model_config.get("central_vf_size", 64), activation=tf.nn.tanh, + name="c_vf_dense")(central_vf_dense) central_vf_out = tf.keras.layers.Dense( 1, activation=None, name="c_vf_out")(central_vf_dense2) self.central_vf = tf.keras.Model( @@ -142,8 +145,8 @@ def __init__(self, @override(RecurrentTFModelV2) def forward_rnn(self, inputs, state, seq_lens): - model_out, self._value_out, h, c = self.model([inputs, seq_lens] + - state) + model_out, self._value_out, h, c = self.model( + [inputs, seq_lens] + state) return model_out, [h, c] @override(ModelV2) @@ -197,11 +200,13 @@ def centralized_critic_postprocessing(policy, for agent_id in other_agent_batches.keys()} agent_time = sample_batch['t'] # # find agents whose time overlaps with the current agent - rel_agents = {agent_id: other_agent_time for agent_id, other_agent_time in other_agent_times.items()} + rel_agents = {agent_id: other_agent_time for agent_id, other_agent_time + in other_agent_times.items()} # if len(rel_agents) > 0: - other_obs = {agent_id: - other_agent_batches[agent_id][1]["obs"].copy() - for agent_id in other_agent_batches.keys()} + other_obs = { + agent_id: other_agent_batches[agent_id][1]["obs"].copy() + for agent_id in other_agent_batches.keys() + } # padded_agent_obs = {agent_id: # overlap_and_pad_agent( # time_span, @@ -209,17 +214,20 @@ def centralized_critic_postprocessing(policy, # other_obs[agent_id]) # for agent_id, # rel_agent_time in rel_agents.items()} - padded_agent_obs = {agent_id: - fill_missing( + padded_agent_obs = { + agent_id: fill_missing( agent_time, other_agent_times[agent_id], - other_obs[agent_id]) - for agent_id, - rel_agent_time in rel_agents.items()} + other_obs[agent_id] + ) + for agent_id, rel_agent_time in rel_agents.items() + } # okay, now we need to stack and sort - central_obs_list = [padded_obs for padded_obs in padded_agent_obs.values()] + central_obs_list = 
[padded_obs for padded_obs in + padded_agent_obs.values()] try: - central_obs_batch = np.hstack((sample_batch["obs"], np.hstack(central_obs_list))) + central_obs_batch = np.hstack( + (sample_batch["obs"], np.hstack(central_obs_list))) except: # TODO(@ev) this is a bug and needs to be fixed central_obs_batch = sample_batch["obs"] @@ -238,17 +246,19 @@ def centralized_critic_postprocessing(policy, sample_batch[CENTRAL_OBS] = central_obs_batch # overwrite default VF prediction with the central VF - sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf(sample_batch[CENTRAL_OBS]) + sample_batch[SampleBatch.VF_PREDS] = policy.compute_central_vf( + sample_batch[CENTRAL_OBS]) else: # policy hasn't initialized yet, use zeros - #TODO(evinitsky) put in the right shape + # TODO(evinitsky) put in the right shape obs_shape = sample_batch[SampleBatch.CUR_OBS].shape[1] obs_shape = (1, obs_shape * (policy.model.max_num_agents)) sample_batch[CENTRAL_OBS] = np.zeros(obs_shape) # TODO(evinitsky) put in the right shape. 
Will break if actions aren't 1 sample_batch[SampleBatch.VF_PREDS] = np.zeros(1, dtype=np.float32) - completed = sample_batch["dones"][-1] + # TODO (ak): this was not being used, so commented + # completed = sample_batch["dones"][-1] # if not completed and policy.loss_initialized(): # last_r = 0.0 @@ -267,7 +277,6 @@ def centralized_critic_postprocessing(policy, return batch - def time_overlap(time_span, agent_time): """Check if agent_time overlaps with time_span""" if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]: @@ -302,12 +311,14 @@ def overlap_and_pad_agent(time_span, agent_time, obs): print(time_span) print(agent_time) if time_span[0] == 7 or agent_time[0] == 7: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() # FIXME(ev) some of these conditions can be combined # no padding needed if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]: if obs.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs # agent enters before time_span starts and exits before time_span end if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]: @@ -317,7 +328,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((overlap_obs, padding)) if obs_concat.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs_concat # agent enters after time_span starts and exits after time_span ends elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]: @@ -327,7 +339,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((padding, overlap_obs)) if obs_concat.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs_concat # agent time is entirely contained in time_span elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]: @@ -341,7 +354,8 @@ def 
overlap_and_pad_agent(time_span, agent_time, obs): padding = np.zeros((missing_right, obs.shape[1])) obs_concat = np.concatenate((obs_concat, padding)) if obs_concat.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return obs_concat # agent time totally contains time_span elif agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]: @@ -353,7 +367,8 @@ def overlap_and_pad_agent(time_span, agent_time, obs): if non_overlap_right > 0: overlap_obs = overlap_obs[:-non_overlap_right] if overlap_obs.shape[0] < 200: - import ipdb; ipdb.set_trace() + import ipdb + ipdb.set_trace() return overlap_obs @@ -492,6 +507,7 @@ def __init__(self, config): shape=(), trainable=False, dtype=tf.float32) + def update_kl(self, blah): pass @@ -516,6 +532,7 @@ def central_vf_stats(policy, train_batch, grads): policy.central_value_function), } + def kl_and_loss_stats(policy, train_batch): return { "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), @@ -532,6 +549,7 @@ def kl_and_loss_stats(policy, train_batch): "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), } + CCPPO = CustomPPOTFPolicy.with_updates( name="CCPPO", postprocess_fn=centralized_critic_postprocessing, @@ -544,4 +562,4 @@ def kl_and_loss_stats(policy, train_batch): CentralizedValueMixin, KLCoeffMixin ]) -CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO) \ No newline at end of file +CCTrainer = PPOTrainer.with_updates(name="CCPPOTrainer", default_policy=CCPPO) diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py index a98af6c2d..5effd0ce7 100644 --- a/flow/algorithms/custom_ppo.py +++ b/flow/algorithms/custom_ppo.py @@ -5,9 +5,7 @@ import logging -import numpy as np import ray -from ray.rllib.agents.ppo.ppo import PPOTrainer from ray.rllib.evaluation.postprocessing import compute_advantages, \ Postprocessing from ray.rllib.policy.sample_batch import SampleBatch @@ -17,6 +15,10 @@ from ray.rllib.utils.explained_variance import 
explained_variance from ray.rllib.utils.tf_ops import make_tf_callable from ray.rllib.utils import try_import_tf +from ray.rllib.agents.trainer_template import build_trainer +from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG +from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales + tf = try_import_tf() @@ -78,7 +80,6 @@ def __init__(self, model_config (dict): (Optional) model config for use in specifying action distributions. """ - def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -109,7 +110,7 @@ def reduce_mean_valid(t): vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) else: self.mean_vf_loss = tf.constant(0.0) - loss = reduce_mean_valid(-surrogate_loss -entropy_coeff * curr_entropy) + loss = reduce_mean_valid(-surrogate_loss - entropy_coeff * curr_entropy) self.loss = loss @@ -266,6 +267,7 @@ def __init__(self, config): shape=(), trainable=False, dtype=tf.float32) + def update_kl(self, blah): pass @@ -285,6 +287,7 @@ def update_kl(self, blah): ValueNetworkMixin, KLCoeffMixin ]) + def validate_config(config): if config["entropy_coeff"] < 0: raise DeprecationWarning("entropy_coeff must be >= 0") @@ -306,13 +309,11 @@ def validate_config(config): elif tf and tf.executing_eagerly(): config["simple_optimizer"] = True # multi-gpu not supported -from ray.rllib.agents.trainer_template import build_trainer -from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG, update_kl, \ - warn_about_bad_reward_scales + CustomPPOTrainer = build_trainer( name="CustomPPOTrainer", default_config=DEFAULT_CONFIG, default_policy=CustomPPOTFPolicy, make_policy_optimizer=choose_policy_optimizer, validate_config=validate_config, - after_train_result=warn_about_bad_reward_scales) \ No newline at end of file + after_train_result=warn_about_bad_reward_scales) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 4a4ceb546..9806413e0 100755 --- 
a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -134,15 +134,20 @@ def get_action(self, env): if self.fail_safe is not None: for check in self.fail_safe: if check == 'instantaneous': - accel_no_noise_with_failsafe = self.get_safe_action_instantaneous(env, accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_safe_action_instantaneous( + env, accel_no_noise_with_failsafe) elif check == 'safe_velocity': - accel_no_noise_with_failsafe = self.get_safe_velocity_action(env, accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_safe_velocity_action( + env, accel_no_noise_with_failsafe) elif check == 'feasible_accel': - accel_no_noise_with_failsafe = self.get_feasible_action(accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_feasible_action( + accel_no_noise_with_failsafe) elif check == 'obey_speed_limit': - accel_no_noise_with_failsafe = self.get_obey_speed_limit_action(env, accel_no_noise_with_failsafe) + accel_no_noise_with_failsafe = self.get_obey_speed_limit_action( + env, accel_no_noise_with_failsafe) - env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, accel_no_noise_with_failsafe) + env.k.vehicle.update_accel_no_noise_with_failsafe( + self.veh_id, accel_no_noise_with_failsafe) # add noise to the accelerations, if requested if self.accel_noise > 0: @@ -286,9 +291,9 @@ def safe_velocity(self, env): v_safe = 2 * h / env.sim_step + dv - this_vel * (2 * self.delay) - # check for speed limit - this_edge = env.k.vehicle.get_edge(self.veh_id) - edge_speed_limit = env.k.network.speed_limit(this_edge) + # check for speed limit FIXME: this is not called + # this_edge = env.k.vehicle.get_edge(self.veh_id) + # edge_speed_limit = env.k.network.speed_limit(this_edge) if this_vel > v_safe: print( diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index e1f69de98..86868c5f7 100644 --- a/flow/controllers/velocity_controllers.py +++ 
b/flow/controllers/velocity_controllers.py @@ -115,15 +115,15 @@ def get_accel(self, env): if edge == "": return None - if (self.find_intersection_dist(env) <= 10 and \ + if (self.find_intersection_dist(env) <= 10 and env.k.vehicle.get_edge(self.veh_id) in self.danger_edges) or \ - env.k.vehicle.get_edge(self.veh_id)[0] == ":"\ + env.k.vehicle.get_edge(self.veh_id)[0] == ":" \ or (self.control_length and (env.k.vehicle.get_x_by_id(self.veh_id) < self.control_length[0] - or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1]))\ + or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1])) \ or (self.no_control_edges is not None and len(self.no_control_edges) > 0 and edge in self.no_control_edges): # TODO(@evinitsky) put back - # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: + # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None else: # compute the acceleration from the desired velocity diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8ac9c3699..df8992c20 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,5 +1,4 @@ """Contains an experiment class for running simulations.""" -from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info from flow.data_pipeline.leaderboard_utils import network_name_translate @@ -238,7 +237,7 @@ def rl_actions(*_): write_dict_to_csv(trajectory_table_path, extra_info) write_dict_to_csv(metadata_table_path, metadata, True) - + if to_aws: upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 7553703f3..62e6f6e53 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -57,7 +57,7 @@ def 
get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin """ try: os.makedirs("result/{}".format(table_name)) - except FileExistsError as e: + except FileExistsError: pass s3 = boto3.client("s3") response = s3.list_objects_v2(Bucket=bucket) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 89432e260..184c7217a 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -2,26 +2,64 @@ from enum import Enum # tags for different queries -tags = {"fact_vehicle_trace": {"fact_energy_trace": ["POWER_DEMAND_MODEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL"], - "fact_network_throughput_agg": ["FACT_NETWORK_THROUGHPUT_AGG"], - "fact_network_inflows_outflows": ["FACT_NETWORK_INFLOWS_OUTFLOWS"]}, - "fact_energy_trace": {}, - "POWER_DEMAND_MODEL_DENOISED_ACCEL": {"fact_vehicle_fuel_efficiency_agg": ["FACT_VEHICLE_FUEL_EFFICIENCY_AGG"], - "fact_network_metrics_by_distance_agg": - ["FACT_NETWORK_METRICS_BY_DISTANCE_AGG"], - "fact_network_metrics_by_time_agg": ["FACT_NETWORK_METRICS_BY_TIME_AGG"]}, - "POWER_DEMAND_MODEL": {}, - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, - "fact_vehicle_fuel_efficiency_agg": {"fact_network_fuel_efficiency_agg": ["FACT_NETWORK_FUEL_EFFICIENCY_AGG"]}, - "fact_network_fuel_efficiency_agg": {"leaderboard_chart": ["LEADERBOARD_CHART"]}, - "leaderboard_chart": {"leaderboard_chart_agg": ["LEADERBOARD_CHART_AGG"]} - } +tags = { + "fact_vehicle_trace": { + "fact_energy_trace": [ + "POWER_DEMAND_MODEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" + ], + "fact_network_throughput_agg": [ + "FACT_NETWORK_THROUGHPUT_AGG" + ], + "fact_network_inflows_outflows": [ + "FACT_NETWORK_INFLOWS_OUTFLOWS" + ] + }, + "fact_energy_trace": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL": { + "fact_vehicle_fuel_efficiency_agg": [ + "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" + ], + "fact_network_metrics_by_distance_agg": [ + 
"FACT_NETWORK_METRICS_BY_DISTANCE_AGG" + ], + "fact_network_metrics_by_time_agg": [ + "FACT_NETWORK_METRICS_BY_TIME_AGG" + ] + }, + "POWER_DEMAND_MODEL": {}, + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, + "fact_vehicle_fuel_efficiency_agg": { + "fact_network_fuel_efficiency_agg": [ + "FACT_NETWORK_FUEL_EFFICIENCY_AGG" + ] + }, + "fact_network_fuel_efficiency_agg": { + "leaderboard_chart": [ + "LEADERBOARD_CHART" + ] + }, + "leaderboard_chart": { + "leaderboard_chart_agg": [ + "LEADERBOARD_CHART_AGG" + ] + } +} -tables = ["fact_vehicle_trace", "fact_energy_trace", "fact_network_throughput_agg", "fact_network_inflows_outflows", - "fact_vehicle_fuel_efficiency_agg", "fact_network_metrics_by_distance_agg", - "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", "leaderboard_chart", - "leaderboard_chart_agg", "metadata_table"] +tables = [ + "fact_vehicle_trace", + "fact_energy_trace", + "fact_network_throughput_agg", + "fact_network_inflows_outflows", + "fact_vehicle_fuel_efficiency_agg", + "fact_network_metrics_by_distance_agg", + "fact_network_metrics_by_time_agg", + "fact_network_fuel_efficiency_agg", + "leaderboard_chart", + "leaderboard_chart_agg", + "metadata_table" +] network_using_edge = ["I-210 without Ramps"] @@ -171,7 +209,7 @@ class QueryStrings(Enum): FACT_VEHICLE_FUEL_EFFICIENCY_AGG = """ WITH sub_fact_vehicle_trace AS ( - SELECT + SELECT v.id, v.source_id, e.energy_model_id, @@ -248,7 +286,7 @@ class QueryStrings(Enum): FACT_NETWORK_INFLOWS_OUTFLOWS = """ WITH min_max_time_step AS ( - SELECT + SELECT id, source_id, MIN(time_step) AS min_time_step, @@ -283,10 +321,10 @@ class QueryStrings(Enum): COALESCE(i.source_id, o.source_id) AS source_id, COALESCE(i.inflow_rate, 0) AS inflow_rate, COALESCE(o.outflow_rate, 0) AS outflow_rate - FROM inflows i + FROM inflows i FULL OUTER JOIN outflows o ON 1 = 1 - AND i.time_step = o.time_step - AND i.source_id = o.source_id + AND i.time_step = o.time_step + AND i.source_id = o.source_id ORDER BY 
time_step ;""" @@ -306,7 +344,7 @@ class QueryStrings(Enum): SUM(power) OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power - FROM fact_vehicle_trace vt + FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' @@ -384,7 +422,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound - FROM binned_cumulative_energy bce + FROM binned_cumulative_energy bce JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.distance_meters_bin = be.distance_meters_bin @@ -405,7 +443,7 @@ class QueryStrings(Enum): SUM(power) OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS cumulative_power - FROM fact_vehicle_trace vt + FROM fact_vehicle_trace vt JOIN fact_energy_trace et ON 1 = 1 AND et.date = \'{date}\' AND et.partition_name = \'{partition}_POWER_DEMAND_MODEL_DENOISED_ACCEL\' @@ -482,7 +520,7 @@ class QueryStrings(Enum): COALESCE(be.instantaneous_energy_avg, 0) AS instantaneous_energy_avg, COALESCE(be.instantaneous_energy_upper_bound, 0) AS instantaneous_energy_upper_bound, COALESCE(be.instantaneous_energy_lower_bound, 0) AS instantaneous_energy_lower_bound - FROM binned_cumulative_energy bce + FROM binned_cumulative_energy bce JOIN binned_energy be ON 1 = 1 AND bce.source_id = be.source_id AND bce.time_seconds_bin = be.time_seconds_bin diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index c9b63b23a..ad301a3f5 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -1,10 +1,5 @@ """Environment for training vehicles to reduce congestion in the I210.""" - 
-from collections import OrderedDict -from copy import deepcopy -from time import time - -from gym.spaces import Box, Discrete, Dict +from gym.spaces import Box import numpy as np from flow.core.rewards import miles_per_gallon, miles_per_megajoule @@ -20,9 +15,11 @@ "max_accel": 1, # maximum deceleration for autonomous vehicles, in m/s^2 "max_decel": 1, - # whether we use an obs space that contains adjacent lane info or just the lead obs + # whether we use an obs space that contains adjacent lane info or just the + # lead obs "lead_obs": True, - # whether the reward should come from local vehicles instead of global rewards + # whether the reward should come from local vehicles instead of global + # rewards "local_reward": True, # desired velocity "target_velocity": 25 @@ -161,10 +158,10 @@ def in_control_range(self, veh_id): If control range is defined it uses control range, otherwise it searches over a set of edges """ - return (self.control_range and self.k.vehicle.get_x_by_id(veh_id) < self.control_range[1] \ - and self.k.vehicle.get_x_by_id(veh_id) > self.control_range[0]) or \ - (len(self.no_control_edges) > 0 and self.k.vehicle.get_edge(veh_id) not in - self.no_control_edges) + return (self.control_range and self.control_range[1] > + self.k.vehicle.get_x_by_id(veh_id) > self.control_range[0]) or \ + (len(self.no_control_edges) > 0 and self.k.vehicle.get_edge(veh_id) not in + self.no_control_edges) def get_state(self): """See class definition.""" @@ -358,7 +355,6 @@ def additional_command(self): if veh_id not in self.observed_ids: self.k.vehicle.remove(veh_id) - def state_util(self, rl_id): """Return an array of headway, tailway, leader speed, follower speed. diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 004172765..8daffde86 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -262,8 +262,7 @@ def _highway(data, params, all_time): edge_starts = {} # Add the main edges. 
edge_starts.update({ - "highway_{}".format(i): - i * (length / num_edges + junction_length) + "highway_{}".format(i): i * (length / num_edges + junction_length) for i in range(num_edges) }) @@ -271,15 +270,13 @@ def _highway(data, params, all_time): edge_starts.update({"highway_end": length + num_edges * junction_length}) edge_starts.update({ - ":edge_{}".format(i + 1): - (i + 1) * length / num_edges + i * junction_length + ":edge_{}".format(i + 1): (i + 1) * length / num_edges + i * junction_length for i in range(num_edges - 1) }) if params['net'].additional_params["use_ghost_edge"]: edge_starts.update({ - ":edge_{}".format(num_edges): - length + (num_edges - 1) * junction_length + ":edge_{}".format(num_edges): length + (num_edges - 1) * junction_length }) # compute the absolute position @@ -299,8 +296,8 @@ def _highway(data, params, all_time): data[veh_id]['time'], data[veh_id]['edge'], data[veh_id]['lane']): - # avoid vehicles not on the relevant edges. Also only check the second to - # last lane + # avoid vehicles not on the relevant edges. Also only check the + # second to last lane if edge not in edge_starts.keys() or ti not in all_time: continue else: diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 5c52e196f..0ab658f75 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -169,7 +169,7 @@ def visualizer_rllib(args): else: env = gym.make(env_name) - # reroute on exit is a training hack, it should be turned off at test time. + # reroute on exit is a training hack, it should be turned off at test time. 
if hasattr(env, "reroute_on_exit"): env.reroute_on_exit = False @@ -302,7 +302,6 @@ def visualizer_rllib(args): print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) - # Compute arrival rate of vehicles in the last 500 sec of the run print("\nOutflows (veh/hr):") print(final_outflows) From 4e8769b85c0a7fbd9418ee1d033e0f152f3ab62b Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Wed, 10 Jun 2020 12:41:40 -0700 Subject: [PATCH 257/335] pydocstyle (mostly --- .../exp_configs/non_rl/i210_subnetwork.py | 6 +- examples/train.py | 2 +- flow/algorithms/centralized_PPO.py | 95 +++++++++++-------- flow/algorithms/custom_ppo.py | 79 ++++++++------- flow/data_pipeline/data_pipeline.py | 20 ++-- flow/data_pipeline/leaderboard_utils.py | 6 +- flow/envs/multiagent/base.py | 1 + 7 files changed, 120 insertions(+), 89 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 90384b207..3fac52be2 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -243,7 +243,11 @@ def valid_ids(env, veh_ids): - return [veh_id for veh_id in veh_ids if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"]] + """Return the names of vehicles within the controllable edges.""" + return [ + veh_id for veh_id in veh_ids + if env.k.vehicle.get_edge(veh_id) not in ["ghost0", "119257908#3"] + ] custom_callables = { diff --git a/examples/train.py b/examples/train.py index 1235e6241..112b7fa0d 100644 --- a/examples/train.py +++ b/examples/train.py @@ -283,7 +283,7 @@ def on_episode_end(info): episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"]) def on_train_result(info): - """Store the mean score of the episode, and increment or decrement how many adversaries are on""" + """Store the mean score of the episode, and adjust the number of adversaries.""" trainer = info["trainer"] trainer.workers.foreach_worker( 
lambda ev: ev.foreach_env( diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py index 5f71f865a..57fdd7e33 100644 --- a/flow/algorithms/centralized_PPO.py +++ b/flow/algorithms/centralized_PPO.py @@ -278,7 +278,7 @@ def centralized_critic_postprocessing(policy, def time_overlap(time_span, agent_time): - """Check if agent_time overlaps with time_span""" + """Check if agent_time overlaps with time_span.""" if agent_time[0] <= time_span[1] and agent_time[1] >= time_span[0]: return True else: @@ -298,14 +298,18 @@ def fill_missing(agent_time, other_agent_time, obs): def overlap_and_pad_agent(time_span, agent_time, obs): - """take the part of obs that overlaps, pad to length time_span - Arguments: - time_span (tuple): tuple of the first and last time that the agent - of interest is in the system - agent_time (tuple): tuple of the first and last time that the - agent whose obs we are padding is in the system - obs (np.ndarray): observations of the agent whose time is - agent_time + """Take the part of obs that overlaps, pad to length time_span. + + Parameters + ---------- + time_span : tuple + tuple of the first and last time that the agent of interest is in the + system + agent_time : tuple + tuple of the first and last time that the agent whose obs we are + padding is in the system + obs : array_like + observations of the agent whose time is agent_time """ assert time_overlap(time_span, agent_time) print(time_span) @@ -424,38 +428,49 @@ def __init__(self, vf_loss_coeff=1.0, use_gae=True, model_config=None): - """Constructs the loss for Proximal Policy Objective. - - Arguments: - action_space: Environment observation space specification. - dist_class: action distribution class for logits. - value_targets (Placeholder): Placeholder for target values; used - for GAE. - actions (Placeholder): Placeholder for actions taken - from previous model evaluation. 
- advantages (Placeholder): Placeholder for calculated advantages - from previous model evaluation. - prev_logits (Placeholder): Placeholder for logits output from - previous model evaluation. - prev_actions_logp (Placeholder): Placeholder for prob output from - previous model evaluation. - vf_preds (Placeholder): Placeholder for value function output - from previous model evaluation. - curr_action_dist (ActionDistribution): ActionDistribution - of the current model. - value_fn (Tensor): Current value function output Tensor. - cur_kl_coeff (Variable): Variable holding the current PPO KL - coefficient. - valid_mask (Tensor): A bool mask of valid input elements (#2992). - entropy_coeff (float): Coefficient of the entropy regularizer. - clip_param (float): Clip parameter - vf_clip_param (float): Clip parameter for the value function - vf_loss_coeff (float): Coefficient of the value function loss - use_gae (bool): If true, use the Generalized Advantage Estimator. - model_config (dict): (Optional) model config for use in specifying - action distributions. + """Construct the loss for Proximal Policy Objective. + + Parameters + ---------- + action_space : TODO + Environment observation space specification. + dist_class : TODO + action distribution class for logits. + value_targets : tf.placeholder + Placeholder for target values; used for GAE. + actions : tf.placeholder + Placeholder for actions taken from previous model evaluation. + advantages : tf.placeholder + Placeholder for calculated advantages from previous model + evaluation. + prev_logits : tf.placeholder + Placeholder for logits output from previous model evaluation. + prev_actions_logp : tf.placeholder + Placeholder for prob output from previous model evaluation. + vf_preds : tf.placeholder + Placeholder for value function output from previous model + evaluation. + curr_action_dist : ActionDistribution + ActionDistribution of the current model. + value_fn : tf.Tensor + Current value function output Tensor. 
+ cur_kl_coeff : tf.Variable + Variable holding the current PPO KL coefficient. + valid_mask : tf.Tensor + A bool mask of valid input elements (#2992). + entropy_coeff : float + Coefficient of the entropy regularizer. + clip_param : float + Clip parameter + vf_clip_param : float + Clip parameter for the value function + vf_loss_coeff : float + Coefficient of the value function loss + use_gae : bool + If true, use the Generalized Advantage Estimator. + model_config : dict, optional + model config for use in specifying action distributions. """ - def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py index 5effd0ce7..65291f1d4 100644 --- a/flow/algorithms/custom_ppo.py +++ b/flow/algorithms/custom_ppo.py @@ -1,4 +1,4 @@ -"""PPO but we add in the outflow after the reward to the final reward""" +"""PPO but we add in the outflow after the reward to the final reward.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -49,36 +49,48 @@ def __init__(self, vf_loss_coeff=1.0, use_gae=True, model_config=None): - """Constructs the loss for Proximal Policy Objective. - - Arguments: - action_space: Environment observation space specification. - dist_class: action distribution class for logits. - value_targets (Placeholder): Placeholder for target values; used - for GAE. - actions (Placeholder): Placeholder for actions taken - from previous model evaluation. - advantages (Placeholder): Placeholder for calculated advantages - from previous model evaluation. - prev_logits (Placeholder): Placeholder for logits output from - previous model evaluation. - prev_actions_logp (Placeholder): Placeholder for prob output from - previous model evaluation. - vf_preds (Placeholder): Placeholder for value function output - from previous model evaluation. 
- curr_action_dist (ActionDistribution): ActionDistribution - of the current model. - value_fn (Tensor): Current value function output Tensor. - cur_kl_coeff (Variable): Variable holding the current PPO KL - coefficient. - valid_mask (Tensor): A bool mask of valid input elements (#2992). - entropy_coeff (float): Coefficient of the entropy regularizer. - clip_param (float): Clip parameter - vf_clip_param (float): Clip parameter for the value function - vf_loss_coeff (float): Coefficient of the value function loss - use_gae (bool): If true, use the Generalized Advantage Estimator. - model_config (dict): (Optional) model config for use in specifying - action distributions. + """Construct the loss for Proximal Policy Objective. + + Parameters + ---------- + action_space : TODO + Environment observation space specification. + dist_class : TODO + action distribution class for logits. + value_targets : tf.placeholder + Placeholder for target values; used for GAE. + actions : tf.placeholder + Placeholder for actions taken from previous model evaluation. + advantages : tf.placeholder + Placeholder for calculated advantages from previous model + evaluation. + prev_logits : tf.placeholder + Placeholder for logits output from previous model evaluation. + prev_actions_logp : tf.placeholder + Placeholder for prob output from previous model evaluation. + vf_preds : tf.placeholder + Placeholder for value function output from previous model + evaluation. + curr_action_dist : ActionDistribution + ActionDistribution of the current model. + value_fn : tf.Tensor + Current value function output Tensor. + cur_kl_coeff : tf.Variable + Variable holding the current PPO KL coefficient. + valid_mask : tf.Tensor + A bool mask of valid input elements (#2992). + entropy_coeff : float + Coefficient of the entropy regularizer. 
+ clip_param : float + Clip parameter + vf_clip_param : float + Clip parameter for the value function + vf_loss_coeff : float + Coefficient of the value function loss + use_gae : bool + If true, use the Generalized Advantage Estimator. + model_config : dict, optional + model config for use in specifying action distributions. """ def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -170,7 +182,7 @@ def kl_and_loss_stats(policy, train_batch): def vf_preds_and_logits_fetches(policy): - """Adds value function and logits outputs to experience train_batches.""" + """Add value function and logits outputs to experience train_batches.""" return { SampleBatch.VF_PREDS: policy.model.value_function(), BEHAVIOUR_LOGITS: policy.model.last_output(), @@ -181,8 +193,7 @@ def postprocess_ppo_gae(policy, sample_batch, other_agent_batches=None, episode=None): - """Adds the policy logits, VF preds, and advantages to the trajectory.""" - + """Add the policy logits, VF preds, and advantages to the trajectory.""" completed = sample_batch["dones"][-1] if completed: last_r = 0.0 diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index ea5307dad..50c2c8422 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -13,13 +13,14 @@ def generate_trajectory_table(data_path, extra_info, partition_name): ---------- data_path : str path to the standard SUMO emission - extra_info: dict + extra_info : dict extra information needed in the trajectory table, collected from flow - partition_name: str + partition_name : str the name of the partition to put this output to + Returns ------- - output_file_path: str + output_file_path : str the local path of the outputted csv file """ raw_output = pd.read_csv(data_path, index_col=["time", "id"]) @@ -39,7 +40,7 @@ def generate_trajectory_table(data_path, extra_info, partition_name): def write_dict_to_csv(data_path, extra_info, include_header=False): - """Write 
extra to the CSV file at data_path, create one if not exist + """Write extra to the CSV file at data_path, create one if not exist. Parameters ---------- @@ -105,7 +106,7 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): - """Delete the obsolete data on S3""" + """Delete the obsolete data on S3.""" response = s3.list_objects_v2(Bucket=bucket) keys = [e["Key"] for e in response["Contents"] if e["Key"].find(table) == 0 and e["Key"][-4:] == ".csv"] keys.remove(latest_key) @@ -114,18 +115,17 @@ def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): class AthenaQuery: - """ - Class used to run query. + """Class used to run queries. Act as a query engine, maintains an open session with AWS Athena. Attributes ---------- - MAX_WAIT: int + MAX_WAIT : int maximum number of seconds to wait before declares time-out - client: boto3.client + client : boto3.client the athena client that is used to run the query - existing_partitions: list + existing_partitions : list a list of partitions that is already recorded in Athena's datalog, this is obtained through query at the initialization of this class instance. 
diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index 62e6f6e53..afc2fd8bc 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -1,4 +1,4 @@ -"""APIs for the leader board front end""" +"""APIs for the leader board front end.""" import os import boto3 import pandas as pd @@ -35,7 +35,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin Parameters ---------- - table_name: str + table_name : str The name of table to retrieve from S3, the current available tables are: fact_vehicle_trace fact_energy_trace @@ -52,7 +52,7 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin avoid burdening the web server with more calculation. The date and source_id in its name is always going to reflect the latest leaderboard_chart entry. - bucket: str + bucket : str the S3 bucket that holds these tables """ try: diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 7104138de..881461d63 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -324,4 +324,5 @@ def apply_rl_actions(self, rl_actions=None): self._apply_rl_actions(clipped_actions) def set_iteration_num(self): + """Increment the number of training iterations.""" self.num_training_iters += 1 From 81e8d6a47e2293cf8877e72f40c231fb4b1097ec Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 11 Jun 2020 16:14:49 -0700 Subject: [PATCH 258/335] Test file changes --- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 10209 -> 19590 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes tests/data/rllib_data/multi_agent/params.json | 54 ++++++++++-------- tests/data/rllib_data/multi_agent/params.pkl | Bin 17562 -> 17746 bytes .../single_agent/checkpoint_1/checkpoint-1 | Bin 582 -> 27018 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes .../data/rllib_data/single_agent/params.json | 42 
++++++++------ tests/data/rllib_data/single_agent/params.pkl | Bin 6414 -> 10890 bytes 8 files changed, 55 insertions(+), 41 deletions(-) diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 index 0693ed4b62a9cabcdbecb267201ea862144f212c..bc68b0b99d2171f688662b7b1e4c841a36f8ffe0 100644 GIT binary patch literal 19590 zcmZU)c|29`*Z+@_Sy84)h76I)km>Ae8A?Kfln5ot6s3boG?2NFu`)*ql_r_a+1H|y zl1j5?5e53Iw#|$P3k&g|xj8B% zEOO@mjFMa&KL0<%CAnj#$8$v^Jw*k7h>Y@!3gC|0!4(S@oK{@lQ}#b+nH9L!*LarT zni~VcxDsnOZr-qI+sw7$5dkwdYz~VG@rw{#L((@QU~Txu$f$_TYojE&lK(SGa^t%H zIm(rC5lZ4p&lUFP%A{~*-MHiRU4&i6xM(DEC;acwby3?k1xRw`{$qNe8CTv#$VE7X zJJF3hN#A9Rr?9{jhO4l6@nYw{g5Q5OE<#aUMgKJWIKjavrHi0+oU~~AuC{sqaaqew ztyUPT7TfZ52gU1AzBpmUTUh*cxsB*!otB3`RBYBfUT9M|4s3Q?7J`%VdYid{J3!HY zQ_Dx*N*mq+@sSpBB6c>z4w>ZJQ$3q*Eqj~M zs&(jFdlj2*1-3|z*$V5H+@=qIWs@)Cf;VMOOmHj=X z|4+pDhxkQuRe~jh|Kt40!2-{#x^dP1@w`S7cS`W2w7GGmW2-IuJMUSy?s08isH4V- zJ#E3M(S2fdHC%_|QhlAL)9}Q4R<$U9a>jHHUnhdMR^Hcoy^#UGQSt%r`E5;(Scfc! 
z`$3nJSmtE?eocq<$m=lcC3CuYgCSFRH%`g&n>vnK#=1pXkB?U7_XP!64VMY=XAbVO zo`X%iOk+X1B}vMqUDzvHNi!{&1*3&9EU(`pp1*~-w~k_Bp_k3_NXM21x|>?e zc5!UFSKX&;7o39fcE^_Ow|3dg2w8(aQ9?SJ{gjr^61Dh4C7X1o=AlUU8?r@G3!8(CY^EyS#82MFIH2m#QZqWQqg~!;wt_sa|Lrd)UXE5tU6_qkxY~w91Zu36uC4I2# zG_0K2jq9(xhMlL&*m02?F{o`36jocbXdj<#bEn(g=G3Mro1mICxM^QhOIDwz&Ft%X zEvKu(Ta4OeZQdN!woxkgw$a~LN?w|-Zy|CA=xTV4^L|v~?I#FC*e^2@U$A|w3K)ruGsQ-@#r@L_t{x<*({}X_}-Fckp<2v~E6R&f^H>Pt+ zEwVUwX4djY{buv8TyNx~{$>6#r{(;svx4|G0SOrPc?BawxsjdrdDvCW(jlD*A$-kL)=D&u|`p59uZd~L482(SrH2Ej=geC|H-TKqX8Xc0t zk*Okh@xgN%GEEob{}NO^a}*MkD@cc;Ha;mGAhiqnspD`cIO-2Dp_dz}Mbta8Jb5pF z?34nib+rIa`6GJwUK>droC2la^3g72H_@suh09I5Aio<-f0 zSEzH`5S6Sa(DVjA9TQZ8Hv2x%DF^n_VzougRpA7*UOX2*zSP9a2}j7D=0_xB*cf8{ ztuW_n7TGbYlm^WUfErULd=t`5a<-qRm$sgxO7G_2Q=@HIH2jQp-29AY$ym`WcPZli z#Q_xl>_aE>E9BwPJ8*Wr3*DZ!leqdBAx~xx%A9!(da*KW&dPVpWzR`qMqbh%_A6+| zW|nqOON4tnTk*i}K1N9+1^NRVX~6yc)RNtUhy0DOJV%Qf-<^f+4g!&g{fAw!Y?~QR?&3+@`aYd}v=^gh<{k8IQ4)=AF@elN87QP}Y^PKb zz4FF}47weHS<3?m+g5<#?^>x`ZXPiR^~0v2Q*cKk2|l_{flvhz?2VU%RbTcAW{XK| z!>!NsTzn7?-Pnh@Mu*@)@+b0^GZXV-)}zO`?PT2fVlqE&ANjme7>v*9z!dtF#2W-* zW#UBoXiXjspSGWdC0%9K-r0fUcg>***(Hp=r8fCF=_hHb-UeeU)&r_cA;O}TaC2-X zo!J?T%KB3vs$RShM14gDO$UHSC%Gk19Es7RArzq3uhtYEcl0$s4zhuxK%3va#E z!8tOW2rt$|@7WrlzI`EFz4M+9&!{4Mckd?oZd&;H{0nB)sVn4%Z5-*!^ryUgLO5x0 zGP9{Y27f1qpndvRswy@Q>cuAFomv~U4 zi`ysd#SQCSF?qoOI1)3PGu~|~zBI~!2}&s>)_DiIndo6C^NtyblfW)Z9*pr)2GbRL zY4XMeXzjj{ep0I>QqT%1Wn;lVYydp>AB2|muj#&3N*J4L42rybxP5pz9iHAn^`dhz zz2guBT;kDJx5t6ro5w_Lyckq2c|<1u5hmk~3t>=%HE1&|tufdP!m62QKU&W>+AWBs zxN@*F{zeM?#HgjuO1!}_#QtzGc>Lx)`#!}U2K$n6?8g~Q&hth*E1Q8S{&}?4V+Z6a z93#IpUs|t{{Yn138AFVgYtm2G*D?y3ZA56hAbnT;pf&xgQI5?8Wv@JPVT}W7D;|J9 zb*;p_u!Bi0GY5vK(^9(&@N16-R-S32JRu2Uzda39K1KnAEXCYfF*MXL!8e*;=_ijK zqA^AYH@sK|0hconRQDq>9uFPE_cMmT0F_>pgm=sXIr8=^IjQASp`^Q+D$jXM>Ym!-Gu#X>W*?gdiVntYvPGQH6WrtKg|`Avr0e2>sD1s57g9TzJet;GQ@b8>EFo zzq0t5&%tY{JaW-&1DSV1n!L^5MpY)=M?FPzQrxZz0d{p{$u4JNDH{k~ 
zOgv`YjE1D&6+jy6sa;zsc+L!khVp$-mUIt?r+J`p1dkf~$yg6P2M_a0v~`eH_tD`U^JuFXgUM_k{c@VXp?%)uhIutD>3>3# z*Gl1@l42;^|Bn6~TE(WtX+iTFXBxhu3G&b#i?*$R8;z6T@0{zbcPu*&Ha>D0ZzL`++Xg*x>#$2TAP>tDmsLvrNawJ|^sh*OCT z`^X$vfr|G%n7p9`ocqoRFN_HMA;E=)Ipt*B^mx4W(-%FAQ#q0s2k294MlhewVs=E0 zFu4)4>8mb7a?je?I=p^A`}lV{M0Oa0{kw&Dcbpw)gCy9h$>DEbJ?Jwp;4fo8vI9XM zh{tdY?#g;h6q`-xfp8ZT8F)(VPR#|y>b$9VZ5qE9o##1v&RrlslFP zF}A*V#5E5RRz}0{!!qjo!5WtCRfe7~F(g$@68*?m`to5S-m#lQ-y41-am$Yrm1Eo3 zAA;B$I)9GsV!c5nQyFf0Zf35ZbwrQ(4w(4h2=+M?k`$+(Bx-mZjtc9MOzVR%^YT~f zepU*4M=P1(--z&J9k5nNNVAkk1@k| zjnnMzfJaQB_c>;csW(WynM7x;FJ=2BqhZUS1|%*QgA;XOWO76@4Adlp`zMyZyKDus zeNWIeYSoxlp^gFP9P#=_D-hb`i(`gHNS;F{U8sMB?nO7&$Bf`UsRP(hVh0!htOGga zYUpzChky(E_&Q_*ejIa;&Rx_B!B3wsk*WjKb>0W6c3zHNmY+{{UEIOweh`Q7Q@$`6 z$Kb;mc6jto9I^H^0H$#U9DcA6E|^3wFU+= z(&%I71_u?CAmzRe1gg4n8iS&AS$d%hyZd4e-aaMqkP8y=u_oS47^$@#($QrF5I(W_+SEkxkrEMvRwa(`A8U z@LTf?l-4{1$@lLujcY&B)x(V>F5Q@LmZ+iY?OYtTSqc7~Eb*W7kR2UyW$(+}!Rv1i z!{;=2*r?<{ePg7^B9jETG$9l&i0i;w=S0Z3ZjUt*3#s>MBkYy(g@D3isH7@|k1!c$ zztF>xm`_AKR}?+I?Z@l<2*Smm^tMtRRr;Ml-ug|(@6)YF+$$-ZJh&FM921~Y*%Zs9 z)UY9ZDHKW6pw~%poFcS|`Wqj?U%#5j*Nk>JlVe8Q%6dpw*9Ep~r64!v7{QNO6k>j_ z$KA`P;urtpw5xTHY-!mFFD35KS)X5{k@0ch>RlsFIR{C_+7tAz?LL}Nnulj?R^uf7 zD4&A8GZ+d}9@yYw>`r za=J%KE_|b3#D|E);Z*q6evlp`K9Fd(fNY(XjP8yrNOedOT!{lpqLXM`zc$9*)xyoA zvLO7Cg~V^AgunPRITITKgOb(cXV(B@X!05IMO$FttQwtcYfMBB>?ivbQqj|}hVsug z(Bjo2RKko$t?S|;d%{@KQ`SQduJS^=ldkBjXi9H4>?S5T(+PL(YZ`P5K|16FE&gy0 zbjJ)cnT09fvug=72AqQTlFj_J=Bq(|i7^)TiXx-85#O1HkP+c6^n>VRGU2N*d;gdu z-nlwcEY6in>A4{h6!XElkVII~m5@9$cQ^DYvE7d&un;!gqj+U!= zG8!9NnD1q}_(*>Y25(UyKFiKAFJ$`3+j?D+dv6lCe(gAUA?FQ0)^b4f(-p{Cx(fN* zH6T^A6Rez$qju3-QgZbh*>`U;-KcvLFTEHGe#(0II^ZpxZkJ0ka^5k=W7{z5=Ve;@ zZ~@NT|AH)`2`C<(Pwz12$cr2LkYYYSf4jNR_O6Ze-1ezh6O)TVix=bXky2W=;|Vo# zUkqojcQ9uKbFIO1DS_XcNUNkj?%i^T%F0`@FZ81^rEVVXkc)w%wRdTom=qqKT|-if z7UPP|gK%fz0{RX4IMgbN+1qW=NuwIv&rc@jK9<7VIdW)l=?~eK{*3+c*NC{vu7$NS zn|L)Y8)<5-3QXgf;B8@1sH*CQ`$u98$%%n-;*ngRPV&(TI~D&%`Z866kqf 
z1xAEV(~UE%Kz6MzI$kcot!Cq)ewH*|e7=WY=a|OK+G9w1|7@iz1`Ki1*PCecMh%_+ zY#{w_l)-FDBB#=}%Id0f3N$C*reOxlVfpzz^i{J13_nW&zuFvJUlvX(EV^lTbq-O? zXe3c4+4P#rWd7qPZ|U=ielUjbfkN$de3Z|_hz4cgnnrQW{x5Yine`v&G3Q@)VEzv~ zuyEsA{x9^f`VaIl_^r)XOE=|f43=8EPg}tI?mm`(29!9iPPW!%!Cc;pNL$W?wHiG6 zi4*xhjxOR2@ojj~^nbLR7n zl{j}ZE?K+GHsF`aow442ZqPdFjw=7ci5Q-$m>lOycmt1X9mVDRztru&=!5y!eDaU^ z;KsH2FZ!_k7k%UoIN+=7cjQg`Lqg_0CkgXJz@o~B{Pq&S-by<%+~IHiC(jKtL)*v- zle6Ib;*)DoCZCD(i`9Ng55x ziX)!I59!{=lZe_XaeSybOea+v!AtoSP@b%XrQP~Owd5Yey%EJFclTo2z&vQ{OoUfC z9#mICgseP19#pppp?#bz{P8ve9pVC`Ng>2(*#+7jkwf}9W+1Ywkkx(slS$|ig-cT( zQxTi_C|h`f8Q$5!98F$?8lzSatKCIjui8qr90M`PeLwW4+T((+k7$C>VfyT-3H=%K z7*8cXgq=4;91vBr-VEZ!zHaSZ% zk##(JR?`s;xtT=ox-NdWZBD|r^T>L$P^hfvqzxgu$hJwtPMzCStVa=E(mZP8F3g(s zj8d~Jf0+s6^N8M_E*Sl81fhjjnQ+%>pcY+4S5@Dj5*|im-k%)ch8zHPt~lIz+0Qu_ zWlQxhH`7mFU$Cx4uSt4$KgUyc9i3Gghwkfsa`Iyg>4)8UbkT&jOngN+Ej!pnJ$7>G zp+Xt{%A+}S=vx_`qHcm)O{Qa}-3S{Hs}Gkq%z>*H=i|s%9rzoYj_aH9@y^L1VtXuu ztr3)i<3H=N#hLRd?}#oQ?An1v_bZwF2j58RvMwU|aR`Hs?q!lN1^;}o!~;@U)c=kr zE2O>+Ht$}DYx=?%6ZKH$ZjUwf9#w^Ksh7l$nxNHcam*`o5FoML{8wL|($w~R3TBdw z^eGESSm%e`HE(I#?!ElZ>_vD$PXjOXDu6zxp4J_DOhkTVf&c4ERDH4%=)Vdj2kmbP zN|TTD?*KAkSCnCkq#w1&oetSf*HK+W3q*^J@z&q-xUBCKyj{5l%D;xgqVFB_^tvY=gL=V+59^RoC-ER$} zo_3hgv7dJOE2He2pJd=e0<(c9M^4}I0JX$K*x9>-UO(}X=pD|X8l9Z28rUb;8-$zkxQkYaZz>RB(kGc zmnpbV2<5zCc4Wgab!~KEUKP)#T@sGqerSmPe6K{^zRQvo+J6{##bE4cHDPBwO2cH! 
zAaE-=OZ{KXN8dYZtjjw-(ZUM`q|Rv@jZ8d8a)kBqujyF0N)OL@od2^3WcASlThk4X5d=HdZi-P!Z zQ5-+d8+Q0!Bl%K`aFbFrR5>{k7X=ZBJ9v=Jx$H!wUcKhI_C29LTc@K$r#*9$bBug# zACDC)8_9%yl~niQFr7cTlx*Y{AolLYw@0KI$DJR@<`vq6ygNf8RZf$2MaS5w{&Vnf z^>4O7Y8vhheGmSg&&etEW0B zDW;(gdw2%idt@~37ISv!0g;~GMw8t$a9p?s4eW~`I|eiGrtDZ|yqK8bXx z`gi&uVksQkXoKS_$HVLc1k+x{H4g?*G3_^+M&{y)*S8qyj6lx5 z!!uB2%`B43IRp=Fjqo^0#V_h%oC3dh^yb02*ePh#olQSUl~3nmr@0cCn`eQn;u@l{ zY$;JFmO)MXe4?0OLv~MDLgUxUqHc8$iYcmK@E;!gzG*WC{;8mb`bMPKLj!D&jECEy zZy|J~iy2thMBcTlQ)}6ypeWW!>wfQ~4^jqU{)jxjEzyKcVYM`zI{^-lsUY(X-GbtU z739{vOJq^WdPaPk6YbMvpbb1}_pf7g<<2-9;T?rQ&;3|j5CHjQk@!7tDm+!v!4n@=dUyB>+#oV#Gxk!{o|vg&SsiJKXmotliFMEH30E)TX6hj4z@^xe5@w%)s=^l8*8kQO2R1G0A zwZ;m=I~>5ezJ&Te(8P72SLnxBfLjvPGGZsU%m(l}V9%z+hlbQt@JgW&`=_yegSash7?0X}~o$5Rq{3i!5&OMB~r#z;U z`e)Nce({iQ8H;~7b*xc}F`Qp%OqRbXVjf6$kv8RQR2lk99$7cC)*rQDjpI194c3Lm z(Ob0Zu_?n=6_Ab9R?P1MTWP|IIM!aDCB;WilAzc?+$p{vZkL+lp;Pn0Vx2bZ)W6MV zi-o|sWHKuMY=+lgWMFXZddQ0QA;lxp!GEhSjJ(glrA>3N@atvhoTdSWDZTXDrW!hJ zZagN~s6eD-DqN4=4Z`*woL`?(*sKMSVA)hl`@;>Pcg1tC>)8xvmyCh%4bRAv@8Ohe zP{)Wx%8)R3HB?%p(8Y6g$etiGHs&f|_lP#0oo@l#e&1pfEtWCi8}*6!{(kb&ClW>i z4^W3S`(as}2_Adc2Fi1L={?mU=Jz2Ud+|vW&YgdrIG-ti4xvRbKE zaWdRoe+T9aZe_RC2~Y4P}-PBZH~dnYFHLy(O;0b4zn5&3F)&|I8G zl8c_vyNAta>BLfURCgJC=IeoC_)569wG|8XSvoOjEUxd6Z}IBeK_ynofsdt|0O7|# z7S9X+1h!yr(;ISV!yRg5(o0=(0;%rH0{HIM1^dRGfgclPh?9~CoQx2KWXY*GqePZ) zIty`8TPU~+7B(>#_R|dcbYl8=626PO0iI8@Nygm0P|XcN^micGQ;PWHJ2As8JL|Cj zK_EWjr=!t!Er{#0q$7$O=|Fr4kzJNYUzBm^^ZCB8JTw*Z;uo;DgwBxG^T&zmdqrOKa*_5}vRJrIZzF*wTaPT23@!bW^iX5SCn+a4%s9}oyI9Ox9 z8GZ-!u&2MZlL6nO(8VTV{A>%NV2}i}_Y>4Nt|J*Lx5?^TRv?+}3*%-{R%*jadjHlb z;uOe)^|L8{Y@Ekf1Ni@%Fq( zSnzTQS@e)%m*F%<-9xE_55M9(IMZ-jvRt zTZ>$~D6akgtNQp~05bPqf-vtNg5coBo&Uc8#PL4> zjzv==hr1w(>-2wBAEy7RKAitG+~ps`7rJp5{TF>K{uh17=e@&1t9If##|0;9uw-#% zD&NYm0-nTmk&9O+V)WhpuwefHy^)d(8eX?Z?`;>XZf0oQP!`!2oJ*GVdSk5f82U}8 zi}=JSAV=stbuzNWeOJrjn_@Vc{eDPJx||?smI*wfWlsV>29e2*arAUYB)--8PQuF6 zNzvLe6q^~+u3*UyzXLrd-*7xEYXU`q6yqt9zqKrt)~xf>B82Sw`8jCI+)@9 
zop?4Hp+uUX6W6nhq*wkTn4}BYUvy!*!DKR=WJ$fY`VgavGHhp~pdg;W@@WgGleHgn zjI;eg`l5@d zo#uLSUhfkrm)(OR;b*A!t4p}L{}c&*VM=QMs)1^bpvzWLPs_JTGD3GQk#ELz@H*cd zY9w-qRhAzqDf$J*Ymc(K2KPdrmL*>QJq=$k8l{JHEzxe>De|j45NCHyMa%4yq;6m; z$(KBgmO(Ng=2wqP5=~I7ldxO2Pi*ODitxB}JCv_qh3;;;#Ccm5*x+>%zEtoEGY8R3 zNEE$1ZQ;Jc6FR@e3_1)1-3Z-E;H@ekY3J(bnE9H(-mPF>?0<&2BPS_LNTR(=9%k{5 z;E(&|?Bgey;B9{#Lwn4Lb6+(cvvP*sg_LPYy2!!@9vS>o0mh~$$lQs^ApUabYwP+J4)I`jaMLM9ww#$Eh}q`@liVUSrSZwn#4G!7~!|Im-r1i=p zkk>7x>JppiwMW*lyx)+KG?T*mmmHk%`Y@TEtBuDVtYqhYUkOb|)IoY<8iolvSoord zY|hcamK()XXj(aSa-Rl6d%baQ#USY)pM#}cR!q>1Bap(xLGQcmWc-)Y)H|k}a`w&z z=My}V8Dm85zpBIotDPDBZPCn{y+!omZD;Va`aqOK`C-X&#;bDTF-yae&J7*$2zk_N=^%GG$Hw^OHK-aX7(09q3h@#s-svdv))R_I;${UymCwnvP9Pye@wW=LUXGan25ij`$fILU(t}BXrD&B8~%`4#-cDV z9K`q}tFY$B&)_VA!wY;X5ws|z_V6>Vy|zZhLd9GU3m*MYD&c-K_Bw@!UFto+JxyN zK_q{i4rmwIfyLk5nA~DZ6=Rdgjho`oTLkc-vXY6rC<=$8*O3$xL1+6x9L#<)ft_Md zN*+fSk%jZNk;7xnVSCnUkdg?a4aKQ={^4Wt@YX)qW-UTz%GkpC>@w!f-Z}JTk^ocp zPK9ViOI#v77QaQOKor?a%{GNm-)|pi#;ive8kt7EI3~KXlXSB+BVc zriF`ptW8G>Nn`0nn#WHeUnH#P+Vu)>?S&lGn&Sh@FSOCGHoa8;qZJWzm=8+N*5Upq zmN4`@1}!X3!o#oxY7=yT7SwEKb5r~=Z={*t@ZL^sPI_XE(=xO)&LAS2Hc{3qo7k1k zL#_E+1geDuVl1o6myFs*URm&f_dAhOVZIVK`P`JMzKY zu81AA%_cpy9H^cDnfBcsizI0q_Jzx>zBs75jF5R2 z)%fu9ZH#+;0Ad{ZTl0=Jml3o6QVy6LU`$mzRU8dNW6GhE@a^OI_8_d@bC?y)A zfSp7jY1wC3vAdEczx>MYEMah1RRzvn>|@qlKLoYc%t1-il3Zu*F`LrjP)y1Rf=rK- zh3A|=p>Qr|-)(JTA$*r=?W|_+y{{$fydTq|YpQ(XC+g63S&jIp7}1UCVdSlZ6xn>_ zIe6QirB*+MX}f7Q*~k=NNplJn(NxFJE5w*H;tNPDYeRSyznCjk)9H@jI68k?80ptZ zg&W5g(Iw0$qTkU;8-MFU!DLVNgtP=sS5PGGC6nof)hWF7gOnCTWsv+$BJ|=ELX($$ zCvy&%5wk7N(C^DOFzl!Y>1~p5Lfrx-%Y(_=eSpJ#k{E4qhqSEvLXST6#Cic5T^@Om zxTU(F+0#5++xm&XP#x|6B2eGeWzi0qVzzbBDC@GW25z2?fvQJ>?p>q_M65MIlYMa{ zQQ|Lg-H-(Ok*|mvio)jO0xUUEn74njpln{|K#var+UHZcWUT^ZoOOq<{3B#s%UY_a zeV=si6b3EdUYNJQ4B{3|fJM@JBrq=M^a~5(;D}glO?mD~^j#Qba%^aT zwir?MVzE+2m$qHs3j^H=AYY+|VbA(lwZ=Yrc~vN!>JxM;yJg7XKYG|OV;@ZU8ciy9 ziQ|bvA0XlV8ynshsz#CIK%Y}n^UMluMQ4ZS3BU?qeIqe3+#z&MI26DxE4L5r?F0IHs~GVg4Z^GB+c@n 
zxF=;A?s6zV{TvH=Qz4P%maAa6hXEY3y-%i_&LZpD_mF+J*VDJ{69mii{d8gII5yg} zm=s=qMK7OBhm#uPsOi08T>aP>C!S6w*&chys@PkEQ(28sBQxM#odvF05=09gyR(iF z`nV!D9`11M;bN6E+-%#4)??HJ3aDB*Pys1FvV*(~AEJUD zIa&UCE}h-G7|*r;rU!aNKt$sgtaqpXyyxA`P%~ZecH&phqaJVrwN@7 z&)K+xc{tn=K`p|P;E{ z9PG(E4y!V}k*goYUi2QII}&2)F85M0IBo;>s}$qh5l7hB_<)X9e&kM;hy_MN)RwB!mUOYkPt+~se0e6USN+PMwkwgo-5Y}hfWYX1Wg8qF8 zu;$JedbxT7oENl&!_^MJ@d-#~{{F`9nRb~x2|mK?8_J&9w+|JV)wh@-!N@BoOY&93!%?HsN)pVUlm0LLA%2pv_7pqNXi`RSWiG`Sy151=XOo^8kFj zIUty$w1`Bi78sgtC8{3kXw?(}GcIbO-o-;SWWf(&Z+isZG0`Z&aR5|{grj5UKnYla z(NGCH#cngN!nlMEDcmOZu60c7d`l*Oo*ta_^~OD~oT;kU3fyTcQ1L%pgu>Sf@uqqR zIZ)_AryL8zzr&VTsCcy8d6)hw1;QK9>H=4&44>2g}^J%l{X8xc>)w z$cenQ?$X-8GtI8BmVG13&pzU4-Ky)sJJyqF{VK7{TDV$)Cp|pUY*(#dU0x{9_fzHY zWpDQI+%1pKK_e7JpVO+99m+LvFU@pXE@n#?C_wwPk_0Vnv`_ml1{ z=;Lon9iky(Zqzni4=1q?36=$6*4TGcDgP?{QuBidDWHmrlKD z2#Zf{qFLEmVE?#|gq+Nze9g^d=FcgQAXuQ+(_kC zGh%$X3-s?ca#epLqJ{x-mIb#sx_aVdmhEG`i}8FhmFD zgYU_wi59md{I3(>r3HyTnu;KV1>+LA&1@?$=k9q$hr zC&y8Io(l&#N727?I>6!}8Zyxm-F?fjeS+T(SwSefPL6@MqkQ6G|z8DQ_M6*yqpOl&{AB2FVNFkX-% zHy8b4r%pXX9>`bm#SdnX8QjN=^cNRKE=8a3zt{$49k0OW9W8W{;Pve+J&K9%ud%rg z*5lL%)x||;ZN+qGTb)i({tw+-m_^V;2KFVyLDd9tqTl|P zq^ntg*%n!7v)IQnul7>Wgb>JDHWPa14ABR|wU9D?CS7*;GS9MF2>-r4jD04HiE2YC zIX*>&ZrUY=$0kIQz#Dteec@uFcH}HbXXXaeX0Hu;3sMhm^thc+dc8n-?$0wkZs~UBX$VT_Ft0euV2?Wd5 z;ccT=lp7IB@BLjUSZxZPtT z+Knp`l*`q0_AV_fzH5p{cE+NZc`H#4@Wi0}pARk;l5JLpX~YRLlx#bQ4)!}~Tk&T`zPy2i3??yV9=R-j zoq{i#t}spCF5q0@+w7v2kEGL)L7XRp5f>MMdG*+q$1|71-=+sND!z*h&zpo9J;5Zl zc!*x;cBI|)#lZX&lnv(}k|{N1{M0YIVJ5eoCW=+kpb=gAb=@b%_--Rc%H?B5S_d#2 zib%rmD{RhHO6yD`1msbJTr{L9Z~Laaw+uyt!2Zc~2(8)fqgZeWsq=J-i5S=-(pzw-Zov zS^;LPZe~kUuFy^6BsjaM6wS0(WWVV?p)w1;lb?S5ba2NM*#9OO_2*}UaJ)Ev+p7oj zUhgJlHjM&NC{pb~Tf$Rmrz^^nnfWFc1ZdqD9^c8uqT=%~n01lm77vm&Z*`#hi5#tC z8X?$bCzy|a4$=F5uoA}4*jv&iVBV4r{N7E1`|-wtBeRH;)CAxJ_mH|j@r;^AJFQ-< zjP3g`@ogq1kkF@@I8QkM6OKyY*@+tj&y&HB=tX3XpvzDZ{1d`USCaRJ;pBDeXjGw5ccT3WZl ziKe$X@>lj~z~c+mP-!?sevAkSIwbDk_g5M@bP0KVB9wrNGuTc1j4z^_cts0N(#OM9 
zM0&ypNO(9AZ(I7{N3UGs;_C*wKhrQbRubOia0py|L#ccM)`w-{+&4NXKFp&*VMoxX zNtP@WsM|Kb|3iv^ntxyue-2|g&`e@$j zQGReY7wUWW;HEMSGLf4G)5KQM&(q~_Q~ybFrdggAn+lfom2;`z@yXCIt^yX8t3%Wj zRY>%bhy6YcZ2dMr5HF9wYv9b})p1#8zfEX3;~35Q?!X@z5Cz*MYVgH}2e03y)0Z#Q z;gy&t1WY$Mi(U zL*jU|4YfB{5s!u{Ch0>VjPcoodk1IYHYpRj=DayhJ2nOD3`NoQY_8RG5ei_ioJOha z#p50u;jV%KM*NK7X!lRSbFCBkb2o9B%|`RV)+UQpx)Z^VFx^KJtq)P1J?H67VR0NS zJPUyW-0ZCIn$>aZq%yZV>At39tdSjMS7%#cXjVM7#!p7)1Y6vExtkVSX+njUA~udx zGP$Rl=<@l}=s(FwP_{S06oI&SaXpW*6vX1oyh%_h*MSd4_Tv5Uw`j500zkNrPIx9w zTCEQgdv9Rxd9Q?>DQj{3Wgq;#-U4%uFT`chv&hs9H6&?KHKp%YLT$x8R`0Y5tZ2$+ z#SWar2Tg8xai$Q|$TdTnkOpjD$HlE}sdV+nG-$GRB=eu~aP101Y`ZAHOt%B^%fa28 zOz$vU_U$j(vcrMor|p3PeldM~dn+n)&g1HIhu{{)U?*D-|z3e*0a9rdET|xen0#9ejR|<9z!F6 zOOPZ=gms^H;~`gXu zJy^TdQ`37%veo-{K=NP~xzm*hgKkoE-71?ECFR46kX7(#i#~D=Q;{V}e?r_0GiWNa zLiqbJ`pn=4JEg6Ho^G9oGkd2%Nue9=JFpmL>>sDIw-e??OEaYSYvR250&H@2GaZzk zKq7%RP8!$1zWu)=eY-@MX|e;oIvxqbB30Ov;|?y#YLE~SOp(PR&>8xf?YdGYi>}S4 z`3lRiW4$tvqxEQkt^+>3wG|nRCF2dfFA=*ULslM^PWvxLflurQ$iHL{iJu$=gPo~J z?@uI0FPU&OfPz_*9d;j#!kaZ>>0oFB2DA-x#`dUqO$=VUsGeSGKMeyOLi%{m5#-%0 zkQ+H%arsSGT9@@D8j}0yZB0J|ySfZk*YX~H5m<`Ef=n`hXDW0=DBv@5T~MlfE@YXN zf?Cf4{Bchr;*M3y){b4E^`@4%Ni&No9L_+7Zxqp$dva{ycZ_CV(!w@oA$Zhc9w|0) z!LGw9cv0hQ%%zvGued~6jL#{#G1!7S6d@60@k=@g#A0e zhduhALEp7&v~R!)hs(Ut%e_j#+&n-t3WBM<$QO@xNRWvr1r=89#@iqQ$Mq-U6I~;0 zr=&=BS??pAPIiL(y z;ZsnmMvE-<;v}SFZ-|qOUGP5hr7(Y)2R7R!0YAg*bkI)=-lScps&!&)>3RdC%ha)_ zu^-O)GJ;mQHX@yMJiSLNGi(FurYm!jBP#&k?4 zmwr$lhYiNW=w(A4JG8t5kzg-ma)h9SthxBcp1&y3`4~msTZ+9dyrKLjgcnbp z1=T(luxYvvyzab%n;v#DeYpv;-Fynxs3?$~4_yy2uglT0|D2_Di&oppc53e41!uAN{_lDjey_Q%CD!S{2BzBSy@ZirxH@DXv5qt-%QM z&I-+<^ohIULZQT1g;7jd%yig#FuD=GwFk+t?O4e%+xh?*KT`j=?t!xt@jX>0G(M=t zY_v%t3-gM3hmaor2lRXGL#u8+;hQc#Z`C@cRrFteV0s0E#!rnK3EtXt05#=e+my|flw@|uUqPWP&m3Z&i3}KBmB_w-n#O(Uuvu3br~F_d+JRx7CFUo#>w8ZjPH6yxMP*^qHp5J8HQw zF&FOZru}XJ@1lPGH3RYcDiIS^B3-zscQa5(;o)Cw^}p@G_V?;zCe+8ea62Z{E9~atCbTOX0T<*4$U}cbg)ABo6Zl3h@u~eisU+63eMZSp5z4ypCf4 literal 10209 
zcmXYX2{cvT`@RSv$&g4&6b*)=TK$u^&epqmDkiULJcu??8{r^Kr zJ8if5A6zHi})gh%fPln#~qUq3f44xMZ-Vm~e|RDM;c!d!bX&v9d&VL}z1oSf|b zjg|j3_9Ee-O8yxZ1==DaeSW`D&|)fV4|xp33)XX$o^KU)HV5K9!5r?@#EkBW{ zt;51e?%WmwH{qbqAVG&{VbP0e+{U7Q407?|(vFufct}zBZ^L|;ck~Z#KxyuqIqUJG zOdOoH@(}v*`FN>=5e}@&!^Zv7&8(;B3UjPfg>#zag<#SL>Z*~jxcLe=L|>w@Rt+>f z*_*3?I{ zxqEV=xaVhGCz?zNCdWqz*Iv;PuADSZ=rX#T8}~$!D?8tWdn7!Nd*i@+h#U|H#fO_w zs@hy=+odIJ$aChVl#HNURx6IKc!6EAzd=Vo3qN+0lJ{5rxl&OsIHN5M-Sn4n?Yd*g z4~u@dbI?qQouf$Ng2*hN)xi3zy5WWfI;Ik-W?yhyV&=)g~+p_yCgsN!@ zYYGP-;iDq=m6M(Dskt3jdVeV=wf8VDS@{@e@tI=IjlvSn$fi8r*YaZCiql0L^-t!U zKG8Z(=dN;&s(&TVqNJ49o_?5j@M}HqYep$&^~ExtMOZ0MC$E~9J?$Xp^M_o{zOX!w znq~>-idzDY8dY&(FPHMpR+REKIv(ac-B`veytJOfP0TbozcZ7!Kg> z!CED7kU9>dZiiW~ICs3_xD$SSn~jppCR7=>kjT5#;pp*h{8q1uZ~9hY$1@L*^m;^Q ze$rv(zMW&c%Z<@Q`8>`ZT|)gM7vO_Ksjx@S2nIS^h_X=yJ+vW~OzAiZ4lfl*b(Aq| z@=8Tr=MpyS#SgkhgwpdjQb?SNH<)ZN0mnl(snX8CU#G?y6~2?68x@nn~*E9Nd;hv^@e;OvE`A#P1FtsVa!{goU++AEv% z$f!e>!hFzd3}$~T%!Xxk*O)2xv z<8vu=W!RDBrN&5SEu9b=a#rP{sC2bTSC8>&BNa&;?%R;N$@u`jpmz3WB2T5q{`Nr>>a*M zj`Nm6=m%dMihf7VTQ4KVHH`Mthk4 zk69$w>mGetD~@X)QZ!q$6p}Q{@k-DnFy4L?(^a!^8Ak~gzBos!Zi|6L^mA%`!2*l3 zwIKbaDDZAokP?k#dfHP7KW|ATF7YBz=wnXMrklO>Zwl&3CgI+;0$O@s1-?b`!75%7 zE8PEMX3D0J5WgzC>^wqb$NLh_)KYjUohZ0FO$n=y6_P_ex~Qfuju+mS!=g78aIbzl zU8l@MvT|9`@2g@;Q_S z&zcQ}Z6Z+Sq)Yx?%B0gnSFznwe-Mi~9?n?`v zRhy{Xn{KkeGZ3_!*W&Qgc_1Zn4d(@1zz-uUY1>FC`Mmuc-tJ!nB~zEcl%)W=b~=zz z^N374H5I?ioQiq!hIrFu5e9h=(z8#wRe)tY%-itE! 
z^jtYsJZvkoRWc9VzIM=P+i!IFHF@m4kjg}tPQ(6$QS#X08}nOK1RXuOuycnBof)En zFFdTlA|Qtr{!GFpjmg;EX3Lr$V~KtZLq*G{0vjxb6+OZ1?o&agBU?UV?&l`@`G*{8 zq_i^Z%em~!G6NjJFjAux5c{nxKW3IF$sub4j@6L?-!fjS^Zi$?W_q=7NW|6lVn@z1*)}lki@(A(4&K!!LIxg8zZ?2hwpZh2TP|@)$)82y;qe~ zemP6!t_6^Ou_~N!C=JF%?ZullE6CC`6~-l`3QcvNQa{yx;v!m#WlOwZ{?0sXjo}b$ z;(%#uW$C+JEZR&nWxBR5!gi_Sg8S35$){J_aN{)_YF;&q4BKXzWoY&uryC zd)0|o&nQ1mT_nD7AI1b{k=R%{a&Jx@E*ouS-F$P%cTN=PvD`;H zl(k7tWCL^`SO$_UGO$?lIrIFR5o%?(lXESP$RFcbpsbP%lC`>!-1?bxt<}I^4O-As z^PPHg_)sw36Sh3`#pj)IV0(5hY&Y?Q+8|5p+PfVaKP`dpKev*eNHb={IYTqCpr#Yfz3gSb*~HV!?w#a6xyi77;R&=qXoAAV zGV<#)#nlrUVeLj+^jejU;bt#b_nAnN{QX!5Ra@#+u#ZaXj;&Rzby#ONg_>R8z)W;H z0(NQzn7j8nbK<=*O5_HU?5GYB+oTKqpUyE}m-UH(+*-^?XTkZXD!q2B1bzM)z_4f! zy_N2Pg8Y04&{;&ZziPocjyC=}xCTpR8PGi|CqwMlPUymV4EXjA7gjBXx^-u%Xth6j zZ%IUcjUA4XENszG0?pq>n0&OA8tBM^ET)<92Jca|$qCejxk)svbkTulk0P3mpcQ); zQ%ddd;ss?G*P#iv=9H+}mlJn)W%3}r#q^A0li>Em2zZ{_MMd5|r|jQQ^xT{XVJ3#K zdgTQ0IPO48= z)cHT9ZvzgH#dT?%$`8rZPHZx;oHEub(FgMGH!-_5tpoFO8t}5Rz-&qQRt!J20*fj> zbC|^vwEIa4&W($~)}}jTn~N#7x#qE(d)4uCmk%8Kk^;RYE(Et{!iSh45-jk**^_Ru zp8xJro-G5eJ;P?3zb8XjTmn6IVIp#ze~^v>NxI8H7qmk2apg%3QX=Jre_W=Z{G0}i z+q#q*-ndSEzeHf;ZwqRq-AUG|)G+TXkC4sbktDq4GaZ*P8IKtmQ5)%bAZ+CkY%PTT zoEdOCX%_|(1u{uT0Tg~7!IjDa@}wl1HQKq4xqNpeabH_Ovt{H#WobRFNQ^;u6<@Y* zUL9&nDkDz#Kmx9d;O6VM=+%c7WXH(K)}_aclR1*#$ee9`cv={`18-A|13# z?*ty#CrFIlw9s4>f!I+qyP z%8)>P26m@kVg5dIrLraoRKHPpCUTX3kn|R-!;c{W3F+gIL%pW`C&Nw6FFadjH(#Te}A%f*n?4BBF*3Av+l z;klavgt;hSZ0|LCZ^t_}#kGvdcpymydNLR}s|jWMt*GAV8K9v%7b`_;NR?(VtG=m; zEIt{Iu8~)0rjj3=2$4r~;e6bud=H}NPqV1zjWGCikS^S`37R&Fv;4>@Xr;Ih-3^?O zDG-HEuQ!leKA}+1R13!wPBYfOl(Bc@BqUF`O4`RakchUKkeqCeCTr&~i`gT1iob`h zJ6ejT9Q{~hjkoN|HTAr-h=1hu$%$l9mJe-pK1R+in~dNb0e^mqVEgA0vh>zNG8&o) zDfefA#@CzlbKN&JwVpb97L2fbU&(u;5bEA?IAy>qkI#|ldeYnfiTQkG7 zQ{2ULma382sPr#>qjQ+4-`RbFQ7OhOK>dWNCog`{KV!a9i7t=ayE_ ze{$)i>C(Mog8cO&rj71SW|HzneDzBKrYAI|1rpV#`ALWVaJtv&nf+I#ZT5+C%Jh7L zoFF6e5x=_kCa*-iiJ$a#h1o(w`{n>K%8wBL%(HrH#`~{iaJt75NT(yiJRDHkdYL|K%TyIml0qlN3aIzcsDb 
zkTpGZ_ybQt!19e=z2SsJOc1y^Z!%Rd@a272=wKT8uGn;`hpFjX{Q&+h%W_ULM%Q`JNeEsh=j?e!D8NF#=#!U$n4%f*Lo4w}pHA)l6>IzR!5& z@Co3_Z$rf!j4;d73c4C&xH~5~awmUZhkHJS3p3`+2yI$!!=(-hs8Vqj-Zg9k_xg9V zYlZ~(rukIv6~pzyRQo{g!ryV!Li;(WW$AL={(5ky<_Iy^lPi2tsK-^?q|L4K+%3e{ zKIp1yEBx~J7;>Y>3Fmg~#bjO$)TUSovn>}0%VtFj8x<|FUBwt@$?xEbpUl9|Z4SaB z%lX1RKmTG=iX`{XZB_2KW!5e+5vJd~3V)Wr0>4^u zu6glJjD0MDTRqIV$_{^-&h@~2+c6GDZGADyOojWs-$VHJus_$MQ$^U_be5*|iNYQ3 zL0aBZ#_QZy$}9G&;B}AYbDU}lIllA|C*tr?jyPM+@sX+Gz5I~L5wEG_B>y_ZvllPs zlzk}Y`N@}YN<6AKX%>0B2ajENnro_f@6<|pG0_z~Wz{N9Xw%!~rt-1%L(6#U50>%P zDJF5$>Z&+@eD-nl=T~xK6-s$CjVd`&$J=N_-U~tNuq9pg-+snM(*j-7Z*f%3WoUF2 zLmXGw!C;Ub%4yd@-n|TgOw}=PU6F>|Gq$YNrTwU{`+?eo>?e|;j#NqeG}FO3$`r>M z;==R=G;qRZ#$QStb06E|z$FDx)LIL6nwI2(n#mVi^FpK`d=j`C;CE0Lm+Z`x1xAvJ<(A9|hJ9p(0ho*L_b9foE`r#G4 zw=j^IwX2qX9Mk+hE?5lq-i~BOff?#_Wuu^aJ9sgZX_re98kgvSs8s>|xu=sV=xM?^ zvvfH2^cxWy+sjiO{78I=A7o7*`#xHCj~;nBM6J#&L&MRd{LrF6h&(WlEPA|;COI6y z^z~J+5fu?3xB4e>YFLBuRsm% z>Pn;Stt~i9*PYxj8`H=72s7c9Jn|rvi5kBeIy1MBkOPNl@Uwj+&1(m-t@=$*bo8)w z?th4n@)G#FkBb9)$05jNv*E7K$;F)WwEV(Tl72-FvPB;=lCNDDJ)d-BWX0jKWhy*Q zSAl`qh7i5Pn610YMRS`qQ52ogp6H>10rMkQxVS&{xaSalwn- z)PI#bsynqIZ&?s&zLP{IZ2d_MpKIU}DRo@E8KaraWN!Om;#0+EIOPk;os1cTDTspUHa}Sp zyF#LokWP*y9>kTo!$j=%ezWhl%m}~uBo$sa20>3S>=RqgPCPgOh08>#-)b9B4thj0 zpUJXQXH%HBVT8W?84nJX8{vxDT8unhOCs-2B_XQ1aO(I}`oU~7$|tyzoH2b?EOQ$y zNONQYgSsH$?MAXSCy~B;f0iv!eM0=a=i-t<9XxH8M`sj2qZ$V1iEXzrr6 z@EZdxVD!K{Z!@))%47BRF>rih1OEJY5N_Q+2t~%vSj(rkDChfq`oJNSO11=(m7_{D z_0w!vIO7mwq}GPyLx*5Ae+t|>8c(G+J*UQdIc$%fB3z`2RA?1U>(^J3xQL06*=CQK z4S6JA_B(%G`!G4;t4MWkzM!Q$jL4#q7|fG%fj`CPsdfbmi|xy>>7X?+h&}+?^M=W} zQA6nF4Zyt^eR!HAiun~DG}h0A%9t-8K3|=oqB5WT_a%c&YST6Q^yG}-y81=pk(?_K z(|u1wy6vf;znKY?I>ZiZ??cz!vrvI+0Rdkd$>2%_nEL294c?N1^PRnE&z{RP*D@cR zCYF;XpGRbQ>UsLIH^eOJ+A0E4b{MtuIPG1`AoH%5F0W94t>MKWFIS5PE9>d5%GIR# ziUAm3wx(@?3t-ZM3~YMcL9B|C+2jX~Oq{3({0UiWX6IQ+5A`&W4yDa>!`>}q>dJWX zeS6(lZe58nqMM0P*E72LS~03VRKb^bZ_}->7trSxxwPi+M5H7W!y@`=ZvSjp)T9B^ zuU%nZoaiRG4{yTMVOgA8_<-)3yB{xfCW23WIb6A{hYO8%jIbk((J68oPCSWQo% 
z-N6J+la~|aE=d?{K0*?&w+kL^J4DwlSB9F&8}U-@GsdJx3{zKRgZth>rX(T5w7u3D ze7`l(<2TZv|Lr7HoS%SqKi0tUj)`cx;|A0E^%MC$Qbn#jdW^jTj>yh*f}5|$oo`P_IL= zKQil4a+rZ?(NA#@V20b!90QvhHu|oDaRE+n<*}1;Brpq3eB8ziRPNjjz z$MW*13VfHEfvk!s_qby=)*lvynvA)yu3;INzOG?iTZic3sXrj+#Sst&HjxR{18kqo z7e*!X6qGEkg<$Z-JKg(LeV2gY%y}GcAj=o(D0ly9L{ApuMK6ei8*|pHWOT=itQx_3$vtsOCWnfsOEKW7L zBEYxf@kv2C8g1=|Z&OY}OGrLj`1J%{{CdN5Z>9)7d}0avYZSO63Q1(|7*{qHXo7{D z6#n)bhyFq0f{Vjtg1qww=HD#k=r7)ab55$G#cmTQoqhyk6ypWg+NRP@ zjwVEU$iUa0CgwqGAFXh1z(=Ac?1_bU;bwrrm^K*-Tl3_omMX!^(h_KS*_ZBXjDxuj zJ4v{GDt62|2#UVHsdSMMEU1%Ya;uGTO~eK~<|bf5KbEsiI-#bqK50 z2KGk~$$v2e71o`mrV){tE|zcBAC-w07Hy;#^V}d)ejSdRT|&-mQ^8@a9n>kV38pz| zU{kyq)c$)wQw;~{lSL}fV%N#Ud$zIXU-#1a-rf8!OJ&&4zieQ-`W$?xv7dCgx(k~3 zY=n-kC(J5vg=St3qxxPRwmZyX7mw>>VkT&jymDXGbj2gS=ujJxx%rL^Jn+XGzk2B= z5odaGx;^Fv9b%I@>&e|TK12=c!(DfGqViK61|p?k>gF`OA(KdJglf#xd5?(+2jF9s zJN(>x2&PE}vrz|c(&G~=Nr(O>JaTP>k>YA##79rGvO7dRU(yEpqyols`=+tS|)j*bc-~O z4F9I-XGLM@EH!A%&Z4Q2LV8^+5a#p)evD26`^jBIw!j-T?)Ss}i`8Vmv=^*Um4lWL0{5V?f0E}IT$eq>BqSW7>K3P&(`)#& zxMe;`2~WnH_MgPEHk<5xQx3ePd#T=ue?+7C3(f!Aj+67J5Uns@6*WXNr5p76g>$5@{|J45(VoZzY{9SH2^e6~i$bx@@Ig(9!u?1XI2;b0 zYfWi|$9IxaI}1l9%fpj2E&8E;2Xxm|!rCQYnV`#*e*6&+&Nli#tG;QZ4D|B z;DgUcTG*fiYvIKERRFbnLFE24FgSC8lpe_--@`R%X4EvS_*#t#i|-KK;-{>k&s`KzY(HgD(Sz!Had03yZ9HLBqkP+J}g5UGx zvHRV6`p~@>-%DI1&Fyb_9_iv;ik!Titub%qnD#PcBPxMZuGqdh#A$+&G zNW)un@Z>gEIC;MjzHVL(!VAjW$Es=6J@yxK&1;ww!i@PC%Fjd8v=?M$!?-a`{~FaW zlVc(V`Jf&AnAYvcr0YKg(qBdSu%dY<2D(iE{*G^KSr}mR!vip*Zoo`pLn8k3t0&(w zHc}uxozv~FUrB$bj^@05tn+SQPGazANMjK$QB@ANG#X1|Y) z!@MXD)c%x4e{Vg=>UbrBLCsh8@C_AU`C%wObO;1RpRmhv5}USJftj&O9*3&a;L6yZ zv!<%lOnO=t%0IAzSie{}_9`EG43ofhOC<{9lbN1d4-#<|Q6?KRNo zvw~^3918XJtLcMnRqU&h7iK;76}(MP4)AUcRdLkJOL<;trM%ihMLd7$N}hRh2`_PJ zDQ|W`C8yrHg5&?Aio^UZ<$WfVoQ=AroVT47oHuv#dDq7LJ;}zUysxe$JoC;X-o~?a zye$30oR}?zyv>=1c};2sycWqy&c(87o>$Kij)iS0Peb7lFX>-3&(JbSu=|}7uX|d& z>FTgI{F&|(1iJSY^Cv%>CeXfgo_~G83x0jlC(|mLX7=mH9@ExYjNsb^8MD*fPfT%@ zky+V?eZ1yl0#obrgZ!$?C44O};IC+!#BW!*YN{Ifm^VJ^6#r*V9?yJ0&1{d-27cay 
z`KH#zL;SLte|avM?tDGla?XveE;43U6A7IX9;)oWGeUpNeJ3?`ynRP_(6*qMF{hrG kcj)-Jj_ZfUYM`g=*s1U^zo3x7FyH_4+llW9m59*)f66*xxBvhE diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata index 7eef2ef15bba26f49eb7e79079714b5c7015bddd..7c2f8bfeeb78c7ffcd7131dd9a8cca69c635a3bb 100644 GIT binary patch delta 147 zcmdnOc!_a>yn>0jd2(W^rA1mwN|K3%d9sCJvZaY}nq`Wqp@~tdN#aC%?Ti=95nK!m z3?-SlsqrQGC5btOtnLmud;ft1Blv(sYC&dkeoAUFRF=(~F@hH;T%1^zni8K_9G{Yz nTvEv18Nm${EK015FH0>d&dkp%.on_episode_end at 0x14c429f28>", + "on_episode_start": ".on_episode_start at 0x14c3f5d90>", + "on_episode_step": ".on_episode_step at 0x14c429ea0>", + "on_train_result": ".on_train_result at 0x14c44a048>" }, - "clip_actions": false, + "clip_actions": true, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, + "eager": false, + "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "MultiWaveAttenuationPOEnv-v0", + "env": "MultiStraightRoad-v1", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 230,\n 230\n ],\n \"target_velocity\": 4\n },\n \"clip_actions\": true,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"MultiWaveAttenuationPOEnv\",\n \"exp_tag\": \"lord_of_numrings1\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 20.0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"custom\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 230,\n \"num_rings\": 1,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n 
\"template\": null\n },\n \"network\": \"MultiRingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"human_0\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": 
\"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl_0\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"control_range\": [\n 500,\n 2300\n ],\n \"headway_curriculum\": false,\n \"headway_curriculum_iters\": 100,\n \"headway_reward_gain\": 2.0,\n \"lead_obs\": true,\n \"local_reward\": true,\n \"look_back_length\": 3,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"max_num_agents\": 10,\n \"min_time_headway\": 2.0,\n \"mpg_reward\": false,\n \"mpj_reward\": false,\n \"penalize_accel\": true,\n \"penalize_stops\": true,\n \"reroute_on_exit\": true,\n \"sort_vehicles\": false,\n \"speed_curriculum\": true,\n \"speed_curriculum_iters\": 20,\n \"speed_reward_gain\": 1.0,\n \"target_velocity\": 6.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": true,\n \"evaluate\": false,\n \"horizon\": 1000,\n \"sims_per_step\": 3,\n \"warmup_steps\": 500\n },\n \"env_name\": \"flow.envs.multiagent.i210.MultiStraightRoad\",\n \"exp_tag\": \"multiagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 300,\n \"ghost_speed_limit\": 6.0,\n \"lanes\": 1,\n \"length\": 2500,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": true\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1993,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 
221,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.4,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 1.3,\n \"b\": 2.0,\n \"noise\": 0.3\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"SL2015\",\n \"lcAccelLat\": \"1.0\",\n \"lcAssertive\": \"1\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcLookaheadLeft\": \"2.0\",\n \"lcPushy\": \"0\",\n \"lcPushyGap\": \"0.6\",\n \"lcSpeedGain\": \"1.0\",\n \"lcSpeedGainRight\": \"1.0\",\n \"lcStrategic\": \"1.0\",\n \"lcSublane\": \"2.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n 
\"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.999, + "gamma": 0.995, "grad_clip": null, - "horizon": 3000, + "horizon": 1000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -34,27 +34,31 @@ "wis" ], "kl_coeff": 0.2, - "kl_target": 0.01, - "lambda": 1.0, + "kl_target": 0.02, + "lambda": 0.97, "local_tf_session_args": { "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "INFO", + "log_level": "WARN", "log_sys_usage": true, - "lr": 1e-05, + "lr": 5e-05, "lr_schedule": null, + "memory": 0, + "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, + "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ + 32, 32, 32 ], @@ -74,24 +78,26 @@ "multiagent": { "policies": { "av": [ - "", + null, "Box(3,)", "Box(1,)", {} ] }, - "policies_to_train": [ - "av" - ], - "policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)" + "policies_to_train": null, + "policy_mapping_fn": "" }, + "no_done_at_end": false, + "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 30, - "num_workers": 2, + "num_sgd_iter": 1, + 
"num_workers": 1, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -110,7 +116,7 @@ "sgd_minibatch_size": 128, "shuffle_buffer_size": 0, "shuffle_sequences": true, - "simple_optimizer": true, + "simple_optimizer": false, "soft_horizon": false, "synchronize_filters": true, "tf_session_args": { @@ -126,7 +132,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 60000, + "train_batch_size": 1000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl index cd832aa1c3eb1713e608fef452dbe168746e4cfa..83774e73e7f850e929d5da88d68d077099b99676 100644 GIT binary patch literal 17746 zcmeHv2UJs8*Jwg-Qmv?z*Z?II1r;gML5hkL8(c$j!%avt1q4BHP;6HPjgH{h3yLEs zj&-ni$KJ4EFNoO2hLyL^O#%UB=KI(GzxCF8>)|rod(J-l?6c2q=j1WP zDCXcw(n>B>h}bMSj^PrX44Pz0(g>kTlPCZ;jkJ_YSv-+8ikMnmQXh>LC=oGcY_TW< za;iMT?isP6-oQBEd9OZ1%LL`Q%L@WU&;E8aS6g6dVF&V*<0vIa`kuZxiQpn&< zCKp1tsgN}Zli?FF0WMNTwI|I{JI2NZ0u@7IXmqkG)rJO1q<98Jp;&}T@l2K!O=>EW zBL=}F5*!w2!j-awIJB|TVJ=+`PpFKFCympjtSlLuCBR{XaW*a$V?vxI0tg$nLM9gr z5pIc8%*JIhu@umlshd@tj3uqr?}$GzJBKHe1C+VCRV+cRGT^v^G)6B%*i9+ClKN>Z zp^&8v4|r8=;wM>KO&StnDK8rsk2cCoCg#d9P0MW392m$Ei*Sr32Z=$eGXcTDV9&U` zG-^LhxdL$}CSk!MWy;Z(ooTemCJyk!062z^gX)E9U~xD+L>26Y3cN)(1p;{GQn3J& zvP5Y(bdRG@p)Spjm%Dqv9~wRTySuk&Ol}FoAaE8Zla+@LfSk(`$nbv! 
z#CS+h0o1hi>oDLerPDetS zCMN)@e=E9?)UwzhrW66*Kk4P*s@?%iub=cH6=tR3BDrd%tsy|8L4iDP(~0 z6DlNNcX4W4Ku;-+RFjLW0VV=F2%{LtVla{aVG0!jIggS%CePEuof(7PMr^M|4sixh>S9t_g}{OF8Y4nf1v4z_bh$vS@9f28zO@f*gg+5MsgrW?LxL%VelE0&{N*BRFxuG{}2E>y6aR z%g|^kk0aD57^dP<+gQPl%0}SQ2(dy6*gXah*0Ky5#MFW^Ti9!>LrGlUEw*|+Oo8Gke&JfEW_+(LGhDae0G;1D1 zj>Gvtf_FerhW2!}x8KE-Pv1&LKTI5n-Nb?>u*kXZLilt_$ zObYb1a?A{fbXlozN&!FtPllr0p8(-Po(%Q1u@EqtYzhNRrI2ub?gr6HrBrAp6ALsR zpcO|TJVqW59XWuCmcapKz`#XoqAD#$iZj6Uv(j-)iX7cf=$Alr0?n;vlSp|YIR>6q z1orcz=F+F>lXv`3EG;Cdb12tmWqm|VgHeHM&})LE)h_iY#yR9 zQH;otnBZoRX5uvfEpcmF87v^cgV~m?OoiSb4iimdMPi-|ZJIl?f0Gt#I&)jJB)6o8 z!DdM@E-Xfz2~yP7Q4FC1UDPc5pTH>4HV+>fn$Wai)T}Y+E&wP;Tx-kNwRu`;LdO$= zjRM`GB-g$5p{5B84n`XH**Z#ShT)0ARl!-CXh3p>Xt&y)h>A!;aCGK%Z_`avKw)rX zr{wX6(7%ZyIZFX}pufdNqHZli-2_cx#n8n74D$49F{TwfLmeYGS=ZK)6N+dtHi6FH z|0?yPctRA!1XIxeEdT#3k=m2CaW#Mz`(Keqgi=LVlemqF_H_5~YD?k->_BvWj%Fdd z%K+B@1E5R>=9`y61u|{=YXLi0EQSytq*M+0PdKA+ToR{t2LC*Jv{;M<`?jIzlr34MwE_40on?+sG(Q3Rhio{4?;AbtEIL1|H~XfbTBxA z{%xDN|2=lQ$t3=}jcaWUTbL>&uUh6qBfivKl*V)apR#h=M^2T?Pd0&^gDxbE;B&-? 
z;gF8dg_}Pm+5U&CKGzHnC1g3!m8R;36lI50@2VUJO4V3tXGKtogbVp64){(`U5=8} z5lA7kVdDMZdADF!(oiUddpO9RK!ye7gakO;sX~4!DKIWpNgBhoy8zv)qb!LNX^C!& zRXHsxD!J6(WlKSLG@OI1J?3tf%ForZFek~E2C2~#<&c=S3$ z8K-n2{79o<2+NvVqUceyD4i(Xs8PZGIW!Kv3oVzPM$4h|X&fD2n)*q%=}E8YNx$jI zz?a6MjiM=?Vo5_a!LS%s2IRcZJrNMr^o+4GAU|h<-fYJYUN?55e zoTH=|oP$|FIH?EMJz^yhS^&I7cd;U-Buh#7#8}gV&1iHZx{3Oq(%D%K%cN3_7@QE; z>2Q^cVX$pR7i{$onA(Exd}@*jR*xbod8uV_X)bw@7P@3zxOjiA;ve zVVC5H@5H!VL<5mX_>g88Hc`P6pgSxyjSGOmr8L|g%P=XN5>uJJRJujx&-`(_EijBF z5{UsTx>SJSAi3rkmY&JMxh%MtMol1*7}nz51lA|!D5zx-Nu(iki`f)pCrnFrFqtA% z1&0!rq$!4_3B+)6Rn4O)P!`0wlNK$A!X-*#s1p(FLqE1HNHCErga& z&()!N47GZU)IGYQ0aTxnR$t3e##%j1qvXbX6OIY=(9|=T=IG?p(OfW_9`u^!=;hO8 zS3u(1aZG`FbM#K9QRB>7jMHrzr%#R3&!-pCW#-h#cGO5{p(EXC$1z9ma*J8C9374Y z$8sKwRrgp?J(ewd)cQk&5=@k`hD#i^hr_5_f2D@!I#GI2Ho@MYmb7GMF0hwRqM_sP)tcJ);Sg zgam|Jhv}{)4O9`MvOr0UAWeMHy#`w>l=v%S9O=OZ2DJ8cGrE#s&QMaCNBaGT@{LQH z1z!v4|KK_a86hVWLYF_!X@iBs ziA54fC3ur45Gk+?QvWO&OD>0J*get^q2O_l$3RbTHBG(fh$P?=sNw`dLF)5lA}CWJ zqDccZ6LPwiLNMy8ykHu*R8+!Y)7&J)(^z~l*hla)PDCsbN@;@%wSjsLqJ%VJWvKN6 ze#`vCEX76z>;oD2WEjXApb$cs~riELhRQ# z1QEd^o&RGUQ4;K2C4t8h+*l%wNFsn*Ub2!%N6w{;WFY^fav~7HFw+o6)j4xa#dhFh z8MveEq!pFnWQ(OZQxnP%+o7kDI6+jx?{Z=YX$%el?y%ru6NL`?kn92{i=qaF+8LN@ zIRnIJEnlEV1rWbz9Dy9H5|}|9JA7B%&{2c%gJ zG9)YxVlLI9j_Ue3utXdOo`@~Qg^)vV5KA54T2%c4_v{XkLCbPraUhoMkH+lKDSdr@@Wh?}NR=_f<~73qRo3ZVzn)Z8ay{*lYt4Ab^TDoj7NC}%>L1c@4)1;uFS%m75`;OH0F zhgUJdU>Wr^8&3}TZ%nO(_>(l$)W?zLnmJW1$Q8#DwUj-691{TkjHY7*7U@Tk;~PM5 zrjfeG|7v-(`SFA0A)abXk4oeeWJg|_2$-QR*MoqVCs2U&!P%xARqvK1!xS=zzu}Oi zkHc?t>S;$kU@#LZ7=Q*F2z69#U|cjShGum!2ugK8h6$HKZ3u+AowS;%WW1*2FUo|p z(&~fYNmUI7HWWq3(Ma9k5eZN+2_sY} zsH(Z<5wjFz2US_uPZq-O4q_5z#J8h<0VE3WU?*j$L`=ghw6C2|h7)JYp)ioLg;W9z zE*_Ope~`ACHknW?mZS0&&EXYT!6B`|N{FEXO0~sQttTMttZoHmQWt1uvUq@=D%`X{ zH7cmqO;zbdwRJ*8UsOd8WvNjwwUwyQQx7EoK}e`#QZ`h7QXp!bfnrjs9aV+4Xs3}~ z)fI}cVyG5?@<#1`U~Mx#&_j$Ph0Aj(Do!;J!jhy}XlfgADj)F$O29xNn%k{ZkJ<^RAox)@E|zS&4f>!w3a~Ki@HD)lm^D5J+o1l z?ZDjtmkYd9717d21K2k-1t^v32~Hg9U58y(2vudQq%Md6kRie|kubn4?bXnzzK=RP 
zQwWDa2&hMzG*?2Q^Dk*65es;1@B>IfbyykbhQK^6OUOh8L%58YDxL(?2f^nerSmju zh(3q9yCBYx>L&5_C`B}7)k8sUffh@#1rsSxc_<*iY=V3l=2 ziW~ynPX%L-W&=MpL&=W^ z&1g*qe>61yNXKlrk)Y)@YW_pGkV@*IiUTDdg8_UNQZk6C`D}R5o`VPPs=-rXF!i4r z+>R=If~vn%D8I}6H!LbjQ$1QTEaxW-n;onRo7Qp)Rw~p1{M!_hDMFhD8;jt24B`T@ z5bn@a99O+EG1Vm%x+PIVP!}W^N;aYp(=a9;73K`2f=nbY?NUMc;Yfg#8bJ7SVJ6sD z>Q)8y>!#yb|3F=gM5k%56fH!it-T65XiS}5^7N$lKkQ#^J-YO-fdv^kUVCdEm3v;_ zv*P=rz%Cs^_I-Zeso=NRo%L&u?{Z+4`sK)7ua(-#%Kk1Xx6ArPeBhI<_}zSShsdBu z?P?VEg$C*C%WhV8JicVD@-oMw($W5L#l!%s?ooaXtb$c3lM@r%#uiG~2^E1h>A(F| zIL+njs#sH_X(bct8b2i;dV0-$QTI|;sao;Io0=+^Lh`)#r4lePMZEOsL!22XY?A^E#Ainl<99bakjOv9_Ll`EqPkM z$~FF+_c+#AIpIKY7sJVE_B$?4uz0cZ*471sd0P~9PuyQe$XB-8-<~B2c)xee=ggN% z;e|loNVoHSbhCI^?cO)H8Yi~jemro}+^W!xTcWCq_AY*4>ilcZ;v$=nQ67%fpB5J_ zyHEq#01`4VeJUL7-eN8qE;>t5Uo{DkeHb9(98e{jp#`*?Feb>F#> zA?8W%r}93+W%yn#us(_B2hTdtf=9kL(B^k5=|k;=X~&30^AurRsIs(tEME4NzTn;;3$A^gQII}!)!*gP zdTFXJSy09}omNT@9q9M&)3)S&HMGCJ@80=2M(4wlfbwhk+`s2{?tZoKspI1W^83ZW z*LNnae(Yx%@%kox(j^-)<4-eIc|b8w2DwRiEOvwLd0-O&A0(r3k6*{Q9P_>XIUGfL02%g9+Z(AZ+;;}4HK@T;@C z?@us&nAHB-=0PjEo;P>29GtSg=iv=kuHE6`F3cY3fA`Wo-IuYhIO6J~&dF*0x9{q_ zhV9@Yj;tBx^P69Q%jPM)8V;CG-^M(->iE}tbC1`b`kZ!ka9@9KhtorRwv~v0eVDh+H`;Ed8!52?sCtQa(6pf6ZG|y)Ka)!tLj4wCVE$lJ=SG%zX zM-9l^H~C@N#Nr7N<@}QERlY)(&fLGpNBtHeeq1l~YEW*7ntA2hbBjH0n?7~AUYd4j z!;!xBJ0Do?u$V`fo}6^=y~F4l{<|-mm)yPiEviF@!bi`|Ht))1ys1CB!Pe1yZ)kOl z{?3(!JNI0$b*wQu>+oUosM2?T?s2INIiIwFwxPze)6&x!=RO{;PwC*-cqiL1B>k5e zVI6~BtcgGTw9ADhgnw|J%-8a2VgFeAwxPF`FR$V^%Gim|hwf@Gx6g7kqmAtT`Pbc3 zE^<1t1Ja$A)m({{*I)B|nLEiK(tosI-rUa>5tX~FI`*2n;k$bk_p3+$r5)`T+vmqL z{t?pPv;B*nL*(cUH7f_{rznRqJ*|_-1qmO!B|k0wbJA(fo$1BLZd}h>aATi-e3nbK z(N+5Hhue+#Z#o%W6pKv!Bfs7~BD5R0E8Tbh>hHMqbCap_54>M$;IMh{j0YWG>v$O* zCfpwDUV1sz$<6p^;^>O@lSUmn@Y0C2v)x|PWZ$63*fTYsvcKoDQ+qng?{KHa_`ZB; zdpbTPW=i#Nr)ux3R<-7Ktn1G@ZjsqJ1@*A8`Zi|Qw6eYHeEV)y-uW_y)$st$UjI&= zoPYWkFimc^@{%F~cyt?YuWbxsVyEnHnc;&c&Xk^}9ZcEm9r(PP!}BEL6?=OeU>hq@#b~5Q@F&xZ05+{+)S?Btk|b&EpJG#3dw_OC2nVG*C%)Q-F4Qd)MUHbEt~zm 
zJmPg)a&sts&Yg2IaSvJA;Sc>RUT@mqJz3uj56zjl!c=fKFRV-VbouNxuU9nm3A2Bn zA5na7gm0$#oIv}drn}~>@VZlZ<(&QUy1jl69~_z#N0)y-ak2lbkIRyC^e2oOeelp- zTjAG({@Xe@`(Avv@pQxMOS_+3>ih0a#+mNh_{RlfT!!`Su>I3to2(Yc8N9h)@;;q& z;n+aexwnQNJ$G$x7rQCF2mRTJJzFSz?E1ef4nKlZLnAHJ<$ ze)53Q>Is+y=f-vCsOYfN+Q-9PSPl1=#l+sJ@Gp{)bP z<$Zsc^w;6aFk12K9CMqYPWi6mdz+7)aOTdY>&oW?Mlw22O!(S+*2Pf!&QmrO+y1%p zh3=~4iy>F1#7&;(+Hi_BTDJB8(_DYoviZrodw=`5=Ger`^=1}t9kOO@d30CcrOf>j zz2!s9^WuZ6h?G(In?IGocQ$B` z*QWU0bN3JIakkf*7jJ6!uj;jrQzwkBJIZl&tuNr(Z~n3=KX)Co!$f}j7oiKN%g{e~v$M?}$D&2SN=#xHmgN~nimSIzV{CH+> zhi=^l)o;Pj*%A7Cz z@+$iExT9zP+MOL&erN32CqzPzf=dbG74KF^X6ny%ym$Y>##ry7yr8`6u`j(2_DQJx zc;A)^VRhl~ql5quzFY=Z$nez$g^W5d!k5d?Q@HAinnqKkDpL8wA+A)NH~4Yz$WKBa zW8ou;4gutN^wxH)jzCngx*m;Ico5Cb7yidXxZK`DsUh-zAEFAh>2wM{?>~g+ZwEj` z(@ZpA4t)F&XY&`JuD$4^5I!KX<}XAqZLV1rSQi7#?52o$n{3RtvTjqBF5=#>4#b34 zdrGILT?})2T6NTJtupyVzE8;QoWhC`quEntk2BqR?HKRm^Ze+u&x~)a7BOXy3!S!YewBzwf5|tdIWv2zo68sO z+vQ{4+f+1uo@i6F$K`{Kl~v5G!H*)UEpB9+uyyB*4fUGgXz4a+MmG`b#Mo+Pu*;wo zMSEunGS=^L=*Taeb9iOl!;2@9Z!Yzic;Nl$z`M-Yv5_I)%s#wac1c_{d#2cjm)Ed( z?ye>A_Vqp--(HE(*?ps`+ zw2O_sI%v%xb`w+!Je_*L@q3_N(ht zW`RjW{lMTAmcMOXJkRf=xiQAvDIaoeQMZb@QPz=x3ir}8t34W)-!qWu?5@ALI$&k( z#?R(?7KwS+j&^-MIdU1TxUg!`HRCRq$uns~cJqXMMsZAkv%dLtwjuvCTri*ZiA+A{Q@hdKcb@gg1uLR# zmSjgvcQER3`oy#TH(qu%NL#JH&uxd(vX9m4@2$VJbWvVDPw#u_%O%s=s#&I;m+y@7o-kwVge#7NULA9b*>rh8(e3kJ z4Ufb~yU;fB7raO|))#cx-yO{I$M81=&l5 zCZ#J3B87jhsrQ|h`z(N$7CUFmxIf<}&Mhgs>Ez_ddCdvG9q)K^mW@-yyR1VdByrjUAkd zddLq-JXaiB$s2q9t^CC>-5q7#Jq;z{;ZqL;n)f-rbVy`g;l+36DSpW&bu+g{FW&QX z-My_xSN5o0eC)`Pxn2pE#%)U~y<@qja_5;%vcM4mtLLuu^zPqLV$SgOch4&|W-jzM z8#$Ge*Z5W0{^ElQ{eymhC()=me zYxerQICe5FC9wX8+7Q$NnzXE><7W>e~!!;2~+lq>r^ zJ|e_VFFx6QOVp>CVR6S5I-wr-?hdVR74@hOa#@_c;{_f!mZZ&2TzBfx1OuHxa_h;v z#DnB|Z;U4`5)51Fo|4!jZeP)KQPuK?Rxv@tC1J{uca#Y^BcAz$-;Fr&YT|pZ)rUO) zG}g_YQyG}}c28*ZwDZRF--EwpmB#%cOY|)`w))-r+}R_JOF9iWv}EDBQXh|LRsEMO zV4GPfbvw^7S~YdRgxZq}miKe}t=2$q%!wP)S@&O@ul!bRd~k-IbZ}8OuM)j{Ztjk7 
z>}LML0hcVNFy}2A9)3I|i0R?hSH8BWFMXU-O5J{je)78Raw2HEc2`SCDL<9X8o0_?36cyPh#EoO|i!&4euAYdPS_o>*ZpnQiiV_r z4u7V&8}>c4?sMFu+6wZ{MZ0f-(;MsA8>6X@)y0&|!^l^PKQS>>O(fe5b zT*CSr>-yG;or@p7FEVR*|996^?!GgOy-uZ5J+B;IXF6r*#4BTnAuFCOoL=K3IRN$X=H$Ce~T4|2j5&$oAQOJ)W-dW9l7F^xwJX>(#GQPtF>^%gZ;v;Cka2 zLpS;Rv~&C(U3&!SH5l#~v*bp~e$Rvs-RqsVy?Nig;%NDwQ(P|ZG^-g|y47h?)vGCa zYqKmY>;|8#iN1jKcfBKl5{9I?oU-j}`xow7wbt!{o#N=noVYiMPp+qwI&JhQIIcf& zpX=8rOD24c8o#eU_xF7xO4uuP16;c2mHRP5{ruAdNzT&Y<=;xzb_UUTlS zVRie>hl7@zr~j&JXX!Y8qM1`aJ;Oks_K^uW%O`f8GR5(iGm{6E7(K5ybX+kqtYZ1& z!v*(m*`EI-T3meNh5c-+W3!h%d0kznoAHaUBJ|YMw1E~uUX>{qLi%j8y6M|}e%;s4 zjQir^QQvzmN?5z6y7sp5xf7R9H9mYC`giWN$D`v@KQVnhQxh#Vw0}L|_uD2TU3{YM zuUNbP`+bkQgk$C5cizbhgw?Jo&#!DKtCGD-=OmAR*ZASf+}nqrUr)-|_+{rIvmx?5^=kH?+tZk$}rXt;bU>8ifZlXV3yW(!OO~xQbNt4Pk)qd z+M>T8m;GQQdtR@zennX?WL;n69kHL9R^He1m15<#Y<+LD3VY$Rk_pZiUI){JZo`Um zsDN@ggmWlh=C6Qnny{Av*<=K`1v>KoA=eioF>NhBz#!-Z_FC;IZUD#s>+0tD?Z zO*dx%AQ__i)-)!CuT3lYCjdWO`l|3jP5_gOQ63N~Mq(ADA^O@>8VltL4OE$2^!FgB zKL=q#8l?+hHk`|awo}FEr-ZbYrExGw5b?DCG)Nxgh*e)wMt?9vfxe1^N;J(-su01W zbP>wjm*Z)M^n?%4)WYmu>`(#tRg^jjvza>=+Ht( znot#^)VD@};LH7~A8H2ej-fqOsHnfY0bl8dDpSbC7{gLk-?oRL%Iy2$10rjvd6VsC;XmPy(Yf@7(wPai8aY;UVPgw$}cwve#aFlX*JJGCXP2YmUl} z)EA0HJh@n^N+Ase;s{KDN8th$sWXn{HBm(xa@Ybv7@HF*Q=!IYViAT*crq~;$7FK0 zRIXx?rY*{F2^5)V6yYK+l(dBs8Cxh3;F!7q^^!~3JP{_vWeNcrs2vnZ#4@==D(2ua z8LH;1SPHTOFG3`i;#jm;8i`A>a5hh%fKI@0Tg0#|R)#5L>;4O|P@v4{aBX`qn7lT0pz zY9k>#29x2l0Bwm$Se^dK8&21SR0Z-B(Tp2WyOBJ|AEgr=d zC_v6o6V4XMG^-jwg&KDk7Yzm(GM-R|Nx)KMa$Ew{8c;Amd4viHRfDJv8sI`P)xZ!! z9TR~bK^4Ontdci{9fgmU%W)Af0pR;h91C($^T)trB$%8Xf!YehQZ6PA$3zMt0#oMN z*oFtbg~vt$`6p3oViKkhaR?X(y1BVHY0eGjM&L4x%ah5ayf6h~36TdM)j$wQh9O{! 
z{@Q39=o~N;s{yVMyiWuhL8M6nkdrHaOM<)o4nJCqf2{RH=C;RBvpY$P3kASYv}3Ax)Ff96>AtC&Z>KPoxkCG{hsv zVZBi6KB{Sfo{WQw*ixPt2&)x!f>4en zuI|yw9vVv-A~8>fcy@7eYchw%$~i4cLRyZ=;IJX~0fxlUAnRYPfoLdD^G4SHt{K&| zak+GJ8rYItBTtB|7e&<ndUW{r}8W{=eWUrGgf2 zl5&_KZ6JZ7Emc4=O54wGBIH)IVF#`vt$(@qc8qdSL6QdL4Ahw*l-g21qXZU8(WH>( znn@j$;R#of23%PBzzuL!q?SOc@*Yw3`Jl3rs3qGqq!>`l3!4dC3K<6&v!g~a+O{-g z{bVX;8`4~(4i+1AE{8Ni&D1$MXr)COpyvpQ0v>2&J8J6#4^(18-AcuztIJWN7UeXu zOGDlXZM76qNC#@x18B|Tg~N`HD+P^~HjM`uwrCG@%o~!Au&w71>Kq&M0%@ksKx!2F z&U`_(Q9q+qSw)(|7EZ*01-=;pNNTAwnQU3Chy!?Ah{I$EH)`LY&Nu?#EU_1rd<14u zJ30VOD-P8<3MUrz~Vj%1AtH^{LEBj{1YMp;0z8rWYN1OZVN8`^ z%|?JS!=x5Z28cz&J5E|?rm?Bkh`n|V$s;Tba!4pWhcdlDNe||XNPy9)KS2c7fEXa8 z5UNwpq&C0@Y>4n^1SC+gY-o*Ao1k?tCp6%A$n5KoZD0!wiMI$OS(Lapkcb654iB>C zAXhDR6clxV)UXprAXacCJWiwl*Ziy^JA;2AV!0@^#blAN;g+L4v4Aa;sWQ=cVG2-g zNEJXDT^mg;A;!bQ|Js7=-aI@tYnhWoB1XSW zG>n-LQ$lB7UJOs9F?J$p9t25NoQ2DgIyF|4jOGy=80N-8Zj{GC>fXq>si(TEY2KNt zMF?Vj4C9J97^Y&$l2!hwqz#6FM}cWUA!*t?HpJ`#nMx-2NBxNO#`eS#V(GssOlr~H zj#xtWY$nngajY@727U?yw+~i=sfZ}j1SygL3kR2n{09;>l$v5fl$t_nKvf~|qR6(O z$CM6WjS67k0WLETX+sjs*^a5lq%rlGX3TaT5~aS>LQYfaC(^>{@!G%B=V&XnrM;Be zN?JTUUMpTZQHM@T)=Ab)riE*B>0GUa26Ob}biP(&z0y!=tTc$%Q?^mk<8|V7m3m4e zu6DW!ou<^13Y1Ol&^TOL08M4bBKF!bb$|mMrY>;cqNGVJU^ESSN;;PoX&`0rU<`~V zf{~S4iSUc5M_?tX$o3c>gWR!tZNq`vXI=l%~s$vj9LX`BN_}F+=R@jQLCCOTNA_@ zSp#o^&az|_skU%p#f7wA!!vOjTp3b6v>L;)qS22y}fSi!S zfs@?`fjA5fjgVtVAyQ>12BZdpn31(eMO+21WC9C-3{F!qbj}2t4J#RoMI=IqgUjKZ zsDUEbdsC4RxlgnRKv5SsLyqeU)q0W|7|&h+pwk<<9#Qux~NqXRAi z4BQT)Z#LMCE@fOQQm=tNsg3B6CZ1!xMo*p)hNLwH88Q3S)*go>i3iPmrb30j&1 zYv2Re5w=Yr(Lv4WOl?@{y1`0kMgc_Q(-O4;XlNacrzHYx)XE5IPaGo+nj#j$HkooC z;KXd18$9&TDq##OgmKd`Kk~9aM}hB(6d-~6lc}MGAu3jji4(S(vv&MXX5B=cQWA8L zflD2nHS2{oOj#S2_jVqPrYS=(k*Ez*21{2j_2{W_+7z4o(ZDizzkI@$)S)yTrhkGe zWp2}$eG|cdm_2_ujMr>{lrDf*g$;Vs2!Bxn#KOM@q|Yg z9k>0(S^_*Qn>SMQ=mbbj~gOgSu}^ z-C(uw1!y46DUbS3ru?_lLjZL9MGH8}S4S|7URCWdw7`0-zoz>fI$z_tMGCpOnm-w1mvSpQ>%TLqjpDI9uW7BwqeyOwzU*ZPLW*M#N&z!qCg 
z8v$b+&65+DLc)chN~R)WAwB^~@i>s%BE(!+6`HU?K`I^gsa-r^mmCfXr$qZ?+VAv6 zZ$z555)_2*iy)Xr_eP{VF105G!^}oFUH8zUflisiHM|JwR$?Sv5P|gKFP8A#&<8>} z^rz(#j`k3G4dFiQ3y68d0wRe>#??_@JW;^!h+wlpZ;t3~0KBn?VkL2bMcm@IM@2s9 zEdbt#LKbn8-v)iU%;KlRUlxA@dWUV!EBDC>O#I6ZKxZb zVc((^-XD^6Ogf4I?U>pmeHiSbSX5L_y)nVU!$@8AeN3h{BoDM2p4FY_ zLTBoi>f9DG7;piIi-p0>#^`?OVg6K20GJ`%TtK&;LGmC^Qr|bwb;uC+L4QNcSEAJfy`qIK z{0jOpd4|+A>v_K!+Xc?sPak)>{vk8$>8AKg0}kEj6gzL*&_yr$bxe7|Y4>g66^C;d zYEI5{C^TC!NK0yHzN4&Qifd_Ym!erU82#%|t-HZV9G!DjOApk1xP_lt%+ftVPI~<& zrB_}+ZSuhS*)h`lvl2pr(vq&U5B_1;%Q50g-$#3Iu3G%^@zy!DQFDxkjJndL-LP!) zk&Bmh?ehKdp-j;Qo6(XkJ+JrfI&M>r*@F2F^_KKK%imrtd~UJ-^W(<_@sG~z`Ih}= z#htH%a*x06H!SzOo3Kv^XP;M1_ejVim~9C?Q@m|qq)J*mV_=8-Zw4-%X{}n|ZWs0E zC)~3C50ALB$4uu&1`XTWe{e~LnP1JU=%lHxLr)nv#&~RB@y=#P@QyxvHy%Ghzte8P zoyE&^A8wj4ere2t&VvdDn|+wvahmj9m*?$wV-r7_YcWhgxm+kxT7gVVN# z&HUn+TD)i!eOh!EJy!C)!obe6mb1@~zp<&*bjrGO+srDY#;4bg?Ge{?w%hfh+&hOW zh4Vw_KYQ&p;zZi;g5-rxzGZt1vB;iV3#vW#bY5`ZHmuvYqP_A{CE-?+hfLmf{qUBV zGG6Z9jGUkkf3lX9)OyMo)$d(4mE_(KT+vUtC(Fq54f9(VcgO9LYr)>otKs&A{EMsZ zRhM=w*t>Yuq1fM_TzuSpZNd6yW6paX`rgezR8{+Zyl;=7n32(CXT%dTy48I(J#{@{ z@;lYLi0}J!tq*jv7?eR`HdzPfo+;mAy(RGdoB&&cPugR>AG3Vj11AgCd|WWzNULB} z&ci!y*&S%aQ^Ar!1D1Dp+R4iEHLGd9a`F=gg+(IqYO4054l_2)>mg8z39WT zN=J@KmTJSb4PAOM7GNi`zt+%r)#YPsA8DzkY#6-x*!*hiXxF{wH%>p#tely8d%th@ zYVW>U0q$=)UCnufd)4b~H%>Q5WYE25^|#1fQnd2zsXf#8tv56ZJrVd=cDd)};a&Xh zr*(Dzv_iQ}f4Za3w_X18ZPq^?DnI1>#AWVk?A-aziGDX844n7jOr**7<>u!W=eWja z1vt&DjeGnmow3B*iF}(+kBIZA{KC{-k9pqV%*YZ<==I1b>XzyDNY3n`>%#b%cf6j| zU(9?I6L?}(*QCVW9_iOI_Z&GB@+cxh@N|{J`_PIj=@)x>zACx5v+LNESAt{v4d>Y7 zSHB)wwe)aUX5y~omshu3e&^ABW-0C*OvZ~l4DRv1c&mN7-MmvdzV7jFv~sYQiFMz0 zx*m|MPv?Cd>A$%Rt=r+bTOPJQ(j_QIw*yBj&O76JnM3WPyVFAMJJdM`LN zFfoMTv%xViZm3yQhz>SmN!zZf{Ua)dznJmpdmjDaFx^7|mC+0LmB!vQ8GPrY;F-O@ zi>FTUIvt6{myYC`zQZQX4PEnI=sR>-e*P8HwS^x`-qf~NIgGK=+YmPQ`&32x*XPBq zRc=Eo>s%Qf56_!X`K-&XP_9+_*O&;)5oau}ru~p!nA3OQsH4-R=Tw}U&h@O_-5wme zwN1OjuKVjIY_6zpKP3cLhAAALO)lH!ku-;V$9S-1z$&dNs`r)$@cg+OeXF#cJkxHB 
z<~_k6cabrhnmnQt)t(6$?Cu5Xrmr*Lgnd((|a$s=ZO} zs1^2aOSW1q*m$7BvwTMP5O4Q)pJ&x2WS=i8)4LzK`DDo|uGNF0E7!DlYcKyp-sP5S zk$>K|Yroa;+u!$B<;~I`yxX)4AC-UD@ociW!8*?X`=0bg0}n2fcQaVHtNuY&Z1tX9 zyXTx(QNMlodL2)uU-h}L+O_=oGv_Yxix0BxTXsH2=YFF5f)TR1xB&0Z>Ej09ypu{B zd3)%Bq!;HN1Rd#L-1AHI4!(Yzv2}4pd@0T0bWKPtUg?%-TAEh&up*3OEpd&d8eCqz#zWaUphun}sor90A zH491IGJnRtkE)Z;$G(&9p_kphv5B@m)vtDIwmA9l!DW@gqUqmis`pf{b$YXF)ald9 z+f6>7#qM+Ywa>|$hK{?}_s%Wb^R?^SAK^thW0vN-7nUsPbLn%!tQ+(pR)&vQ`$aax zH;72#nQrATBTqUmx!N)6@T0WUO{+uNbt@h{OZ0YQJ#9qFy;Z7=GVM-JlKFAfmtH*( z{l4kb)y(~A&K|F~(8g82oYc|!VGpAM#jN*No`pR$>6KWzTy`^R;>hABj}|+`TOPae zBjIp$@6v^57Ykl1H~A`eA3v=4;B8hqVEWNMHQrtcQ@QVXTJs8Om@A&tRM@>+2X2NW_ejrvZ%HcK>@Gj{8{-Z{EQEZkEL$f$zAJuyWo#{g8^5NM z)*Rjwu)5eK)cLTTdu+k!s?VuMqpFs+t+6?}Y>uN|U+LCR?=5daD(jEEFFHB{&p23e zIoh>%S6$AKK|yJ^et+|(yMNTqrJTiHcOpjKWhf1QbF!@{Ev`eNX%ESUmLUD64N7s+hB#98g3Gp&ygxO~rEX1_g>Rej$XN>P-Xx z5`5%eMQ^3>#-u}-cmqBCI&MWx)B&~*1lOk#4gR11rprjG%jxjCo)ofJB``B@m!7|7u#mP$X@nLI^c6+;rRX8_G?dU8tVOF z#e%GP(K}8bmM6xCj(xs=&kT9r>4zS5Egc)YYxQZJm0rsPj>$Hr@l|W8^gkZYU-doH zq~7gf`Ix~|yz9nKaUL+9HQnI-7A;nSYCLXM|7t&jwX0(9iq7p8ZrnLVcl*V_yi3P+ z^mlkrPIkO@boKamv$CDqb;}72R;FNqr?fE}HnUETXX))K zpI2$uJ$`1c-PwZFqF(!SQazU~9YwPL)s*Ml9GY}@`bX0b^@;TFm$ib= zUVY@cAa8AqnKovZzo#rScSrD|_4&tlzT0N&&N$G=WLo*kD?X`buMB@zwD{V~ee=uM zJ7%3`3t)@kUh)J;bA7YAd8o%=8Ab9;J%)XOV>fvj_I)tTGFyAK*t zU%T!Jan$a+{Z+s2UuJZnkDX@OA@lx1!(i+DPDZ@lI}Z zBkvf;Cj9W|=5VaN3v1zv2PRuDCx=#_3Sdkh-y`RTVK`@aFJg}A?p~%lXCE1Q zprmB<7++Nanf~l@-3_IA@6Xd7dQIyiFg|*;Lb1>+a=>)2cZ1%@(ydp2UW#!-;@yh- z{W0NdZPbFuQKp**RxPP75pS;z_2?B(9(%W+6T}T4=RCu=*z&7n-3Q|r$Lt5^A0 z4prTAEXj(m&EsvWy}5*b{r$S}B}48?hK%I-=Z_kv@A9OBYhu~GwugpioZ0eZ?U_U4 zxn;{l`-HzAdAfybeD#=b`IV^3vbnFP)EDa7mKcm(>SR}bOv*_cbM(&d=5e%dqlEH9 zRvp^yGUJS0XuV&*G9{+i^|D3Ria32+y-7XC*6YR87EC!5zrffkN5Avj2;E5wMs`^> z#h+gOqwcphc&|R0oqEmt(_>v!+ruM$bk?^yELkWR8WB|zlWQF?(Wd|EgW~q#bkk3N z?$|OtcX4kkNy3GW-Mxl=jzM|PTCX7Rag)v5*U z*Z$D?JvGyEm`J{KM_T{;PqS{*74ruk8WoC}=l-F(A+CP-(sLXA!yvz?%YQm7%p0($ z>V9Z~>B-O`@_~k`8U#CnSF|lh_PWRypwd5;#b|w~M_Mgh~ 
zy|3h5x%G!nl4Iw&p<#iv1v1Z#i=7r3*qWa2TskI9xy8OHxaOXOh+n5y@_pB_^`{<| zEw!oBNhwf%TH+k_M{Js?$Bl(iQwz?;y!jxzy)HcV>*AduTdQ37wB4U09*lMjmp|=u z!sI|(htN%fZhm&l{p@<6KQr~ynTmw@#u2X4p2eB>g893PvyNsx`G6fBKgXhEOdWgs z0i*U$W@p@d`f>J^YqAALtj{qRYu-N^+|KP=Ilnq-`NMFBq(wfDOnvXI)=Et%ht>6pcDWTsk6!iNO)GK8gtlVF+(Fra*w(zb^5S`UiN_wi-RZ0ww|G|J zlMmneFAX*`zczE{q2)V-DA@zI^v)!sGT8-D`*kg`=iUT9mQSYd#P6J$K$u zuiM#?%xt~$IyW~BCfIw^wvYH?G}4XvjF=l8HY{Xq&S}#U+G2b-ndj?MU~C=Iw*HcB z;y~`eEy*93b+;KcX*IJzvOMkk_?`5-`jy9KjAouU=pOsfwb;{Qc;U41i{zWS2Mg>b zzZ|}%`0$Tw(~rM8`fT~tlt*2I9LC+@T`#;wn=Ui&P3@**7pV8 z)yv%Whve1E`RyXs9L_&&yTK*X?0Q*1WQPi;mtHs1p7+OQzQqOf2r3*d+Y2eQN;p>^V|HrX zja#k@wQkx~?Ob)*VuYcfWKD-;iw*R|38lj)FQ00YTX2nCRnya^=uY382lsz`ZuolE z1>et$Kh`eISZ)3Bv(K$5%LYB{=e_h+(%}fcQ=cP)R~o)_C>~_lHpc#hI4ZzvL%M6y zur5|sxjL2iP6T~;_^@{UQhdME|~b$7J@1yd=427OYH)IR3HU*t@e?T{5&8 zp8B`Q+%zb-?4jG z^^nY)_;H+xXAHX^%l=E7E04{S-Eywy?yYyjR(){=dOfe z5!|t%V{lkOGMewOq9ZPJWh)NRf=eCLzci!EGW-D$Z!$N+iARg0*p~NXnl8ss=db9- zqY!=Z1g4@+GN>V194e^qKEc1Y%bL#<{yJYcO^}5(009?kF}Ya%g%)s!icqi>q(1uU z26PETN$ROD^uU)Syiso=NCUW5!$G%Eq3q%+^jb|?$RfBH+*0PLFRH1p2hW46>+1hG zN8cBrK>s+2zDxpTwlUNm-C0&KyOBl$I0tOJILjhB!gQ~9+fWx8sQ>W;hD+V47ezs{VpOk&*-^JO(0@9kF9R`zsj9yW0lF@X Yd*r>#ayDrO|J;kd45ay%hf?A6Kk00n`}zll`-E)^<=-7Duu_6Iu*t`FV?i1HZ#`2Ze-C$^SldQ~1uUK4PI# zD+T^jqhYAD1D^wbVyKK;=u}k)!Ik{HQ6^M&>C&b4e|fKe-W>SCL#KHqTl0zY@oC1L zWTlseFj{}Jpm#6eNJ9rzoGS~L)kaD7`%F6FPZ{Us!*SrgZYEyqZqjpNndG9{0InKY z1$I9d5Ls6v65QQI7JRItcz-IWy%3_`gJbDZrGxO-GzvDjd4idL0yX`y7VJ+KQ$2QRG_4);`NltVx7Pl{Lg=yad8;GH9-{6Eay{`p$7OcsgjpZW{?G6dtEjBsbvK z4|yd2wGcT}H%99u^e`&qBS||lL^3-MkpPhdqZ%SJ}4T?)4v(=t&R69P&i>7Hp|viPpji>nvycU(N=`~`PXPM-zc*)GYBK{ zC&W4G}*RX=}?TOAuo7EWk~@|Jl-@Zk$oo699%4}YScj!&oOKkg#?Z6~7m zr`hOdTSa3`hN#e+LDp)^9qKD>4SE|i=&#;Mj8FYCY&tg!3N8tuZVqJ>d^%a(*Gtf~ z#+s=Y&Y%*ZUaWFP7*W31$p}OxV6u2JN!e6`pNrzb@xC*WZ{LTOb%L!gbk!j>B?m7r zj%S#r0&FTT2VHk-e75oioj$Gx+>drJ^0AZr*y+L5xn;`SQqN`jb}ppLm#}1oum)&( z1NF52P9nxF>E94PQYUo^I=5T{`REZUv!E0vPs=5T7TltC+Tu*~@FLu3e~-%i^rt=I 
z*`&3qm`-zVB>iT^n6pL|r{5Pqwbo3W(PvCHR+p1%yCFJrVH|n=%nKqXWsx6qrxG(0 zDI7EIB32m}$m{NQP%qg>TIRNsv-heXe(E#$T$@U_Ef%69t@|-(Ml-yfJ4QFS<)A^+ zKKNnynAv#Z9z7u~hn2$CpyPHNOa9)ZO_Br5E&o>HlBkb$G4AkW;zaD9;fTp|DruR| zZH`8vGWDpR1tC@1^!vW^pj5@d&WFSNTNVvAY@bmIR{g?2jWx6D*V@>&euBS}^yp4@Nia!}mH@NcY+Ooaz&u z)bg1p621zou5Ccoa6qqX>+qtgAiis*Fwn7&+*X&P7j`~`Cn7rFCi9u3NZz5jZ7ZN{ zStxd%8KNVaI#@0jj`pT&@#ow)+*`C3@}F*JkLa|M@~4MjV6PZl=}TlHyXHgt#!ULK z;~o`UoP?F8jZ|A~3#<&&Cb1V@g5zX0{4`q!TE+uFwXF>8MTbF28OXBCJ9KuXGo}SC z1=umy;^Vm`aA)dsy5@)w`iu#}%LzBY@MjwG`D`WBFOlAoM^e@x1#?Qk3{&z)1DsFHAe?p4_(i&z`DL&N z&IPIB$2Wm!r5cHLk0-XW9eK>2*HZM-*F)4Sv4OC3bhpzF?#!vgD zVAMJvMQt*$x#10SZGI28qw_i}dbo`1I#Cl{_J!fq=~+bg+&+3NdmFevi^BHnPSko; zH`;AXhDWl0sl3B$63kIVdAqMT^raZA0#D%g2nmk&&J4_}{ElxX#zIk|BK8ZI6Q_P5 zxVK9lUg%80yiOrT{Lxj`;rS*6K&-Bmp)2QSHbsfrl4%=11%jdNT5*x z2E>1YNv9*QN-Bf~Eb+q1tYW$#%7;C@*^@ZGDaO{GOgO1D55`(mP{Fd7p>6Jvc3A=s z>5t#in3kc8(e^E{MC2^2Q#gZ_npLVro~i5_Ic1tkQ*jWYYd# zdj9t$^!4mz|6mIHeSsEt!4pF|bXFN_=cG`_l7rY8Z-y5hXpkAL>p&oHK4@Cc$0(<7 zbhO|Eed9vNilSle=KHGV9PK3PvHmn|k$yv7Uz&ttipfCiR%6_&_po349QEAvhwSUp zgsj#kQh59r8ToU75|i1`u+t9C8>B=1QGS$?JVTD8rL$)bUZb0I+8MqrE)ZdSkvUN! zYO!4N4wxOSVc#i^v*vl7=oLIUn46sk==BnV8eeRH^H;L%U8gR;#w) zwCf&Nmhg$OjUDH7L|;dbqg;~LkiZ`2Q=xTXqr}+tDE77cK>uDn;-@?lcaRYHzPJK) zCv79%R}QiD&Fz@CP84OfM+0A(5Vbs#kKM~O(Q4u#owp|i3kHJWaaI;-_q$EqIWK8r zYc)M`P5`og>%z661{z;l1vk$u04B-@o_3`(GBSU$Vmut?4E_d_MP;OkD*)D)E3xT= z5H-JO2z)#8@FinMp3Hv7Ja9>*(gU`*GAj=Gb}hz}vyNhD>{Kf2G>=-3_`&OdnfO*e z2S1rt(#ENWITw9uh+JtZowp<%(v5WK?$goWl`lt^1E=hfVoMeY~lUdR^jMNP`@J{6*xg>^8*}Q%D-~-^Sh{30^xv*hS7mH2n z7+HnAke6M}jD1eS^n}Hz_gNi|{PKd8Y1T}Xj#)OfHQl6T0F8SF1kX8aU3 zU}zN>r+=h}73}E6oX7P1pA&)4f(s|6kOLm;c6Xp)>r%{QeOlGyQlf zWR_c~{C`x4LVW0Kzp2SKF=D3!OdV&4H0#vuH07$bm>j!#*UVyk#B_>cu9-u>p4rmt z*PF%HWtmkNUTPY**=@FFW29-5?o=~{POe$zOogU~X$8#>M7A`i#Q2(d?%6^4d)Cr(H0%k&f5#kcFc}5-tb+fsqkd7S?HYb(79eK z75;xq*uF6^!sq{$VG942GVed7%y$b_{2xK3#4F|>N%U>WYtH5cG1TRp6BP8T;+&sm zbkZ+BaG-oRcF_awALyb-{hUx+KaIY%u)*_f{jh6ib@7VC_mNXjpCuUY}Hee|9MqS+IZ(Ok_#3>?3+KWt2=C{->85? 
z-UHn3nlwDHMjJ*37Z7=q9(D_V5Uie1hfZzcRA`n4Duqaq$c=80GvixJb=)P|{dzHN zoYVjnZKA|iEgAQ0mV?5uGMvdDh~K&)?!~)!L zosy$WJy<_o%IdB!q8+XYv}DmCQX+JY)b8Ac8S=s~X|xMI2Aw7|?R9wPI>!W_HiQJh zL^`(MG--%jif&a$K=Yw11g9Ke_L^#uk+IX9ooz=XzKbn$j8JUcfX$FvG);q?8iu}M6;XvSsyMHzJY zzz01(SIOjsSJ}G}L1->J3+gJjLGgsF7PabC?AGKT#FiZeyXW7DM#yp^FfE=Kf^lJ?BPNyDprUa!K9iaV1+K<4Wp)Sk|4`3ST9Aa- z4&I^BJh^bSU55A+*@2O2A__RP;)MG%aN)WTBrh`PgzqwRvbR1eg_bbAKc3PbS`5xs zkcT|+N%W1zW+E>Xi=$hnw!W53qpKsW>4zh~n5>td$xpjR?A%-cXXn)6_ABF{I;09I zlWwqiQm>gQXQ~*Ls~lXYdJW=Uo1w^41xS(`p$V&gaGPT$GSU8kAh{F13*E!4V|Qs) zNj6^DeG07vPm*;VK#h7NuvfneRi7Fn zQER1CIyeX~iQlS`HYzd^Dti-m?^GJNG?Rj}X9K=U5nNQN%|Qz>|4lROrQ9TDeLa=D#|GKI3MDD4Wq1 zFAm5{@lz)iE2iV??3UkK^H3(t7w?@^M?F>wqP9rF&DPDhYuj>QVz%O?`_E{6$vq6n zkcSYNB2a2IgGZYWFx{cQ$fH99%sabL;m=Bj-i8+0+j=y5Mjtt4Xo-CTI_U5JhVDL? zPsDq!Q*)bM;u@p|*~xu)Q9PR2dAk|%)Jt%2!cF!ou?59J145sApeTjePaavuRB?)JQ?rl`=ES>HMJ77#q)}}cpyd!biPJoR>%vg7MO=g(H{7{Up`|FIU0j5Jh=_G8g;}G{@WAylQk-S)-4_jM) zg4tVZJi1sO&fbXzHH~XzPj(6Id;gSJGpgi)V?50e)`r{tCD=S=6I1B75k9Y#B!&AH z(~39^uvN6cNf)QVTN7XM)TNL5tMS&D=@+nDMUEK0Rs{NRguYzQ2dmPjL5}q|x;c5A zSz2a@A2;k{&eu*MXWG}J-uGPaS>%F(UN_*%)hd`Tn?wo**1^n*9%4Gz9T%Hruy&76 zfaZeZc=X!=$aCKZ6pBJ!0i%2`!Q$WoB4+<+!{rPPbgq?wkYbEZAaS&gv#m(6BGW8L`)}wnw1`- z4-Psqc8SN(QehU9WmV!r2M0V`A4NqZ*W#WXz96(s2+!O|f_aO0GOq0aQFl?G5;6kt zc8WdJ44hz!Tqd^`dj^uQje9|S*Br2MnuAHkA4!vWFzMR%lPG_T1OB~Zw05@$Jp0Ly z8<)otkNj^`-|GkaX}><4(sIR->6##Xzl@^i8FZNzfa`?^sg~aZ8s~a|c`(>O=NbiJ zfQ>wTwc!#DPK(7~&(6Tb&!<`a>=+c!O{Xd`^4M0L3$NWWV9(MR$Q8K8{+;-Vc9q-1 zqVpG^J}U&KXXoPKi!X`))Jih8<}6t!6^TvzVuAZq6=Y?Wf=*u?I1+7Ge=8Q=U(X<0 z($g`zEQeKUR0dVH8FuabffoK2e{}I7wl8jx=avHEn!j zi9@wAt)mU@7>L${W2FV+~6M-?TwJFo~^e|cePst)utO~9|dHKb&o0P}0L z7EIKijcJ3Ul-UqX`e*y3cGrd>Tgr%gKa@H1MW|FuI_Q zW-pb7fQjeu_mRce-B1a*dM0_`FN*4mh4I0s=Ww#EoaFJ4--%y?+)ke}ELuFk(KHES z<|IN4F3ZEwnabqLQ*}mCFB0r}VqwLNPLj6%E@xh6dm{U0vSR})gPc+)C&S)M3N507R9%BRuMXBWtG zBS$jZtdGuo;|mM6ouGyy{-ozu1zq=^Ql;*XxT)zaBh^eAjzs+M;~9`z>^RC@FDp%SS|D<_AaJOX0a)`px9&nUX?)8`8=2rtpX0k$}qNW 
zI_Bo7!H(2rXeKia;&$hNNsAAhO*%~rFG!IUIZJ7lPAOb&P=nUiVZt|jfGC_UV1HkT z#gzp=sPGqY$aITht+a1I-&H_gqnp$;HkpQqO(RbXC&Ba+0*t3aBF$9ShCm%x+$h^g z9}9@Wpr#}dbhN_!$_mcq^94}l)4_V|QpRbj3M6PwAtp?)$Q0+TbzC5;+FDy1UWW@f5kZX$7qQd!Hz8If?I? z58s}I1TT9!D=!fx3zX?O?W-uQ!v~kfMaXi_dUATlCe)(c5GC`RuJGA_D|4;LwH#Zb za!Zk%R9}QEzg)oR*ca^b^+sr36a!C{`tepo7k&|*iI)5uz_>{nO;u`O!;~^$Z#{$; zRwXd%X3Cxj}VD!!T#CUG4LO#$ zO8*e;B`c{*NG6#zG6(Iy$%4z-P3+#gV?^R;HP@N*o_YqXVGrMKWsILJMWyf}vYIFP zGEbyXXTwDDs@{-BVIGkm973lTtKsg-DYSL07-0QX2)$@c(sQ+-(!CM+0&mg9aXO&m z;EkH>7N}xol3TyGlNHh#aQIOo|Y1*jn25uOc>fT z6WP1Zk;=bnLiImtLd`8y{lA5g#y`U7{1v;FKdCw`Z&q8iK)qc{ z*SAIH-i}O5q^_;Ga=3=M_3p#X!=4r`QG*9+5F@OBl)|?!-Huu+C)Y6-2&@y;b zw`IC8r)9H-wRxy!c&L`w|Kd#P|LaU?|67F4e~Qp`3)TCli2ry@`u|8GNj|;_m*zs- zS0@@ZaXYlhSCYs_{7@$y%Vry_r`vK@z&##8&OIbgw2QXEW4#D8USfa`mF0M3)eEzl zGwF-NiqPNBYlTJkl4ngksvo)sWDexQlxH(QID9c;`g8JeJs*zm8^)zWYw!#|Li(y1 z?A1SUbXT+{^*>m_Q`_q(XSM`}{_RDstOG3Rd%!k5^rMdQFS+S_4~T5`TjHd$9cmVu z;MJ$b@KBSFjNaMIhW)4pkH1aK8liLagS7|5@|N)Xm##97Q;gv7r6k<@cmepWdPgQ4 zorOh`6Lg?NTG+2CqXcT$l!8hN@$`@H+o9jhQT*m01mb&@T zJqvD;$8r~2Jl^idSvMuY`|D~lk11z6n$$?_tF8E8k_>p-$3^ZbH$Pxp2j>eWd4&)TSF1=xI zKnC~n$0+`<_gK7(vu=`RI zUGX*%^86COv;P^LXykzNR~*2FvZ;`!SHzmU7QxU20MIkQ)V_IK%% zopG9AeETaY*}H=Ak}bim#WI`;*=wONN(4)7LvXdyHniO4j4@_<_&&%Erp`W31I3q8 zr#ornV$ubAHL-y>6iAYeQ>#(kBLObEtViEB`=Ks%AMtsjLpM4If{vUkv6;+=m3c*s zeZ>KE8<-194_1K8{oVA|(nJVL7AI$77eIfWGz1O(XxZJ6Oqt!ij7fnUoc&o23U9Na z`dTg6mA&Thg-48(l`9AsZ~!Lzz6T>dNB?5dq{DC&naAny#b z?YuU!*3(h=emZ#B=8&8E49e^16T|j6n3vr_eapS6@9iq)+mdax_vHyz$Ycm?^_yvf zojMiQ+dz@oZ zY57Fy{Z(+k=*jwp?}IB5_9VfXps9B>XMFoCQr(;mR!=Vy3HpkL-akzzC_JHG&E7J7 z_Qz=dzUSmqRTBH;)>W`VXB}V#sn4Nq-{B*X`u$X z2~Wod8q?7IyDBXC9YqG-SCP)rW=61Hh=_;WpifNls9AX*p@t=_>kA9gdeV@dbZf@; z@!2F|G!DG~u3)-{@}cK&F8U-^z)a;@Qv7Vl+_yO$1z)67L-$^GujM@|AUi7_*Nj!7On?UYc}kz|pfPx*i)B@@0XFQ8rBPWfIGJOP`aTUb?8i|$*?K9u zrYMqa(IT)y<1rbX(Zf2PN~iBF@~BFx4<7qfiz1su@zTxd(0O_jqfs{r&z@RF7D&$k zC*f!?C6_t7hc05w^8tD?z=D14UIVVjZxV%DF2H=b%BX40ZjDzy$Cf@mO%|wa#_i)G 
z)Y8KO4m!uvosLCJ#)v6Vs-6cQ3fwU7*A`H;IYLGqcQEo@Hn{ryeRApiO1fK8f!=wO zNhbL2K)c3pSTi8+FYjTXB_b1L19Rivu0`hQCP0dhn53- zn9&30$<3b!2(vB+(l$7gO!sWYx?Ke=BkhQ!i6bo@pb&NZFlDTcvRQX+8Ih^Q?8mK= zP`q4eyxF0=cwV z@WeL?X6&8Ew)l#-rrvI)WSTQ-COP&tkf!Ivkcwhh+-s_}YGmWGS`t z3^Ns^GUas7tt%uh(vSSnQ>B(Y!tld+2Qi-zM^>pg@f?4^_6vn{d*gFZa8#!Q>656f znmfE>B!EiEgPbbBXyQIhN;iR<+Y<0^NDLLN{l+=_(wUj_`6OvcPQroFOxQA+=akZ! z_e*2R zWp#e$t^&{1d*KU*SMicCo8GeGj4d7zO@LO(LTbdfn_Mup#Y)a}te<;~%KW_ty1adV zZKM;hr&Gvgoo{&mkr!0YkU}GWM|@>=iZqoN!!H|aQrIH_EyJIwkWn6Odmsn{(r?M7 zwUpHMXF}rbIEWGbOMXoZhOD?|Ht4SmlF$!Ra7zM557-Ax*eIzrhZtlriUDrp27Lh zBn^FAo^YftRI?{12jIswrg%(474D^cW|nlEgM}Fxs1JH@{oO;N))7R5bIUMDvjb0E z7ef_YZE7;}JU#e#5?n1DBv!6dut#_igw%gydpaiJq?@9+@lX}h71hTcK9`KolmJ$R zJz;ON*6b#~Bk0oKO{z?u;j%RwP+Mye+>^5aI|&Pro{&gFrYAu4E(hvSmc`2lwq1-|LFSPN=2Rg;WjJdPH2(=Rg&_lu*mz4*S zh_(B`@u49~ta(M9b84Z;ISq9#y3yaiF4N^P*|bi)fnMO+Vvzbo4DGne4GSuvK~CFX zuG?W~lq`iKjs9@(@-%2xo{Qf$?I#?UWGY)F2oHGw?OLUF)SS8+K1-EB-=`$HQtJkt zoIL}iizYzplqgIM`%H>8UZGym6DHheKOOxWLsK@Vky8zBBx_?9)-7JoOHFv-Hl`BY zbyz0RrJlU=iAS~ei{#}{2G+}{!SL@0Mlv83bgmvJ6Zyiad6g!|_n`)z+nq!etG_|) zcs|>_c#sW!a-WPok7k$IjnF+Zu~fa-i=_5?;?lO`#PM?{5k8Yme0tW?)Y0|Lo9j(c ziY~#KZL>-F?G$*ob_SXlf1(u&Lz(ej{YP#bhcrCvIVWGt+?Jj}MBNZ(B zSrL|64AS^R7R*=2^;Epj8Z>Xl&`#MQCd;RbdQXjHle#98U(ar$_Y_59XTJz%L)a*z&@F*8Hj_RQI)yY^yrOCN54$ieKAae#YInM?+ zQ3J1XHnlMx1pBv6MyiGIWPh&-88omDeBxFjef<;XhbefKy3@*3laZn*|px&CG!aVyCop@YP&(*QRG zm7&k`d)$vSk30QC>_b6$tx!)|`3E}y2LHM%D7q2mEAVOCIrS|X<8z}=)!SJK^^N;v8x0i~vS>oM851F>( zQ%T+nUC=($Nk7LKVLnVFjY{2^T2M^8R~uq$+9mR3mI#dGEJp2zf}Bu;@KD45%_Y{? 
z`3Hs={mT)I|HBbX+(J$N8-|$u14GW8*KB^gRI6EFy^q<ZTWW`zn&Y9lIM=&b9m<=&7aeIr zmf!U4)DDwkg_+IGg`Uklf3upGEVpSgUsGwCIX}8NC|R%BdiiIQ<38%m2453RL(RiO zIsdmy{69#9`L`7EpHjdr)Z%}Th~>XX58jR* zMka9U<3~m;G=~;@H*#*;hOr)Xn!MJ1I-ZiQVcr|1g7WHlH0zNjo$HN6PjoieHOxo- z@{h#wG6&sO>}LKP35I3v89-yqQ1zfP`{l7CeryWIv4_TV-`n+c$r>d(y|EMpSDScg2(zMLuFbn?)6}q8LL!5CuTXue|6+7p~+NNb%66Exq>r&#aXa;=t4C& z9wI0A?kC=XvRE(u3cQ>qQ8HZyI}gS%eCyX>nXNpTe8rA^(Eo;xH+13TUOAMkTfr{) z5KH+#Eulv|t%*(SMUZWojz7k-@O8xkxNh~FC^bi+kVIjNsFxt_+}j0CS2&>7@s2KY zJji`{=r?^aF$HEB0$IL24jImBk{@x9#%>9P4=xe(>Fp=<5_15KCqH6Bo@K$9fB;Ae z76QL=DeBDUIl{rCNj`cDkxa!ulDJJd&sMg%+D=vq_dB;I?`#vOAM(|P^lHAMI%+15P&Dt>TvlfOU zC!o;#!(DWJn)nJx8QOQ3YL zAH4OC;Lep9kYH6xcNMy$zg`hu$qXg0TI0}TR|5C@D-E_`rZm1vc*v$6n@CqLn~ZZl z3(z%j*Vs6TPn;;{Se)T2Mmn`9GnAh~2Q@~C^_!av-}S@Tn|PWUT@#1Eq^UforhyLK zyGars{Uo}<>)FR?Sr`+Tj+q~d@KkXzb8+E*PP218InvY0sw?<$W-95zjRFZgmuQE> zg$HnnTRk~rR8BY3EFu-7ib*p9aL|5$Q&DUVKhj=Pjk_sC)pZG3#`#5`_>__OV<}kI zw*s!n>cQMgftb9!1_WIXv!(Uh8OvxLoF6JfF4@h2vo`Xub0dfLstMw+Gd{5JOEKwj z)+fSeH!&emmzc7G_Yf6OO_rbAh>zCn;Lhcn3j>aiv9DQ9wSF5cAWw=Y-2Y2J3 zelp#_X<}mjju4ToZE&>l9jnoFhW<=ki;5Z#asMQ7nEvAsIb0V5f)6&rikd8RFBd@H z=YeoBy`4T1@5cML^wIaRIP-@S!Ad11`9ecpr*}8L8%mEdg$RMBTrO}Gl6+`oJr395VMm+ z7%zveEdXTCq1lM3gl@U zS=?d_Wrs@XzPe>3%%=hNtcZcLvnGMxaSKq2okugSv~tvHG~wqH1Nt+1GoG)#&z31p z!=o<+nUV|6B<-0fp4YTw2O~BZRvSn?dD`hqw>uNjDpeG&Zgn9i>N43c^^N4lUU|~Kg{mSOb!zdzoA7&xe%k0gtBBWyQljv>-+DpjU>{zCGt~wfLZ@}>Jd@5IW0DJfMkvn?f8bZ{T@nQs@2EgI`in&&11yt6&F(+&(KXtLuB>4!>BJ^O;hTpL-P|C z+VrxQY`z)>5_fMgN3`Oxy6qIbq2@p**@(eTRcU-AB8J~(Zqo~;y5LghKrct?;@Q4D zxaOWuFT0xHpYZ*tp{t0|T7C3@);D@D@DQQ#ZruGHzNGY1F~&s?(u29lU>bITT=V{YGx)Tj2Us9-@*?#E4x&uz-68?u`6q zx0=aAdI^QA-A8atH5>!(m4RFJE6OLi$T6wBsp9<3#=u#ukWY(Z(oX&1a zgSK2r^vOvkI+r%FzRY2oV>X%j*iZ*)hGvj+`W=1o$dtOSI!`B?%c1P6^*Dji<_dGl zNLjuhp6W9PT_Znos4*F>-}{jzA*y8DQIDo=aWfZm9OI13%i#9!sq9zXRI+o=RXQM5 zN+QLz;D}jmbGAb+QeA}@(}b5%9B12jNdx`1Ba%@6NljNyo6Dk~K& ziO;57r>f>2=p~j;Bae5}p0n$*V$L2?=HmngHTC3|q$;geUk|ssw297c738}vO15Nt 
zWpfMWp|ZIGTss@b5!IUkmN$z?zgQw{yrF^t%zmm=_?mbIABMkM8;N+?b6OCw7-S_5 zqeA{69`xs^r?eDddXy%4ua`j`+(huWR3YvXa>7SrcBtn$Y;Jav4{!O~!o*XSP_!q6 zSgW^@ijcq5{X;i)bUI?`?ihCDb2V76euz`AwUA%wK_qj%2##Ih^$Xu$QCAEHn+LV9 zaK%iJZ|;DtM^9q$3Lf(EQ=#=%p|r*=9(+UW@NAY6cqQi1X#-Z+a6Xa5+qpqyx)nV( zT?}j{WTJ|uChE_+K%$5IKy$nj4m~=Eq1pQAuT@OfPe>x}A(R{|I0&N?wnF~Ry=2w5 z#Fhb%MNCo33_LV^7BzZnn2Tl4=&Lb+b&@hDYZyqmrHzmm+zCHptfc1(>S}_$jiF{QR7N?-$pQt>V(~aEb6SVBeBT~joU98B^!&ioCbVcfA`ug&mmNw=a@SPTf388c8 zDX}Wrys85fy99xIe=bJunvBn7mBIdDEokg(rE9FpiD7yK+*Dfu(S2*d)3zL1!en5q zpSM?r3=sqKIyTg~mYj-dW90JM(dThITr8Hv)ciKC-%CCwYW-3spL3X17d=Y6-&}`z z4Zh5N#}Vd!dKsHA^CxlIACD`YD$qGHnaHfT#Oxa9B`?;Fk`VP+Tr%8GVr2VhrsQpE zG7yQS{;F_bOCr{b0;;C@J7 z23931(4PI3Op-r{vtPcZaz(N*EZo*ovn~r3eg?K7`9Af(;((#OT-xfnj?NbRLiKO% zr?sXl&?8!!xm6>GgH2(~y?Oh2R^0^VPqidQcvzBp`9ZRjJA?jS!%I8N-GUoxdvIP? zD)oQGL+nnx{;_B)>b_HiavrLTfAEu#z>TCb`2uX%=uAHzC?+?jG!kidE&h! zfZWKq!M!2+lME#A(eK;i$VlcFjLuyL?o%#7Sy49`PW*@sPLVL}mMq%ZbHGbaj?6zG zk9V@4lNkX6WM)q~_iMvAs~zk_e$7+GcY=pWUH~6aP0|HNuTX_AAmDFj}_WMn4M6RHt8$Z&ew)4?4+5ryg$ikbD2x|F22sHTa5z%el zyuQQ^={+qn?h=CmN7Fz$;0+1oE8sBBUzx*q?l99VH<9qM`Q%sCek@sALaJ|;(hV;B zgfD`F-uvA!<7frF@OU+bT7`#N|KBN*{{|yA|I!58|ImbmZlR0*8;scf14dR`TbS$p zS!CY%H@_)solT2t=FAqw4yzWuyH@7s0v5IWjtBFV3&H%FrK$Pz(-tjnTo<=|tFdmm zsczTu{O6(;-y_B?*1KfPmY?&D%?u4YJDz8fzz=k>bgt9tEP z4)W$MNtJblS94~@ScNmc}hFBJ*B-X^5D;32H%`4Al^debY7-5+|nth*`I9aBj*a* zJB0%@vxGit&;y;13NS(1jC}jP1%IqQO|!J^ak+UPMVLdmopr2a{t~$IeIxCiTuZi2 zoCe)dJBhyiGEj8VfgsL(bahOk+sw+T=ck8M=r2E+>iUv>C}Tv2lAP!XjYf7zN)&@{ zWZ}Bif~?N&H%vlvIIb^org|$EgZkV_c-!_O>odn2<=R!q+1m%%XQGa*?}{E;roDw8 z+B!(it?x&*mJWI-CmDw7!l7kNINdIsiHxr%tdw3x$J8#6k0*e1Jc@>aErrCxSqL?X zgyGgU0v-PPU>y*EclW4a&6|93;zk8MvO^#9@1I6aD1(cPWRQuG#SMSY;RP=NDtwKI1!Igd&#`zZMK6+v+&`a6@zg6d_`LHcyENYk_THFGEDR5U z5+ejV28+o{?oH~fx)pj$^$(MwE%cTRHfc~q2HnRA>b z9`3@2s?yAHKXDNFCJ2Lz!*KH{4lPTMqZ-SWqUZPZa3yXZREeC%&k8SS)!z~JmLG>~ z{gcRkdwGfevN(uU%pqJ^7YWtNY^dH?J>y*%23953?>-m-E_`X*JDY@??)+GAn5D*ZU?H=DMSMeeaSYW00PEz_wbr!^FCmdJjb 
z8KQ(DU;T-TOCMagGY1ZE8z8{qJGrFtjr0$?V_QxYF6y32_8x9wZ|zWprq9<%^q z$GOMJsfopOO;rHMuha#h75lgq%l*jjWmECtlrG|bJr<^_?IG)*l|hN16t2tD!BUTV z+;M|%Y!tr<*laLEXNQBJ!5yaWZ)AgGxima3XlG?v?##t(z%;c?I%+=Yk zQ0?PHGM-hDXN77Yx@;b1tueNj=r)8j%KSB=OJ%N}U$nYkh$fdLO4d`6l2? z`3&x~v?{iVPQ%G z5t>JmsUr1W_wzjWZ@p{1@9+8hTI*W-*n920zsL9Z9N*nW_J90|;#d2F^(6+cZL)^p z@u7I8VH{*`Ou&X0HWQ5-ExPCuzi9XrY14J_6SKH z-vB(XX)JDbyhrTxq#4nwe1X*;Ig*$=O4i%i!s5$v__0km4)}RmFu&s_!iH(&_LgDp zzTp{&o(N#IDV32w7lPdS5^?C8vvi*KdzA0rL>76L3Qh-|A~IoGT<<$ounQX|+2{Mo zOU)zb@T%2NrsOYB(f7e(!rjR8Gy5@PHxV19M!{Z@)3D}zDlC}HlD@Kf(l9U%6$oX7 zqNFGmE1HE((#r6)pm)TmPo2pdX}~% zrtUN1dEh*flDUCR*BRi3(dnc^uZ%dn6T>&#PBUs1aX9|OH%2B=1s)g&A`Qu>g!a(D z%AbA`-|csi7vD}$UwV+(-ZsWEC#>LU=_omAkRq_yX^o4%%)+~LRm?Z4w~@BXN3b}Z zfg8P_kusTiPiRTH0FNofcI}R^tkD~<&`~6Xt#J?- zd5(#>V@rkx!|+;NQCwoBgl~pFgni|e&~@6K8Fso4A4@|3s1(R+655ZQ@@UgHTiEEG zhFyHkg@lqezOrpSaamVN@@7nhH~B(Ru1Xa|ZfhZtsJYm-qk{X8H-tVO+KI;>e8E8Z zZ{lq>7St?$Y&=N7_-QX$mwSic}rhrmc?W|=0yX^_t-_&R^1g;9Hx+=Wjj$k z{F=BX24YjYeK4)%I!PLT4jT;{!tMjl1ZO|5M+YQcp`^#NF}vm+(MWi~%1x1l*bVWd zL3Smc)US-k-Pi;P6ONL;H?oi$ae>@B^$@T0yi9%w+xTOP=itIU4j^M62hCpd@%uGu zMEAx~s(TVEn196vo7YmrVKzcJri-`}aekoor$KNbKpai-vcgS2l^MsovA`ea1KU5Z zA&u#Qc>a;MD0OrSc43^cY`hiqaF|2Av!(Dl>kgvL6G4(kmytU{ns3II75J3Gc36@f zfKP9-06i%OaNBzfsa8gUR%9AlGqZ~BKInqvBOST(UqqM_rzP;Ut_ZfXeHC-z%~cX? 
z@D{IXjm7JPc^%G1^+dWTg&Zyx$I0y*iC=FQ<0>RX>r_+mo5s=p@V z)g|%wb;7KYJHQ1I z$VlfH8u7reE;R>-gqJY~n&&}l%Xe~r${V8EF3U~+k&n+ODPvttVCJm|!fR|xu=hG; z)N=F@iQkfm+YS2AyFePuTuhlBPvQ7v$^^W7M<9_6Loigc9N*cq0Jb%pXOst0(C({h zxb9K8px4+P=Qnqg)r=l!y_$~itPv{lcV9Sn%|uuqu@8E#?1bJ`RgfbV&Fsqb5M2Iw zg}FJ9h10jLz|MH9;5}8%H4BNvvp+h}LBDtiy5$5zM#WImKM!nm+l2UC07Ph;!>L4; zS^Yc{8x*k2i>iBM$f^KLlSD~tWf1gAPb8z3dysv$9Ia6fD7$1Kc{$k<4p?`SwH~v{ z^N-HZ8ZsM?JGKNX*QCL-VkgEz=CZ(8HH#dnsX$wUClRr?r&fSLoI`@kQK+)l){>rKedba^JmV-;#@biyP3Pf*i-Ei%w-gYT~J zBc<*q!TyW{n6*D4fpRRlVk(Oz7LJhSjDsK+d>aM>vLH}>AM_Quq0q3?sC$W9$Y!ZN`{>Ay8E#C+?R%6V`b-U8+z;gQz)HCBM|eHhqxkoF zYgpYMiJDy};8a-=*dn(Fr5-#e=s(kiH$B+`Q(s;pnOUh!;*3k^mEjrG^-dJf+zd3< zrW}8|qy=|9r@|7Ae)8p~BivNJ2OkJ&ZDi5L2{?*Phc**6s1YQAGJQyJcVYpU2r`KE3r`3xSRfSEe*`IyGw_O? zubBm@Ld*Ai0(S2SA@(^_7_n$6n0Jqdk|vZ3uTKZEa(yO9+M`TPobtk3pZnmaW8#Ty z)?plPItj1rJAj{xe&%Kx>?JqKl3}OYMZq0fmXur2Ohx-l;F)}6x@JgY*AqelSuc+p z6k1U8o#Np9f-EAFFoeW5X2PG{Nu+qII(m~gfxO(83|_+V=G5D+MA5sPv2R@uC@7!Q z?wU_jQ?B6hHNiO9$Qd%F=0nBBB!Pj|Inu)_kUKXdk*t#$=El#)W}(n73bHt?ikx?p#`aO^usW|vA`__+DOgvG(g5g@S0IgoVP9@YcH>6MjTk? 
z`1~fcFRemgXd#MM1zs>L@?v zgL^!VKmCz0t@cIkHiPRFdoW&P4^HSN_?o(+x*M0!9+m5Kc#Q+-pXk8- zUDDuT{vJJPI#29IqS4}<@h~IR433Elm!Os23fLnm(9CZj^CCYproD}V`(LkN!Sg{P zUD1H=eRGAMPqMJUVJw=M9Sbv(4A7Qv9!!>fOgAF;6}` zmzYERo|Z8>=RXkf_jYS$)>dD*#Pw~y7 zEzGxx9Oh-ZV!H$tygw}ob#$D7>wETsiQH^dC$Ud(uLz+j{u9YHn|o-#vH@&-T!ih0 zJxGc}FVniKN4OjsKy)^*KsmBg@lgLR@Qy14jdAzz=YeGK@Y_dbb}hh_!v5&l`=y|C z{S<^chljfSZcH`sST=uNo40Th6)HtUL!x=a+nr;5F_n<+c8i;&-gkK_m zgdeP1Wj6JC{DM%A@KDeHPKo@FC9?dVQoQ~y#oH~^=YK2_-+wHTp8Kt&T{i;{MLuH= z8|$JybKA&A+YJ0fdNm|{b0I;uCquUTX=3fMj66ErgJvC%#y!TnNr}4$nzwc;?j0Wj z*X~p?$2Bxy%ra|cLg`;PX|gubOGHS$e;qm@?hk`O*GQSLP3Ye=1uh(o2g#0$Wc;az zB*|(O-XdN{5+o{MmzNQ7{uo2DiY}3W1sZs~n+?n=`%LM#&PL}hCF91T{rIA2Da3yO zCO|kL9l7lZo;jv?&aAo6$nsEemm8euPlK0d<}!M6(d6=XS@4K8g7ox>xF}j2{GOhK zm!*L)Rk%CdcXl5ge4`0nGBM%8Qy!zY9@B+xS{+=VM9@*EK_a*`KxChmAme@R@P1`K zvj1R0-v2s;ax1qXq>}_^R;~l3FHUH^<1k5lAq|$Bza!boQW9-{4KLLQ1-Zs2q$b=P zs2$FDkMAi6U_4R$jUHmVSstu}vee+B10Eb$0g>gqkc%*j;j;T)P#l= z5d)-dd<+K$Ux8WmQACBYg@)UU@qYU#VB~2~Eu9Ij^=T+%O9dIWZ70=^RbXkF3mvOY zqn`@z$;0Am_!b;NR{qJxZdJWFw|aa(x)t%&=b3_7x|9@ecSrSuL4vC9G5B%*NfiHf z2btR(3k!c&1JRBW%(WJW#|u<(tzj+gXEzRlk9I&yRvzqET@JQY`tW5+BYCQ|j~p9J zhS^H`kZ;8>xNsUC49f)c@(;1vstO986nLzE05)oO87R)gzsCGTsWzmUrj+2bwR6;0<6Uw!)-k*X> z5kBniKY=hdh0`}v;bGbfGUw!LQf5hkk-9gGIiv=e39h(vh7Xw?H33^++=Ub0=Rw}u zuZ%O7Lv~AlC9qK(ZyjkT&WrsZPsakPhm`PQ&jj*H7~y5jp1)=57PLa(@id@c4qx=3y2-liof1!CEs^eBiGs({N=ML))Ka^yA|#5 zsqO@_oHc;GCvUUbz44^nio%CSov`W53^E{Eg6ns$#G_BA6LI0Ac+YilA$^fWG>iI( zn)O05DbO6<^UFd{>ca4M^VLw$KN}Y;+=$M7-3SHlE)c%>s^Du}3K-g+#u^W-n0lR5 zG|tfzSsa>1R1c4Vt&`(X!kYx}$?(B`t1;|VC?l#blku|XRAz6MA`aeKgxf7uVR^d) zURGa?-kIi*ORM5xqEHB;?KJs~K9-kaTfQz?yO<-(Y`O%2;(EJ zhXi1zkPZb>Z2=IsahqNPZ>RGP?N6k!Y{6>;td76n|@#sR-<83mtc9A6_##CRgOzv>(- z`7H!jOgu<39FmYX9znNLgt4HP_u+3n+GO*u1n3==2HA7&`0Ah`7UuKe_&wtAqcwyp zIaEPzmLFrd3Dr0Xj2 zlqwM%^71cO`$QK-CMUz3^zY2Knm0I;Ck_`H!Vr0|4(bWR}d+fosExq|a+3W1-qc8b2pMX{4dx z{IC}x#t8B|&w_UFLo#@{8fk_-CG%hJ2jy-lTvTxti5R;>(qOu9p~D?dUULZaKKV0B zO}9{_FbiVF<}?(& 
z3c`5DH0=A=Gm`TomsI?vhBC#P$)S=-VA1c3b-Op?6(*Wk;ZmePa&;~a4gP{gu3MvL zdmrGbj}}3?Y%wXj9f0jP5maP)0Mbn~@Wb-;cthk^(oxDGe`ztutet~z8p>m{YWm&je;o`XvorEui+`3S1p>3bE^gt>8A*lR*RBQsqOitNteif6Src9@U# z?w1R}@`=c-#~HsJaU%B`oXFh`StMjYoAkZvLosLlNm{=@>Ab6j^~~=GN?#mltSj6U@~6vLwy@~I^Ui#HYH zt3$&=!cG^e52$jU<7Js03)2L{VW~tVVF|8zx)oijY$87EhRAV6J*=BK8}c6VvNQ1+`wxmNW%dk zjVY$BiS>vV>~$(bo~~hJ%9mwAOHUHEoIlP;4NqknzrRJr!*OKM=R)W`t3+OPD`C?| z`dIFc4BAiaAdS~lNUdKiz0KhlT9f?_f7N(TUgySO^RIWwa;BTG_R_E+L{!+OpToXB zr63r4n@E&Ifo<(ch`Xr?`pQp`%AX{B@m)8{Z_q)~zn7p&;|W;VU^md z&7|q+Mx+;bSkO1*2x${G;a$^zQ%F`B*Q$DwtBK=~&5Tsy^dkqJ&y;}iJasTo-vFzw z?1E;!deqt{2RYR*kne`CWT^QU>ZI#IL2f+$Lw=LjdFPp~fWN@uWCrp0Ycft+ZVJU{ zAxz+>K~-BG7O7If$Jb{Fr|w6Q`=eq(ps72~>G6f@pKcJjj-y0vygn{7SPBJ~vygAs z8a$;oUoZ<1=6FaPin(BhZ&o#NLpBL0Pje!^KYcUKFVzs_)f|L*Kej{H#ZCBeb{9H5 zeFkJZ&BEpOWuWg-j(Ek(U}0@ObnVn2P7nz%-x=bF)tV4xsfEYfipJKGHeh$OT!2mG z@qDv&V0&vH3AD(=ExA^ZYunA2WbYyC_1)>$1Ko>L3CU!I`Isd2E&C6!4@X~Ck=JeV0(i%xt> zB^RVJi0V&&G};~koL(G`>lnf-O@r`QJwP?A8`gE%hF+c5CT-ugf&0uCNK|qj&aCrA zADx!Mz^Wo-vP=!)rx+9G!6Vpzbt5jGN~7xEu{g0<9W0HVgmIvbe@4RpZ+k}k4%a6PJMh}D3K&;p5UzVDj@37IK0~26d%n{#qOt5 z$o>7#Q0CSRc+Q5kMD$iJtkar?+5I`B)zuVYkL|=Whn@*Kdlo}<_HrPim(f-kPdIOx zhhzINr2?p)z?SI&(f$-v72uZH%EfeOV$@uT;!fmY`AK=`qQ)vxvdI}m8NMa!UmYZ^ zu?!wsc7@38uSHkw{~*z`*9GQ@k>rq;1gtz{2r2bNNIb|DyuMzC2_I9?^SU0Ax|oM% z-zp{zUo6mD$3nENEFW)|c|~I8jsacwX}FLvgJ1hRpgKufxUV^f)M=K$j#@p~#x|fy zpLZ~u6m)QxyaeVg%7EmUX*i|W2eZ!&@R~#^Y=31oD4Bjo39fZyUNr;3KDO|yTMwkP z$0BtlUv9&o0!Y<8MrTLUVC_-?mUEmB0d3o1Xwn~|ka`DADmsIX&Q*bef;r^ZB1b{G zwFtiHQ;R-HO~UUtMUn#kWnsMKKGMIY8!4}~#D04oA;;J1I3-vD!lLG3nY)=tHg7c! 
z^$QPO@!u(t{}?0w|F{V&|8^4s+(HBYV~hm-%NRM{xsYAyN>eM_=CjmuimHvmY|JcM z3ePZP!>Sl+=_Je^^0r_dV{ItiAJ(i?mnLPcVM84|BT2cPTf{m^o3oE5Qk2(sEB4S# zD{8>sf)Z5`21IYetdx%l<=kgOjWM@mOUG)nE;=@pMYa{IKi`g3e_=`48>+COtHMKr z1DC7+e?w6=M}+^wiLCyoj5U9kvDPhg-M>u{xunqb|1w3Sgu(lk?Np?b9J~9P3_n!g zo{Ri&kJTR1;fsZzrp|p0G}Ab>k>|hfCEMIC!L?8s?5#bRR_|HKPQ75yc?`O zvQ4(s)fb)N<$2WB_!=+dLR?K~tfk0JFOi^Uj;r9sZHk~Idt%wiY#pob@oYi9W3rj5 zG{cR4pTaro+tI4-t7v`ye(IywY+B6v7<)upfjhJMA?siDnm_n>8SST%#_rAQ;%%Fz z#Qo){Mjw)~qk8-0IaNJ#dcf14a`)+FZ*?5wU(7aO)5ngmPJ1afYgsNYdagOYEc$lc zygT>n%oWd2oh`X^_1o9%!a)P>(>4YAuC6mzv9X78UZT#;kD1T?KCqZ>`8=~OzIH6f z*AL{9D+Fwfrvx{rZV{J!t%Tz27SZG1lv7VT*6;?qr*Jc>zVYp8N$xk5LzgJNr~KIh ze)8Nf_R2g3dd9aBN`6H$wSI0Otq>wh53J>QLkgC3+OOSILdG@LGeevuNjGJDt%{mo6~fl;`bt@8n^9haiu97E zN=oAOCiY#QHrE{?LWMQiQ?+3Zw4$FJ9TtC(O%b2VEiIzCC)4kg`7Qq8m*Jm%Qnv%p)}e&&APXJ zo*bsnt+ljd>npGDJl6TL(z3d>47 zxWqrPAaR>Yl@r|nuJ<%kcJ7bq?v29yN(tYA!F#q188gn5Rwq1}@>B(wsuwShj#w zEPl$)2$QB)d1`R?+7#%%b;l|F)yAB3Ya{>2z3J4^`a{&wi3R-3CHvS5lY97;0}0e| zZwKmrv_GpRcAtH!eTKR)DnYF}_LJ=&m8WKJzee3iA4~7IxJ*5ok;?w;t6spQ>r-_F z5&Vyo2|G=A&s5Qp^a>LZ?#H9^tijZkY#H~By;u8){UE&Wo>`8t%iOi;PG?0vFCm%o zmmK7M5NoAc47PJKU*@q!UmsHK2ikbXa>}gXsc5QKR-XPIw8!k-$PTu?^(2)s?J>`H zlL=R7yq)qM;CL>o32bpxF*|&(jm^~E$5+%8P>1hspqf5b^2EJXQq$VJ>k1_|voQrW zl+9gWZC`9-_upGocXo3s|LZhEUTWfV>S5DfcCG1Rdd=lAT%w8%bx`RTb;_ieHoKI{ z{t_Igo%F4_kvF%giF+mK?1DUMiNXl49x(0lOOKT_YNkF2#?cjt>HMX)<+tMD-MD&}lJIv}s8>Rdmst?Nn6eHag1FHuo@?G*iD3cTp7TIcbuhe6{hgEP7>o5dYjP` zebwkV^_05KJNn$Ev+msSASJeRdna|NbPPA;T0gbmaxho9=q@!{)WVy2b2ZNa0;$!r z!&&d#W%Oj12);O9l-s1wvQK9yv%gf*D5Z~*tjcvBT~|Awp1RAJI|FL0D={I1>-3vI2}W_rtt__MKx4sa80mQd@)mhzNZzOo9&&-l-sfABIKwy-;e zVcek`!b3y;H{$efE%C2?;*I|(iA{eiiOp`ITmDuOa*3g#e+wdu6C*+V_j9lD)x<>k z7p!x5qFWK4E_hzIWl{5vxG|I#$!{~Srg-$$~|Ep+?8HH}D8=#GDhGhU!4_aQEgVl`Is z{W@k*O5XFhgERf8Q%w?lzbB$}{9Z$@;KLVo+jM7Y;=nKVSCKZ=yXzGdGs}Yi_+TYF zc4jbVrsGUqysgez??dzol?ckpIFX*FN3k6lg}kQS0xIE%2-obX%ln}6nKG(T=dz~7 zuw%XwO4VZ>FElbdbZ6l5ssF8(f1T4^|5Ot7cO|>sLihY#iAZ8-^xt)ey*p+>g~o)3 z#s)@gjnEfec)0b+3r 
zhj9#2aSbR-0ZTrGaSmf{b!~8Y3S@M7aAgW{4_E>K003q*aS%%YO96**5mIpyC`$ua z0RR91JaH3FPEJby|Ns9=|NsC0O8|6n6k>@(_{ZcBKeeY9zJ%;=KRa$1zbFTPCqKrB z1V7Sw48QI>K|d>19zNh*c)h))kv+dxwmq*L2fr~SguTqOc)#~2AiwU&c0X>hPra@X z7e72M{yxh`x4!Q%sy)VSs6DN2guO0tLp+Qbr@c9F;yVn2h`mK-!8><_Y(0BeH9Jqj za=qKSB)=CLMLt}5hrjrfNxyz=(g;6=Wyn;oVxp}H(a*Cm;p}9$-p;3~dxn*jqfw_fIVv>P{f#F1Z?TicT5nK!m z3?-SlsqrQGC5btOtnLnOO26I#1ta)?L~221aehi_F;te#n=yhHC|sOamYNcuSR9{{ onOsuH-WkCS6f8=tj4w+qD$dN$E98h^1&SExS?C!Qa+c}=0CRRKh5!Hn delta 117 zcmcb_xP@_oyh2K{ak8bMiII7tiLptlxw)lBw7Ff diff --git a/tests/data/rllib_data/single_agent/params.json b/tests/data/rllib_data/single_agent/params.json index c5e605ef4..0e508b4e9 100644 --- a/tests/data/rllib_data/single_agent/params.json +++ b/tests/data/rllib_data/single_agent/params.json @@ -1,32 +1,32 @@ { "batch_mode": "truncate_episodes", "callbacks": { - "on_episode_end": null, - "on_episode_start": null, - "on_episode_step": null, - "on_postprocess_traj": null, - "on_sample_end": null, - "on_train_result": null + "on_episode_end": ".on_episode_end at 0x147eb0400>", + "on_episode_start": ".on_episode_start at 0x147e97e18>", + "on_episode_step": ".on_episode_step at 0x10be8fea0>", + "on_train_result": ".on_train_result at 0x147eb0510>" }, - "clip_actions": false, + "clip_actions": true, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, + "eager": false, + "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "WaveAttenuationPOEnv-v0", + "env": "SingleStraightRoad-v1", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 220,\n 270\n ]\n },\n \"clip_actions\": false,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"WaveAttenuationPOEnv\",\n \"exp_tag\": \"stabilizing_the_ring\",\n \"initial\": {\n 
\"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 260,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"RingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 
0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"lead_obs\": true,\n \"local_reward\": true,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"reward_after_exit\": true,\n \"sort_vehicles\": false,\n \"target_velocity\": 18.0,\n \"terminate_on_wave\": false,\n \"wave_termination_horizon\": 1000,\n \"wave_termination_speed\": 10.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": false,\n \"evaluate\": false,\n \"horizon\": 2000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 0\n },\n \"env_name\": \"flow.envs.straightroad_env.SingleStraightRoad\",\n \"exp_tag\": \"singleagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 500,\n \"ghost_speed_limit\": 25,\n \"lanes\": 1,\n \"length\": 2000,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": false\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": \"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1944,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 
\"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 216,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.5,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 0.3,\n \"b\": 2.0,\n \"noise\": 0.5\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 1621\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n 
\"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.999, + "gamma": 0.995, "grad_clip": null, - "horizon": 3000, + "horizon": 2000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -40,23 +40,27 @@ "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "INFO", + "log_level": "WARN", "log_sys_usage": true, "lr": 5e-05, "lr_schedule": null, + "memory": 0, + "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, + "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ - 3, - 3 + 32, + 32, + 32 ], "framestack": true, "free_log_std": false, @@ -76,13 +80,17 @@ "policies_to_train": null, "policy_mapping_fn": null }, + "no_done_at_end": false, + "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 10, - "num_workers": 2, + "num_sgd_iter": 1, + "num_workers": 1, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -117,7 +125,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 60000, + "train_batch_size": 2000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git 
a/tests/data/rllib_data/single_agent/params.pkl b/tests/data/rllib_data/single_agent/params.pkl index 511d3434300e6270d326503ee9bcfce6f9127b1c..60cfcb075c7ed028b5ef23a05739e2fb931e1abd 100644 GIT binary patch literal 10890 zcmeHN&2J>fb?+Vy$>EU8kL60TR*JwheBe6PfaIR$fbFx-aZUmfz^5E+z(@ce>^~vDSJge!GbDEv z*zh3`7t(Z9y?XWPy;twOdiB2F{HMSDhN1o*7VC08%ma29CHp){i=HW0sm;TKl*K$L z2dIA~wS(B_Y}d{l!BX!5FM9tb+qxM46;tc5&m4-~OWg zTKq4+kh3C6ya!-B32GsY`WdTwK9!4Lpc{pp*%>s3cGrgDFvzU$4U|0R`+juDVjG)G zi@*N%GlubDdkudU;H+2IbVGI6uImw1kcI{y8&)7bw_O(PrWk>u@oy?jG(vxtAAw&b zpHGJ%;n@zPh2gEv)rt8u>T*4LrSrMi?$j*l&S}e!+;!=lC^4%H=Uo&K4w-!IJ{TzEZNxL;7b8C!DEA;|xdD4u>L|Kg( z9HAhOae8@7#Au33YDuG5h_i$Ni?psoHmsA9#Tke!iJ&{^>D=|gfzR)j-FMJ!=@l$~ zKW3Rdp!ul+vIjhbx?}pGeN;htA=K0+6?&TT*4elVyC{H^sIF~@y91u8)0ud?IjKLb zZu@?@npSK(JPC9YM+}r4)sR77*uW;9)t7(~&+-ITS|i`6Ynqjcyx;dvlTRAkj;7bj zL$4ntL7k-g8?=g2n2P3bn5B&{*xkHW!jhSzC?u?pb*Ap!!Rzws%$59V%{4?NPAiQNvzT@Evv-2zVlWRd5q z39y2qUrR&YEiHkyb-A-~7HGLst2`6zY`~DL%D1jwnG|KUvQ-^b&kd?0#x$SK#tOrl z-sZ{MQJ&DwzkTJ(xX6ac53-|J?OWu59a<-NS`$t+=l?mI6MvG+n`a4moXcCA6OOWC zm^EZ_;-A9dF`nNWwrZTWq7-qAt&V({`~FClR>l!(YR5q%YIe=gP_(MFn#?>554F=y zsg#|!wZY!e{pBvnuVBhZ%9a!PQNnhQv{}K}r1wJ5WbY#21)%SxFrRdQCxLk2r8IWN zMqoG{g#k3gJgS!pk0|@8Dg7N*BxLLE9R)PSK)3Kc7P(slyq9QNEZ|Kmm zoWPb+40GoER7WTi6s7`kWn#NtewtKL+U;n#?kY$T6e~f;Mm@}f()pa&!4MALwH#uFO zHaayGtpGF{^5)lZ`&0V3d8TkPI|m+&j|q}c)C_fnSuf4#j)_TIxRp`hAq@ zD>GLa2>tt&nH%xLs-K!MH9y6aTwDV)tb^8$?JoAU*M66p9~F0s zb@7^PbrE%pdU|Hh=rwv%y}RAlj}6y+-gsyZjAL`pa2wZ*@@s1N)f|3J55H!v8Ln~H zDAsr6Y)LRSV;|skfK&{G4ZoRvfR~ImwRy;s!BHV+sS(73;n!Y)ozCH!kT6{HY!EJ+ z$Y(@_cEmhz2ZCq62~tJ~+z?t)>q%y#*&xO6GQHq~PcL%`weBlE}@#MWXerHOC) z4Cw7df8vFNAZmE?ot+)RBG;b4ZPvH(5FpR9KJt<3!jbX}Qp$+$;{At2gZL})s$5{~ zgWUEhNkhwg17JwJB14+81ec)Q&NY>8Phanx6!#Kib{Iy$N_S>Vhw3gewtwjIzMcCS zbwMK;vhB5Y8~cmgTy0CdCucDnISTSQ+---t!_s_L!_m`@oM&v{N4S{jb@GpkkMC^A zr7@y-T!``cjgBzj7;y%?9Dzh-ZUPW4;yM^wl>l#N2bCnYOGZo8=o~-8yU{Z z=Jwid8zV-!()@Abkx6S|wI)Uvj+>9n^gp5T=iGTvzesJ;sk9bRT=y4A@iqgd~mDnyS 
zo;P~U-fH(Mj-_$0^APOaGxp5A#xXYI8vD};7_54xs0ZK8{n?~>Z2n{8gC-7W&-e&- z#oWhHD`QCC2?O)Yq6Fa|1*t3TS#%ePTJk> z*A#|j!l3nNh>3It$i!zQn$8%S8{+1TH-4v|z-4m)vkG%9-pQb5=SZ3I@G!CCn4CZx znZGh|RmfF#16P*#d-N`pzNbJEunmu-c4}uCz9Bt1ODH^-JO+Is^Qsz+FosJ|&$uF& z(_R|lL08<8Gqe&p-Q@sAUB7(|;8N+adv(+m@PWM-!9K#ztcxA-mO2d@^-Q^j_)NC! zgYsNJe{PF#`vtRWk+WHbvzftv7pGoqUNkQ@Uc&dX`4Y~-vU$-2vv3&5e~5QWM+b)MD-%X z81~K#duMCCtzqxnFbIJWy)JHux8?a!n4u~#V95$`Q!bSO$Efd}s&8-DN2>9;_*ZCE z_0%8mEb*MQQW{!ADaIGuubAECN-;<|p2fd15~=IJe^om01$1EjOX+~(@>wbX`hK3) z{Im*;5UHk!4C=Dc!QG3MY`|Ga$8ES%KLDRiIAjXlW;*clC*uxl+Eeng?=4uZfHu#9<>;8*b$l$~r14jUX(>zc13UAUz; z&8|^$Uv58<%XNm;y^+RrvZwe2VG`aUxQ((Mx-@{O2B#m`hu>~@VHum1;N^FIkEIL5 zpK`XUB$A8On!1Nv@s9W(WsiTo{RaG*(P+UUom9EK83bpQ*8TdamZ#2-6P8E(cV&9C z%H{G|ZxDhRWnmKnV$aW^eYo44QfB)) z3uT{!cmIrLPIYhpC2GqsL!(gjMWY#ybNP%e>p5j3dw&8o>9zJ=Qa^yC01uK(sYIw^ z7RmKFdcfmOh7u}e3)TA$GLJ>?r}ARemj+RkQH4fLI<7nqD-odvM~j$F4K)r1vc-i7LBuP$#EA$}@wqJJnD1 zb7TF6e4(u6!jADAjzX~dI9NNvhjYl>1j*)v%J^miY$-XSrB|cfJ>m-r%5We?{VV#j zju6lJb0u@_dZajuhX(VB#j9my z3UUMeK6t6i4GlR1xzQ3hRQd}yj#BE7avtb{KRE>z02v`aBxS%V7fNU<@=|wah43AO zfKA!P!yQU`D5#_RuWZGU?>X=Tq@gRSafG{f9FunRL#(Yk0l~6}tODZ4om|eUiV&MD z$Bee?R7a=E=-RTi?}Jp{@1yVM|4e_M$@A&JWl$HdUQ+pecnn>YLxqMpy*p9DW`PU` z!ISck9Or#-#SxsY%~uu`a9^dsnhwN4e`p2Npd^ZIwS# zPb47tKQ603wL*R1yI(J3XsTCq=?OXpcjmBFUE_wS#e-ko`250$avATVR6RPXHvBa2 F{4W_Wq{{#R delta 778 zcmZ`%OKTHR6z)vY2a`u*YfS=5+i9znz9z8`T5wTBA+$}455$U0Zzs2%sgrw$xpz_t z7Tk$4i0PSv8*$N{U>C-Pd-rYx(Z3)lxGIVZ?=)SsMepwWzVm(O+{1Z#>sdbeD7q8L zHXXdJU=XxSHd-iNTZDy7A1eCol<3&87%gF)aqI*|KY4`0q$HSxU>GVpHr5+N=;AK2 zi0)$M*<93g3d*63J1#XaW1+ke6yZ<*Xl;8X5}AwC_P@h#X}EX)l8v zVBvktk2AW)^+N{k$i4Dz3LeUr;g=jQ<%X4>BTp@(2YLw`*j8tHPHL`2)^!_`b#4mv z?lEJSkTySa&$tv1ka<>+SG)0VBcyOR7@&5L1ip*)!V3oXF12j zL)@ZdY4IwlK&tt4Cr7K6ZB@f5xQX>}ocd(*O$olVs6CS>daGi2)b_#z5jw6xwzV7X zwax-l<}=K|1i2Os+s#wz+&IMWL+MUduxS}KWo42i$dF5q$@v_hbU(aGbtbP+!Y$%ak6ou8hrO=x`rD4Z zsEGN01-wX)PpAL2hmndF_;JIw9B~|*$SpB(9Sw|(@UJX*0+GxpEDsdU#)wzeH>rCc zyDXTK{k9OVJDvbduyetxb&qzX8ke?;xMAwds^VZ_OD*Q&T${NBpU!`% 
bFAr`=9f}{bU7Z;wE_pT%ie?r*Uwro$+&}(6 From 240cc05f8f37f7cbbfcd0a010febc8a4a56bf342 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Sun, 14 Jun 2020 15:42:36 -0700 Subject: [PATCH 259/335] Code cleanup --- flow/controllers/imitation_learning/ppo_model.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index f7490d180..cbc51c6c4 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -38,8 +38,6 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, name, **k self.setup_model(obs_space, action_space, model_config, num_outputs, h5_path) self.register_variables(self.base_model.variables) - # register variables for base model - # compare_weights(self.base_model, "/Users/akashvelu/Desktop/latest_run/imitation_model.h5") def setup_model(self, obs_space, action_space, model_config, num_outputs, imitation_h5_path): @@ -137,5 +135,4 @@ def import_from_h5(self, import_file): import_file: str filepath to h5 file """ - print("LOADING WEIGHTS FROM H6") self.base_model.load_weights(import_file) From d9470cbff0845c84d8c78699bdc3ba6ba57ea9f0 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Mon, 15 Jun 2020 12:12:08 -0700 Subject: [PATCH 260/335] some cleanup --- .../exp_configs/non_rl/i210_subnetwork.py | 164 ++++-------------- .../rl/multiagent/multiagent_i210.py | 6 +- 2 files changed, 36 insertions(+), 134 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index d4f0800ad..8b572d39f 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -28,7 +28,7 @@ # whether to include the downstream slow-down edge in the network WANT_DOWNSTREAM_BOUNDARY = True # whether to include vehicles on the on-ramp -ON_RAMP = True +ON_RAMP = False # the inflow rate of vehicles (in 
veh/hr) INFLOW_RATE = 2050 # the speed of inflowing vehicles from the main edge (in m/s) @@ -42,59 +42,6 @@ # steps to run before follower-stopper is allowed to take control WARMUP_STEPS = 600 -highway_start_edge = "ghost0" if WANT_GHOST_CELL else "119257914" -accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) - -vehicles = VehicleParams() - -inflow = InFlows() - -if ON_RAMP: - vehicles.add( - "human", - num_vehicles=0, - color="white", - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=accel_data, - routing_controller=(I210Router, {}) - ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - num_vehicles=0, - color="red", - acceleration_controller=(FollowerStopper, { - "v_des": V_DES, - "no_control_edges": ["ghost0", "119257908#3"] - }), - routing_controller=(I210Router, {}) - ) - - # inflow.add( - # veh_type="human", - # edge=highway_start_edge, - # vehs_per_hour=inflow_rate, - # departLane="best", - # departSpeed=inflow_speed) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=INFLOW_SPEED) - - inflow.add( - veh_type="human", - edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)) - ) - # =========================================================================== # # Specify the path to the network template. 
# # =========================================================================== # @@ -124,6 +71,7 @@ # =========================================================================== # vehicles = VehicleParams() + vehicles.add( "human", num_vehicles=0, @@ -138,96 +86,54 @@ routing_controller=(I210Router, {}) if ON_RAMP else None, ) +vehicles.add( + "av", + num_vehicles=0, + color="red", + acceleration_controller=(FollowerStopper, { + "v_des": V_DES, + "no_control_edges": ["ghost0", "119257908#3"] + }), + routing_controller=(I210Router, {}) +) + inflow = InFlows() + # main highway -inflow.add( - veh_type="human", - edge="ghost0" if WANT_GHOST_CELL else "119257914", - vehs_per_hour=INFLOW_RATE, - departLane="best", - departSpeed=INFLOW_SPEED) +highway_start_edge = "ghost0" if WANT_GHOST_CELL else "119257914" + +for lane in [0, 1, 2, 3, 4]: + inflow.add( + veh_type="human", + edge=highway_start_edge, + vehs_per_hour=INFLOW_RATE * (1 - PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) + + if PENETRATION_RATE > 0.0: + inflow.add( + veh_type="av", + edge=highway_start_edge, + vehs_per_hour=INFLOW_RATE * PENETRATION_RATE, + departLane=lane, + departSpeed=INFLOW_SPEED) + # on ramp if ON_RAMP: inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=500, - departLane="random", - departSpeed=10) + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + departSpeed=10, + ) if PENETRATION_RATE > 0.0: - for lane in [0, 1, 2, 3, 4]: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) - inflow.add( veh_type="av", edge="27414345", vehs_per_hour=int(500 * PENETRATION_RATE), departLane="random", departSpeed=10) - inflow.add( - veh_type="av", - edge="27414342#0", - vehs_per_hour=int(500 * PENETRATION_RATE), - departLane="random", - departSpeed=10) - -else: - # create the base vehicle type that will be used for inflows - vehicles.add( - "human", - num_vehicles=0, - 
lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=accel_data, - ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - color="red", - num_vehicles=0, - acceleration_controller=(FollowerStopper, { - "v_des": V_DES, - "no_control_edges": ["ghost0", "119257908#3"] - }), - ) - - # If you want to turn off the fail safes uncomment this: - - # vehicles.add( - # 'human', - # num_vehicles=0, - # lane_change_params=SumoLaneChangeParams( - # lane_change_mode='strategic', - # ), - # acceleration_controller=accel_data, - # car_following_params=SumoCarFollowingParams(speed_mode='19') - # ) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=INFLOW_SPEED) - - if PENETRATION_RATE > 0.0: - for lane in lane_list: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. 
# diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index fe5c71f63..028e5bc7c 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -24,7 +24,7 @@ from flow.utils.registry import make_create_env # SET UP PARAMETERS FOR THE SIMULATION -WANT_GHOST_CELL = False +WANT_GHOST_CELL = True # WANT_DOWNSTREAM_BOUNDARY = True ON_RAMP = False PENETRATION_RATE = 0.10 @@ -51,10 +51,6 @@ edges_distribution = EDGES_DISTRIBUTION highway_start_edge = "119257914" -# TODO: temporary fix -edges_distribution = EDGES_DISTRIBUTION.copy() -edges_distribution.remove("ghost0") - # SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ From 1f6ceee2208b0af7b9458157beecc7c225fc0f04 Mon Sep 17 00:00:00 2001 From: AboudyKreidieh Date: Mon, 15 Jun 2020 12:22:04 -0700 Subject: [PATCH 261/335] removed unused simulation --- examples/exp_configs/non_rl/straight_road.py | 134 ------------------- 1 file changed, 134 deletions(-) delete mode 100644 examples/exp_configs/non_rl/straight_road.py diff --git a/examples/exp_configs/non_rl/straight_road.py b/examples/exp_configs/non_rl/straight_road.py deleted file mode 100644 index 1669bb896..000000000 --- a/examples/exp_configs/non_rl/straight_road.py +++ /dev/null @@ -1,134 +0,0 @@ -"""Multi-agent highway with ramps example. - -Trains a non-constant number of agents, all sharing the same policy, on the -highway with ramps network. 
-""" -import numpy as np - -from flow.controllers import IDMController -from flow.controllers.velocity_controllers import FollowerStopper -from flow.core.params import EnvParams, NetParams, InitialConfig, InFlows, \ - VehicleParams, SumoParams, SumoLaneChangeParams -from flow.core.rewards import miles_per_gallon -from flow.networks import HighwayNetwork -from flow.envs import TestEnv -from flow.networks.highway import ADDITIONAL_NET_PARAMS - - -# SET UP PARAMETERS FOR THE SIMULATION - -# number of steps per rollout -HORIZON = 2000 - -# inflow rate on the highway in vehicles per hour -HIGHWAY_INFLOW_RATE = 10800 / 5 -# percentage of autonomous vehicles compared to human vehicles on highway -PENETRATION_RATE = 0.0 - - -# SET UP PARAMETERS FOR THE NETWORK - -additional_net_params = ADDITIONAL_NET_PARAMS.copy() -additional_net_params.update({ - # length of the highway - "length": 2000, - # number of lanes - "lanes": 1, - # speed limit for all edges - "speed_limit": 30, - # number of edges to divide the highway into - "num_edges": 2 -}) - -# CREATE VEHICLE TYPES AND INFLOWS - -vehicles = VehicleParams() -inflows = InFlows() - -# human vehicles -vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=(IDMController, {"a": .3, "b": 2.0, "noise": 0.5}), -) - -if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - num_vehicles=0, - acceleration_controller=(FollowerStopper, {"v_des": 12.0}), - ) - -# add human vehicles on the highway -# add human vehicles on the highway -inflows.add( - veh_type="human", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (1 - PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23", - name="idm_highway_inflow") - -if PENETRATION_RATE > 0.0: - inflows.add( - veh_type="av", - edge="highway_0", - vehs_per_hour=int(HIGHWAY_INFLOW_RATE * (PENETRATION_RATE / 100)), - depart_lane="free", - depart_speed="23", - name="av_highway_inflow") - 
-# SET UP FLOW PARAMETERS - -flow_params = dict( - # name of the experiment - exp_tag='multiagent_highway', - - # name of the flow environment the experiment is running on - env_name=TestEnv, - - # name of the network class the experiment is running on - network=HighwayNetwork, - - # simulator that is used by the experiment - simulator='traci', - - # environment related parameters (see flow.core.params.EnvParams) - env=EnvParams( - horizon=HORIZON, - warmup_steps=400, - sims_per_step=1, - ), - - # sumo-related parameters (see flow.core.params.SumoParams) - sim=SumoParams( - sim_step=0.5, - render=False, - restart_instance=False - ), - - # network-related parameters (see flow.core.params.NetParams and the - # network's documentation or ADDITIONAL_NET_PARAMS component) - net=NetParams( - inflows=inflows, - additional_params=additional_net_params - ), - - # vehicles to be placed in the network at the start of a rollout (see - # flow.core.params.VehicleParams) - veh=vehicles, - - # parameters specifying the positioning of vehicles upon initialization/ - # reset (see flow.core.params.InitialConfig) - initial=InitialConfig(), -) - -custom_callables = { - "avg_speed": lambda env: np.nan_to_num(np.mean( - env.k.vehicle.get_speed(env.k.vehicle.get_ids_by_edge(['highway_0', 'highway_1'])))), - "mpg": lambda env: miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) - -} From 7fd11b172e4a6e964f9cad3a41757e97960b72cf Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:03:14 -0700 Subject: [PATCH 262/335] Add queries for safety metrics reporting --- flow/data_pipeline/query.py | 66 +++++++++++++++++++++++++++++++++++-- 1 file changed, 64 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 184c7217a..9d701eec1 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -9,6 +9,9 @@ "POWER_DEMAND_MODEL_DENOISED_ACCEL", "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" ], + "fact_safety_metrics": 
[ + "FACT_SAFETY_METRICS" + ], "fact_network_throughput_agg": [ "FACT_NETWORK_THROUGHPUT_AGG" ], @@ -17,6 +20,22 @@ ] }, "fact_energy_trace": {}, + "fact_safety_metrics": { + "fact_safety_metrics_agg": [ + "FACT_SAFETY_METRICS_AGG" + ] + }, + # @brent: are these needed? Is there a race condition here that may break things? + # "fact_safety_metrics_agg": { + # "leaderboard_chart": [ + # "LEADERBOARD_CHART" + # ] + # }, + # "fact_network_throughput_agg": { + # "leaderboard_chart": [ + # "LEADERBOARD_CHART" + # ] + # } "POWER_DEMAND_MODEL_DENOISED_ACCEL": { "fact_vehicle_fuel_efficiency_agg": [ "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" @@ -50,6 +69,8 @@ tables = [ "fact_vehicle_trace", "fact_energy_trace", + "fact_safety_metrics", + "fact_safety_metrics_agg", "fact_network_throughput_agg", "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg", @@ -179,6 +200,39 @@ class QueryStrings(Enum): 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', 'denoised_speed_cte')) + FACT_SAFETY_METRICS = """ + SELECT + vt.id, + vt.time_step, + COALESCE(( + value_lower_left*(headway_upper-headway)*(rel_speed_upper-leader_rel_speed) + + value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) + + value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) + + value_upper_right*(headway-headway_lower)*(leader_rel_speed-rel_speed_lower) + ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200) AS safety_value, + vt.source_id + FROM fact_vehicle_trace vt + LEFT OUTER JOIN fact_safety_matrix sm ON 1 = 1 + AND vt.leader_rel_speed BETWEEN sm.rel_speed_lower AND sm.rel_speed_upper + AND vt.headway BETWEEN sm.headway_lower AND sm.headway_upper + WHERE 1 = 1 + AND vt.date = \'{{date}}\' + AND vt.partition_name = \'{{partition}}\' + ; + """ + + FACT_SAFETY_METRICS_AGG = """ + SELECT + source_id, + SUM(CASE WHEN safety_value > 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, + MAX(safety_value) AS safety_value_max + FROM fact_safety_metrics + WHERE 1 
= 1 + AND date = \'{{date}}\' + AND partition_name = \'{{partition}}\' + GROUP BY 1 + """ + FACT_NETWORK_THROUGHPUT_AGG = """ WITH min_time AS ( SELECT @@ -272,13 +326,19 @@ class QueryStrings(Enum): e.energy_model_id, e.efficiency_meters_per_joules, 19972 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, - t.throughput_per_hour + t.throughput_per_hour, + s.safety_rate, + s.safety_value_max FROM fact_network_throughput_agg AS t JOIN fact_network_fuel_efficiency_agg AS e ON 1 = 1 AND e.date = \'{date}\' AND e.partition_name = \'{partition}_FACT_NETWORK_FUEL_EFFICIENCY_AGG\' AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + JOIN fact_safety_metrics_agg AS s ON 1 = 1 + AND s.dat = \'{date}\' + AND s.partition_name = \'{partition}_FACT_SAFETY_METRICS_AGG\' + AND t.source_id = s.source_id WHERE 1 = 1 AND t.date = \'{date}\' AND t.partition_name = \'{partition}_FACT_NETWORK_THROUGHPUT_AGG\' @@ -536,7 +596,9 @@ class QueryStrings(Enum): l.energy_model_id, l.efficiency_meters_per_joules, l.efficiency_miles_per_gallon, - l.throughput_per_hour + l.throughput_per_hour, + l.safety_rate, + l.safety_value_max FROM leaderboard_chart AS l, metadata_table AS m WHERE 1 = 1 AND l.source_id = m.source_id From c7937ffc01b589d39ce949420d88b1e6fbcb8db2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:04:52 -0700 Subject: [PATCH 263/335] fix typo --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 9d701eec1..1ecaffb30 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -336,7 +336,7 @@ class QueryStrings(Enum): AND t.source_id = e.source_id AND e.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' JOIN fact_safety_metrics_agg AS s ON 1 = 1 - AND s.dat = \'{date}\' + AND s.date = \'{date}\' AND s.partition_name = \'{partition}_FACT_SAFETY_METRICS_AGG\' AND t.source_id = 
s.source_id WHERE 1 = 1 From d523b743d9a0596169650f7397e7b352355f7d77 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:07:10 -0700 Subject: [PATCH 264/335] filter warmup steps and ghost edges from safety calculation --- flow/data_pipeline/query.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 1ecaffb30..bd8cc8bc9 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -218,6 +218,8 @@ class QueryStrings(Enum): WHERE 1 = 1 AND vt.date = \'{{date}}\' AND vt.partition_name = \'{{partition}}\' + AND vt.time_step >= {start_filter} + AND vt.{loc_filter} ; """ From 946938a9704e99a0cfc1a2958ca6a1043c2dfbdd Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 15 Jun 2020 22:11:50 -0700 Subject: [PATCH 265/335] invert safety_rate definition --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index bd8cc8bc9..bc406ae17 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -226,7 +226,7 @@ class QueryStrings(Enum): FACT_SAFETY_METRICS_AGG = """ SELECT source_id, - SUM(CASE WHEN safety_value > 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, + SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 From 10bf24fde3b34d0f612ddbd26d3e7e9306eb722b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:35:33 -0700 Subject: [PATCH 266/335] Flag reconfig --- .../imitation_learning/imitation_trainer.py | 2 +- flow/controllers/imitation_learning/run.py | 4 +-- .../train_with_imitation.py | 27 +++++++++---------- .../controllers/imitation_learning/trainer.py | 4 +-- 4 files changed, 18 insertions(+), 19 deletions(-) diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py 
index a6f75ea45..18c41a795 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -21,7 +21,7 @@ def _setup(self, config): # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = ppo.PPOTrainer(env=env_name, config=config) policy_id = list(self.trainer.get_weights().keys())[0] - self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id) + self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) def _train(self): """ diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 25cb0f230..eee7d7b3f 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -48,7 +48,7 @@ def save_controller_network(self): def save_controller_for_PPO(self): """ - Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning + Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning. This model encapsulates both a policy network and a value function network. 
""" self.trainer.save_controller_for_PPO() @@ -83,7 +83,7 @@ def main(): parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') - parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index 78053fe2e..cd80131cb 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -12,7 +12,7 @@ def parse_args(args): dictionary version of the argparse """ - # train.py args + # **** TRAIN.PY ARGS **** parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, @@ -35,10 +35,6 @@ def parse_args(args): parser.add_argument( '--rl_trainer', type=str, default="rllib", help='the RL trainer to use. either rllib or Stable-Baselines') - parser.add_argument( - '--load_weights_path', type=str, default=None, - help='Path to h5 file containing a pretrained model. 
Relevent for PPO with RLLib' - ) parser.add_argument( '--algorithm', type=str, default="PPO", help='RL algorithm to use. Options are PPO, TD3, MATD3 (MADDPG w/ TD3) right now.' @@ -73,26 +69,31 @@ def parse_args(args): '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + + parser.add_argument( + '--load_weights_path', type=str, default=None, + help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' + ) # Imitation Learning args parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') - parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy + parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training imitation policy.') parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') - parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this + parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run value function learning, after imitation') - parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to 
run SGD on during imitation learning.') parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural net') - parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') + parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net. load_imitation_model must be True') parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') - parser.add_argument('--save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') - parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') + parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') + parser.add_argument('--PPO_save_path', type=str, default='', help="Filepath to h5 file in which the ppo model should be saved") parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') @@ -103,8 +104,6 @@ def parse_args(args): parsed_args = parser.parse_known_args(args)[0] dict_args = vars(parsed_args) - dict_args['save_model'] = 1 - dict_args['save_path'] = dict_args['load_weights_path'] return parsed_args, dict_args @@ -116,7 +115,7 @@ def main(args): flags, params = parse_args(args) params["fcnet_hiddens"] = [32, 32, 32] - params['PPO_save_path'] = params['load_weights_path'] + 
params['load_weights_path'] = params["PPO_save_path"] print("\n\n********** IMITATION LEARNING ************ \n") diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index c1ff5f981..daba2f9f4 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -323,5 +323,5 @@ def save_controller_network(self): """ Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. """ - print("Saving tensorflow model to: ", self.params['save_path']) - self.action_network.save_network(self.params['save_path']) + print("Saving tensorflow model to: ", self.params['imitation_save_path']) + self.action_network.save_network(self.params['imitation_save_path']) From aa72b2e7361e121f59a45d97c36f555a6804ca51 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:41:46 -0700 Subject: [PATCH 267/335] flag cleanup --- .../train_with_imitation.py | 28 ++++++++++--------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index cd80131cb..db6500d43 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -74,33 +74,35 @@ def parse_args(args): '--load_weights_path', type=str, default=None, help='Path to h5 file containing a pretrained model. 
Relevent for PPO with RLLib' ) - # Imitation Learning args - parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') + # *** IMITATION LEARNING ARGS *** + # rollout collection params: + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training imitation policy.') parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') + parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + # imitation training params: parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run value function learning, after imitation') - parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to run SGD on during imitation learning.') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning negative log-likelihood loss, for stochastic policies.') + parser.add_argument('--stochastic', type=bool, default=True, help='If true, learn a stochastic policy 
(MV Gaussian). Must be true to continue with PPO training.') + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') - parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural net') + # loading and saving params: + parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural network.') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net. load_imitation_model must be True') - parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') - parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') - parser.add_argument('--PPO_save_path', type=str, default='', help="Filepath to h5 file in which the ppo model should be saved") - parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') - parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') - parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') - parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') - parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') - + parser.add_argument('--PPO_save_path', type=str, default='', help="Filepath to h5 file in which the ppo model should be saved. 
Before starting PPO training, weights (for both policy and value function) will be loaded from this model") + # misc + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') parsed_args = parser.parse_known_args(args)[0] dict_args = vars(parsed_args) From e209de24ba21e1ab5645f874999941f0382a1ee7 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:47:18 -0700 Subject: [PATCH 268/335] Reorganize method arguments --- flow/controllers/imitation_learning/run.py | 4 ++-- .../imitation_learning/train_with_imitation.py | 14 ++++++++------ flow/controllers/imitation_learning/trainer.py | 10 +++++----- 3 files changed, 15 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index eee7d7b3f..c88801825 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -44,13 +44,13 @@ def save_controller_network(self): """ Saves the tensorflow keras model of the imitation policy to a h5 file, whose path is specified in params """ - self.trainer.save_controller_network() + self.trainer.save_controller_network(imitation_save_path=self.params['imitation_save_path']) def save_controller_for_PPO(self): """ Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning. This model encapsulates both a policy network and a value function network. 
""" - self.trainer.save_controller_for_PPO() + self.trainer.save_controller_for_PPO(PPO_save_path=self.params['PPO_save_path']) def main(): diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/controllers/imitation_learning/train_with_imitation.py index db6500d43..057c62835 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/controllers/imitation_learning/train_with_imitation.py @@ -70,11 +70,6 @@ def parse_args(args): help='Directory with checkpoint to restore training from.') - parser.add_argument( - '--load_weights_path', type=str, default=None, - help='Path to h5 file containing a pretrained model. Relevent for PPO with RLLib' - ) - # *** IMITATION LEARNING ARGS *** # rollout collection params: @@ -116,7 +111,11 @@ def main(args): # Parse args, train imitation learning flags, params = parse_args(args) + + # depth and size of MLP layers params["fcnet_hiddens"] = [32, 32, 32] + + # load_weights_path for PPO must be set to same path as PPO_save_path (a result from imitation) params['load_weights_path'] = params["PPO_save_path"] @@ -125,7 +124,10 @@ def main(args): imitation_runner = Runner(params) imitation_runner.run_training_loop() - # convert model to work for PPO and save for training + # save imitation network + imitation_runner.save_controller_network() + + # save PPO network (contains policy and value function) imitation_runner.save_controller_for_PPO() # Imitation Done, start RL diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index daba2f9f4..56845031e 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -269,7 +269,7 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): - def save_controller_for_PPO(self): + def save_controller_for_PPO(self, PPO_save_path): """ Build a model, with same policy architecture as imitation network, to run PPO, copy 
weights from imitation, and save this model. @@ -316,12 +316,12 @@ def save_controller_for_PPO(self): # save the model (as a h5 file) - ppo_model.save(self.params['PPO_save_path']) + ppo_model.save(PPO_save_path) - def save_controller_network(self): + def save_controller_network(self, imitation_save_path): """ Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. """ - print("Saving tensorflow model to: ", self.params['imitation_save_path']) - self.action_network.save_network(self.params['imitation_save_path']) + print("Saving tensorflow model to: ", imitation_save_path) + self.action_network.save_network(imitation_save_path) From 5ce3c4de5c550ab5a0b460e3fe27323cdeb8fd0b Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 15:56:00 -0700 Subject: [PATCH 269/335] Argument reorganizing --- flow/controllers/imitation_learning/run.py | 9 +++--- .../controllers/imitation_learning/trainer.py | 31 ++++++++++--------- 2 files changed, 20 insertions(+), 20 deletions(-) diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index c88801825..e78c2ce0b 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -2,7 +2,6 @@ import time import numpy as np from flow.controllers.imitation_learning.trainer import Trainer -from flow.controllers.car_following_models import IDMController class Runner(object): @@ -32,25 +31,25 @@ def run_training_loop(self): """ Runs training for imitation learning for number of iterations specified in params. 
""" - self.trainer.run_training_loop(n_iter=self.params['n_iter']) + self.trainer.run_training_loop() def evaluate(self): """ Evaluates a trained controller over a specified number trajectories; compares average action per step and average reward per trajectory between imitator and expert """ - self.trainer.evaluate_controller(num_trajs=self.params['num_eval_episodes']) + self.trainer.evaluate_controller() def save_controller_network(self): """ Saves the tensorflow keras model of the imitation policy to a h5 file, whose path is specified in params """ - self.trainer.save_controller_network(imitation_save_path=self.params['imitation_save_path']) + self.trainer.save_controller_network() def save_controller_for_PPO(self): """ Creates and saves (in h5 file format) new tensorflow keras model to run PPO with weighs loaded from imitation learning. This model encapsulates both a policy network and a value function network. """ - self.trainer.save_controller_for_PPO(PPO_save_path=self.params['PPO_save_path']) + self.trainer.save_controller_for_PPO() def main(): diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 56845031e..786444cd2 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -76,18 +76,16 @@ def __init__(self, params, submodule): self.controllers[vehicle_id] = (imitator, expert) - def run_training_loop(self, n_iter): + def run_training_loop(self): """ - Trains imitator for n_iter iterations (each iteration collects new trajectories to put in replay buffer) - - Parameters - __________ - n_iter : - intnumber of iterations to execute training + Trains imitator for self.params['n_iter'] iterations (each iteration collects new trajectories to put in replay buffer) """ + # number of imitation learning iterations (1st iteration is behavioral cloning + n_iter = self.params['n_iter'] # init vars at beginning of training # number of environment steps 
taken throughout training + self.total_envsteps = 0 for itr in range(n_iter): @@ -146,19 +144,17 @@ def train_controller(self): # train network on sampled data self.action_network.train(ob_batch, expert_ac_batch) - def evaluate_controller(self, num_trajs = 10): + def evaluate_controller(self): """ Evaluates a trained imitation controller on similarity with expert with respect to action taken and total reward per rollout. - - Parameters - __________ - num_trajs: int - number of trajectories to evaluate performance on """ print("\n\n********** Evaluation ************ \n") + # number of trajectories to evaluate performance on + num_trajs = self.params['num_eval_episodes'] + # collect imitator driven trajectories (along with corresponding expert actions) trajectories = sample_n_trajectories(self.env, self.controllers, self.action_network, num_trajs, self.params['ep_len'], self.multiagent, False, v_des=self.params['v_des']) @@ -269,12 +265,15 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): - def save_controller_for_PPO(self, PPO_save_path): + def save_controller_for_PPO(self): """ Build a model, with same policy architecture as imitation network, to run PPO, copy weights from imitation, and save this model. """ + # filepath to h5 file in which keras model will be saved + PPO_save_path = self.params['PPO_save_path'] + vf_net = self.learn_value_function(self.params['vf_batch_size'], self.params['num_vf_iters'], self.params['num_agent_train_steps_per_iter']) input = tf.keras.layers.Input(self.action_network.model.input.shape[1].value) @@ -319,9 +318,11 @@ def save_controller_for_PPO(self, PPO_save_path): ppo_model.save(PPO_save_path) - def save_controller_network(self, imitation_save_path): + def save_controller_network(self): """ Saves a keras tensorflow model to the specified path given in the command line params. Path must end with .h5. 
""" + + imitation_save_path = self.params['imitation_save_path'] print("Saving tensorflow model to: ", imitation_save_path) self.action_network.save_network(imitation_save_path) From 1aae8f89e6b25fd765be6e10ea66a448d0bd9ca3 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Wed, 17 Jun 2020 18:44:53 -0700 Subject: [PATCH 270/335] Cleanup and rearrange args --- .../imitation_learning/imitation_trainer.py | 3 +-- flow/controllers/imitation_learning/run.py | 25 +++++++++++-------- 2 files changed, 15 insertions(+), 13 deletions(-) diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 18c41a795..5a30035d3 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -27,8 +27,7 @@ def _train(self): """ Executes one training iteration on trainer. See superclass definition. """ - print("TRAIN CALLED") - # return self.trainer.train() + return self.trainer.train() def _save(self, tmp_checkpoint_dir): diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index e78c2ce0b..924e1a400 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -66,30 +66,33 @@ def main(): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') + # rollout collection params + parser.add_argument('--ep_len', type=int, default=5000, help='Max length of episodes for rollouts.') parser.add_argument('--num_agent_train_steps_per_iter', type=int, default=1000, help='Number of gradient steps for training policy.') # number of gradient steps for training policy parser.add_argument('--n_iter', type=int, default=3, help='Number of DAgger iterations to run (1st iteration is behavioral cloning') + 
parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') + parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') + parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') + # imitation training params parser.add_argument('--batch_size', type=int, default=1000, help='Number of environment steps to collect in iteration of DAgger') parser.add_argument('--init_batch_size', type=int, default=2000, help='Number of environment steps to collect on 1st iteration of DAgger (behavioral cloning iteration)') parser.add_argument('--vf_batch_size', type=int, default=2000, help='Number of environment steps to collect to learn value function for a policy') parser.add_argument('--num_vf_iters', type=int, default=100, help='Number of iterations to run vf training') # TODO: better help description for this - - parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size to train on') + parser.add_argument('--train_batch_size', type=int, default=100, help='Batch size for SGD') + parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') + parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') + parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') - parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') - 
parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--save_model', type=int, default=0, help='If true, save both imitation model and PPO model in h5 format') parser.add_argument('--imitation_save_path', type=str, default='', help='Filepath to h5 file in which imitation model should be saved') parser.add_argument('--PPO_save_path', type=str, default='', help='Filepath to h5 file in which PPO model with copied weights should be saved') - parser.add_argument('--save_model', type=int, default=0, help='If true, save models in h5 format') - parser.add_argument('--num_eval_episodes', type=int, default=0, help='Number of episodes on which to evaluate imitation model') - parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') - parser.add_argument('--multiagent', type=bool, default=False, help='If true, env is multiagent.') - parser.add_argument('--v_des', type=float, default=15, help='Desired velocity for follower-stopper') - parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') + + # misc params + parser.add_argument('--tensorboard_path', type=str, default='/tensorboard/', help='Path to which tensorboard events should be written.') args = parser.parse_args() From f7451d0212b1084f9dade476dc1210a1d587fd60 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 10:52:08 -0700 Subject: [PATCH 271/335] Custom PPO to log value function predictions: --- examples/train.py | 13 +- .../imitation_learning/custom_ppo.py | 321 ++++++++++++++++++ .../imitation_learning/imitation_trainer.py | 4 +- 3 files changed, 328 insertions(+), 10 deletions(-) create mode 100644 flow/controllers/imitation_learning/custom_ppo.py diff --git a/examples/train.py b/examples/train.py index 6d7b13879..019a96de6 100644 --- 
a/examples/train.py +++ b/examples/train.py @@ -187,17 +187,14 @@ def setup_exps_rllib(flow_params, if alg_run == "PPO": from ray import tune from ray.tune.registry import register_env - try: - from ray.rllib.agents.agent import get_agent_class - except ImportError: - from ray.rllib.agents.registry import get_agent_class + from custom_ppo import CustomPPOTrainer + from ray.rllib.agents.ppo import DEFAULT_CONFIG + config = deepcopy(DEFAULT_CONFIG) - horizon = flow_params['env'].horizon - alg_run = "PPO" + alg_run = CustomPPOTrainer - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) + horizon = flow_params['env'].horizon config["num_workers"] = n_cpus config["horizon"] = horizon diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py new file mode 100644 index 000000000..cf0def369 --- /dev/null +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -0,0 +1,321 @@ +from __future__ import absolute_import +from __future__ import division +from __future__ import print_function + +import logging + +import ray +from ray.rllib.evaluation.postprocessing import compute_advantages, \ + Postprocessing +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.tf_policy import LearningRateSchedule, \ + EntropyCoeffSchedule, ACTION_LOGP +from ray.rllib.policy.tf_policy_template import build_tf_policy +from ray.rllib.utils.explained_variance import explained_variance +from ray.rllib.utils.tf_ops import make_tf_callable +from ray.rllib.utils import try_import_tf + +from ray.rllib.agents.trainer_template import build_trainer +from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG +from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales +from ray.rllib.agents.impala.vtrace_policy import BEHAVIOUR_LOGITS + +tf = try_import_tf() + +logger = logging.getLogger(__name__) + + +class PPOLoss: + def __init__(self, + dist_class, + model, + 
value_targets, + advantages, + actions, + prev_logits, + prev_actions_logp, + vf_preds, + curr_action_dist, + value_fn, + cur_kl_coeff, + valid_mask, + entropy_coeff=0, + clip_param=0.1, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True): + """Constructs the loss for Proximal Policy Objective. + + Arguments: + dist_class: action distribution class for logits. + value_targets (Placeholder): Placeholder for target values; used + for GAE. + actions (Placeholder): Placeholder for actions taken + from previous model evaluation. + advantages (Placeholder): Placeholder for calculated advantages + from previous model evaluation. + prev_logits (Placeholder): Placeholder for logits output from + previous model evaluation. + prev_actions_logp (Placeholder): Placeholder for action prob output + from the previous (before update) Model evaluation. + vf_preds (Placeholder): Placeholder for value function output + from the previous (before update) Model evaluation. + curr_action_dist (ActionDistribution): ActionDistribution + of the current model. + value_fn (Tensor): Current value function output Tensor. + cur_kl_coeff (Variable): Variable holding the current PPO KL + coefficient. + valid_mask (Optional[tf.Tensor]): An optional bool mask of valid + input elements (for max-len padded sequences (RNNs)). + entropy_coeff (float): Coefficient of the entropy regularizer. + clip_param (float): Clip parameter + vf_clip_param (float): Clip parameter for the value function + vf_loss_coeff (float): Coefficient of the value function loss + use_gae (bool): If true, use the Generalized Advantage Estimator. + """ + if valid_mask is not None: + + def reduce_mean_valid(t): + return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) + + else: + + def reduce_mean_valid(t): + return tf.reduce_mean(t) + + prev_dist = dist_class(prev_logits, model) + # Make loss functions. 
+ logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) + action_kl = prev_dist.kl(curr_action_dist) + self.mean_kl = reduce_mean_valid(action_kl) + + curr_entropy = curr_action_dist.entropy() + self.mean_entropy = reduce_mean_valid(curr_entropy) + + surrogate_loss = tf.minimum( + advantages * logp_ratio, + advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, + 1 + clip_param)) + self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) + + if use_gae: + vf_loss1 = tf.square(value_fn - value_targets) + vf_clipped = vf_preds + tf.clip_by_value( + value_fn - vf_preds, -vf_clip_param, vf_clip_param) + vf_loss2 = tf.square(vf_clipped - value_targets) + vf_loss = tf.maximum(vf_loss1, vf_loss2) + self.mean_vf_loss = reduce_mean_valid(vf_loss) + loss = reduce_mean_valid( + -surrogate_loss + cur_kl_coeff * action_kl + + vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) + else: + self.mean_vf_loss = tf.constant(0.0) + loss = reduce_mean_valid(-surrogate_loss + + cur_kl_coeff * action_kl - + entropy_coeff * curr_entropy) + self.loss = loss + + +def ppo_surrogate_loss(policy, model, dist_class, train_batch): + logits, state = model.from_batch(train_batch) + action_dist = dist_class(logits, model) + + mask = None + if state: + max_seq_len = tf.reduce_max(train_batch["seq_lens"]) + mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len) + mask = tf.reshape(mask, [-1]) + + policy.loss_obj = PPOLoss( + dist_class, + model, + train_batch[Postprocessing.VALUE_TARGETS], + train_batch[Postprocessing.ADVANTAGES], + train_batch[SampleBatch.ACTIONS], + train_batch[BEHAVIOUR_LOGITS], + train_batch[ACTION_LOGP], + train_batch[SampleBatch.VF_PREDS], + action_dist, + model.value_function(), + policy.kl_coeff, + mask, + entropy_coeff=policy.entropy_coeff, + clip_param=policy.config["clip_param"], + vf_clip_param=policy.config["vf_clip_param"], + vf_loss_coeff=policy.config["vf_loss_coeff"], + use_gae=policy.config["use_gae"], + ) + + return 
policy.loss_obj.loss + + +def kl_and_loss_stats(policy, train_batch): + return { + "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), + "cur_lr": tf.cast(policy.cur_lr, tf.float64), + "total_loss": policy.loss_obj.loss, + "policy_loss": policy.loss_obj.mean_policy_loss, + "vf_loss": policy.loss_obj.mean_vf_loss, + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + policy.model.value_function()), + "vf_preds": policy.model.value_function(), + "kl": policy.loss_obj.mean_kl, + "entropy": policy.loss_obj.mean_entropy, + "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), + } + + +def vf_preds_and_logits_fetches(policy): + """Adds value function and logits outputs to experience train_batches.""" + return { + SampleBatch.VF_PREDS: policy.model.value_function(), + BEHAVIOUR_LOGITS: policy.model.last_output(), + } + + + +def postprocess_ppo_gae(policy, + sample_batch, + other_agent_batches=None, + episode=None): + """Adds the policy logits, VF preds, and advantages to the trajectory.""" + + completed = sample_batch["dones"][-1] + if completed: + last_r = 0.0 + else: + next_state = [] + for i in range(policy.num_state_tensors()): + next_state.append([sample_batch["state_out_{}".format(i)][-1]]) + last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1], + sample_batch[SampleBatch.ACTIONS][-1], + sample_batch[SampleBatch.REWARDS][-1], + *next_state) + batch = compute_advantages( + sample_batch, + last_r, + policy.config["gamma"], + policy.config["lambda"], + use_gae=policy.config["use_gae"]) + return batch + + +def clip_gradients(policy, optimizer, loss): + variables = policy.model.trainable_variables() + if policy.config["grad_clip"] is not None: + grads_and_vars = optimizer.compute_gradients(loss, variables) + grads = [g for (g, v) in grads_and_vars] + policy.grads, _ = tf.clip_by_global_norm(grads, + policy.config["grad_clip"]) + clipped_grads = list(zip(policy.grads, variables)) + return clipped_grads + else: + return 
optimizer.compute_gradients(loss, variables) + + +class KLCoeffMixin: + def __init__(self, config): + # KL Coefficient + self.kl_coeff_val = config["kl_coeff"] + self.kl_target = config["kl_target"] + self.kl_coeff = tf.get_variable( + initializer=tf.constant_initializer(self.kl_coeff_val), + name="kl_coeff", + shape=(), + trainable=False, + dtype=tf.float32) + + def update_kl(self, sampled_kl): + if sampled_kl > 2.0 * self.kl_target: + self.kl_coeff_val *= 1.5 + elif sampled_kl < 0.5 * self.kl_target: + self.kl_coeff_val *= 0.5 + self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) + return self.kl_coeff_val + + +class ValueNetworkMixin: + def __init__(self, obs_space, action_space, config): + if config["use_gae"]: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + model_out, _ = self.model({ + SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]), + SampleBatch.PREV_ACTIONS: tf.convert_to_tensor( + [prev_action]), + SampleBatch.PREV_REWARDS: tf.convert_to_tensor( + [prev_reward]), + "is_training": tf.convert_to_tensor(False), + }, [tf.convert_to_tensor([s]) for s in state], + tf.convert_to_tensor([1])) + return self.model.value_function()[0] + + else: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + return tf.constant(0.0) + + self._value = value + + +def setup_config(policy, obs_space, action_space, config): + # auto set the model option for layer sharing + config["model"]["vf_share_layers"] = config["vf_share_layers"] + + +def setup_mixins(policy, obs_space, action_space, config): + ValueNetworkMixin.__init__(policy, obs_space, action_space, config) + KLCoeffMixin.__init__(policy, config) + EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], + config["entropy_coeff_schedule"]) + LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) + + +CustomPPOTFPolicy = build_tf_policy( + name="PPOTFPolicy", + get_default_config=lambda: 
ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, + loss_fn=ppo_surrogate_loss, + stats_fn=kl_and_loss_stats, + extra_action_fetches_fn=vf_preds_and_logits_fetches, + postprocess_fn=postprocess_ppo_gae, + gradients_fn=clip_gradients, + before_init=setup_config, + before_loss_init=setup_mixins, + mixins=[ + LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin, + ValueNetworkMixin + ]) + + +def validate_config(config): + """Check that the config is set up properly.""" + if config["entropy_coeff"] < 0: + raise DeprecationWarning("entropy_coeff must be >= 0") + if isinstance(config["entropy_coeff"], int): + config["entropy_coeff"] = float(config["entropy_coeff"]) + if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]: + raise ValueError( + "Episode truncation is not supported without a value " + "function. Consider setting batch_mode=complete_episodes.") + if config["multiagent"]["policies"] and not config["simple_optimizer"]: + logger.info( + "In multi-agent mode, policies will be optimized sequentially " + "by the multi-GPU optimizer. Consider setting " + "simple_optimizer=True if this doesn't work for you.") + if config["simple_optimizer"]: + logger.warning( + "Using the simple minibatch optimizer. 
This will significantly " + "reduce performance, consider simple_optimizer=False.") + elif tf and tf.executing_eagerly(): + config["simple_optimizer"] = True # multi-gpu not supported + +CustomPPOTrainer = build_trainer( + name="CustomPPOTrainer", + default_config=DEFAULT_CONFIG, + default_policy=CustomPPOTFPolicy, + make_policy_optimizer=choose_policy_optimizer, + validate_config=validate_config, + after_train_result=warn_about_bad_reward_scales) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 5a30035d3..afa6680cc 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -5,7 +5,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class - +import custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ @@ -19,7 +19,7 @@ def _setup(self, config): env_name = config['env'] # agent_cls = get_agent_class(config['env_config']['run']) - self.trainer = ppo.PPOTrainer(env=env_name, config=config) + self.trainer = custom_ppo.CustomPPOTrainer(env=env_name, config=config) policy_id = list(self.trainer.get_weights().keys())[0] self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) From 5ac982892a8600bd9b371b2e0f77e191836bdb89 Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Thu, 18 Jun 2020 11:21:37 -0700 Subject: [PATCH 272/335] cleanup to the multi-agent trainer (#971) - make the multi-agent trainer pretty - fix minor bug in Experiment.py where gen_emission had to be set or else it errored out --- .../rl/multiagent/multiagent_i210.py | 259 ++++++++---------- flow/core/experiment.py | 7 +- 2 files changed, 115 insertions(+), 151 deletions(-) diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py 
index 96fc78cd2..e5b5b5d81 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -18,51 +18,70 @@ from flow.core.params import InFlows from flow.core.params import VehicleParams from flow.core.params import SumoParams +from flow.core.params import SumoCarFollowingParams from flow.core.params import SumoLaneChangeParams from flow.core.rewards import energy_consumption from flow.envs.multiagent.i210 import I210MultiEnv, ADDITIONAL_ENV_PARAMS from flow.utils.registry import make_create_env +from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION -# SET UP PARAMETERS FOR THE SIMULATION -WANT_GHOST_CELL = True -# WANT_DOWNSTREAM_BOUNDARY = True +# =========================================================================== # +# Specify some configurable constants. # +# =========================================================================== # + +# whether to include the downstream slow-down edge in the network as well as a ghost cell at the upstream edge +WANT_BOUNDARY_CONDITIONS = True +# whether to include vehicles on the on-ramp ON_RAMP = False +# the inflow rate of vehicles (in veh/hr) +INFLOW_RATE = 2050 +# the speed of inflowing vehicles from the main edge (in m/s) +INFLOW_SPEED = 25.5 +# fraction of vehicles that are RL vehicles. 
0.10 corresponds to 10% PENETRATION_RATE = 0.10 -V_DES = 7.0 -HORIZON = 1000 +# desired speed of the vehicles in the network +V_DES = 5.0 +# horizon over which to run the env +HORIZON = 1500 +# steps to run before follower-stopper is allowed to take control WARMUP_STEPS = 600 - -inflow_rate = 2050 -inflow_speed = 25.5 - -accel_data = (IDMController, {'a': 1.3, 'b': 2.0, 'noise': 0.3}) - -VEH_PER_HOUR_BASE_119257914 = 10800 -VEH_PER_HOUR_BASE_27414345 = 321 -VEH_PER_HOUR_BASE_27414342 = 421 - -if WANT_GHOST_CELL: - from flow.networks.i210_subnetwork_ghost_cell import I210SubNetworkGhostCell, EDGES_DISTRIBUTION - - edges_distribution = EDGES_DISTRIBUTION - highway_start_edge = 'ghost0' +# whether to turn off the fail safes for the human-driven vehicles +ALLOW_COLLISIONS = False + +# =========================================================================== # +# Specify the path to the network template. # +# =========================================================================== # + +if WANT_BOUNDARY_CONDITIONS: + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_" + "downstream.xml") else: - from flow.networks.i210_subnetwork import I210SubNetwork, EDGES_DISTRIBUTION - edges_distribution = EDGES_DISTRIBUTION - highway_start_edge = "119257914" + NET_TEMPLATE = os.path.join( + config.PROJECT_PATH, + "examples/exp_configs/templates/sumo/test2.net.xml") +edges_distribution = EDGES_DISTRIBUTION.copy() + +# =========================================================================== # +# Set up parameters for the environment. # +# =========================================================================== # -# SET UP PARAMETERS FOR THE ENVIRONMENT additional_env_params = ADDITIONAL_ENV_PARAMS.copy() additional_env_params.update({ 'max_accel': 2.6, 'max_decel': 4.5, - # configure the observation space. Look at the I210MultiEnv class for more info. + + # configure the observation space. 
Look at the I210MultiEnv class for more + # info. 'lead_obs': True, # whether to add in a reward for the speed of nearby vehicles "local_reward": True, - # whether to use the MPG reward. Otherwise, defaults to a target velocity reward + # whether to use the MPG reward. Otherwise, defaults to a target velocity + # reward "mpg_reward": False, - # whether to use the MPJ reward. Otherwise, defaults to a target velocity reward + # whether to use the MPJ reward. Otherwise, defaults to a target velocity + # reward "mpj_reward": False, # how many vehicles to look back for the MPG reward "look_back_length": 1, @@ -74,7 +93,8 @@ # which edges we shouldn't apply control on "no_control_edges": ["ghost0", "119257908#3"], - # whether to add a slight reward for opening up a gap that will be annealed out N iterations in + # whether to add a slight reward for opening up a gap that will be annealed + # out N iterations in "headway_curriculum": False, # how many timesteps to anneal the headway curriculum over "headway_curriculum_iters": 100, @@ -96,144 +116,85 @@ "penalize_accel": True }) -# CREATE VEHICLE TYPES AND INFLOWS -# no vehicles in the network -vehicles = VehicleParams() - -inflow = InFlows() +# =========================================================================== # +# Specify vehicle-specific information and inflows. 
# +# =========================================================================== # +# create the base vehicle types that will be used for inflows +vehicles = VehicleParams() if ON_RAMP: vehicles.add( "human", num_vehicles=0, - color="white", + routing_controller=(I210Router, {}), + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 + }), + car_following_params=SumoCarFollowingParams( + speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' + ), lane_change_params=SumoLaneChangeParams( lane_change_mode="strategic", ), - acceleration_controller=accel_data, - routing_controller=(I210Router, {}) ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - num_vehicles=0, - color="red", - acceleration_controller=(RLController, {}), - routing_controller=(I210Router, {}) - ) - - # inflow.add( - # veh_type="human", - # edge=highway_start_edge, - # vehs_per_hour=inflow_rate, - # departLane="best", - # departSpeed=inflow_speed) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=inflow_speed) +else: + vehicles.add( + "human", + num_vehicles=0, + acceleration_controller=(IDMController, { + 'a': 1.3, + 'b': 2.0, + 'noise': 0.3 + }), + car_following_params=SumoCarFollowingParams( + speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' + ), + lane_change_params=SumoLaneChangeParams( + lane_change_mode="strategic", + ), + ) +vehicles.add( + "av", + num_vehicles=0, + acceleration_controller=(RLController, {}), +) +inflow = InFlows() +for lane in [0, 1, 2, 3, 4]: + # Add the inflows from the main highway. 
inflow.add( veh_type="human", - edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), - departLane="random", - departSpeed=10) + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=INFLOW_SPEED) inflow.add( - veh_type="human", - edge="27414342#0", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), - departLane="random", - departSpeed=10) - - if PENETRATION_RATE > 0.0: - for lane in lane_list: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * PENETRATION_RATE), - departLane=lane, - departSpeed=inflow_speed) - + veh_type="av", + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) + + # Add the inflows from the on-ramps. + if ON_RAMP: inflow.add( - veh_type="av", + veh_type="human", edge="27414345", - vehs_per_hour=int(500 * PENETRATION_RATE), + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) inflow.add( - veh_type="av", + veh_type="human", edge="27414342#0", - vehs_per_hour=int(500 * PENETRATION_RATE), + vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) -else: - # create the base vehicle type that will be used for inflows - vehicles.add( - "human", - num_vehicles=0, - lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", - ), - acceleration_controller=accel_data, - ) - if PENETRATION_RATE > 0.0: - vehicles.add( - "av", - color="red", - num_vehicles=0, - acceleration_controller=(RLController, {}), - ) - - # If you want to turn off the fail safes uncomment this: - - # vehicles.add( - # 'human', - # num_vehicles=0, - # lane_change_params=SumoLaneChangeParams( - # lane_change_mode='strategic', - # ), - # acceleration_controller=accel_data, - # car_following_params=SumoCarFollowingParams(speed_mode='19') - # ) - - lane_list = ['0', '1', '2', '3', '4'] - - for lane in lane_list: - inflow.add( - 
veh_type="human", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=inflow_speed) - - if PENETRATION_RATE > 0.0: - for lane in lane_list: - inflow.add( - veh_type="av", - edge=highway_start_edge, - vehs_per_hour=int(inflow_rate * PENETRATION_RATE), - departLane=lane, - departSpeed=inflow_speed) - - -network_xml_file = "examples/exp_configs/templates/sumo/i210_with_ghost_cell_with_downstream.xml" - -# network_xml_file = "examples/exp_configs/templates/sumo/i210_with_congestion.xml" - -NET_TEMPLATE = os.path.join(config.PROJECT_PATH, network_xml_file) - -if WANT_GHOST_CELL: - network = I210SubNetworkGhostCell -else: - network = I210SubNetwork +# =========================================================================== # +# Generate the flow_params dict with all relevant simulation information. # +# =========================================================================== # flow_params = dict( # name of the experiment @@ -243,7 +204,7 @@ env_name=I210MultiEnv, # name of the network class the experiment is running on - network=network, + network=I210SubNetwork, # simulator that is used by the experiment simulator='traci', @@ -274,7 +235,7 @@ template=NET_TEMPLATE, additional_params={ "on_ramp": ON_RAMP, - "ghost_edge": WANT_GHOST_CELL + "ghost_edge": WANT_BOUNDARY_CONDITIONS } ), @@ -289,14 +250,16 @@ ), ) -# SET UP RLLIB MULTI-AGENT FEATURES +# =========================================================================== # +# Set up rllib multi-agent features. 
# +# =========================================================================== # create_env, env_name = make_create_env(params=flow_params, version=0) # register as rllib env register_env(env_name, create_env) -# multiagent configuration +# multi-agent configuration test_env = create_env() obs_space = test_env.observation_space act_space = test_env.action_space diff --git a/flow/core/experiment.py b/flow/core/experiment.py index df8992c20..7f5352c25 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -164,8 +164,9 @@ def rl_actions(*_): metadata['is_baseline'].append(str(is_baseline)) dir_path = self.env.sim_params.emission_path - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + if not dir_path is None: + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) for i in range(num_runs): ret = 0 @@ -189,7 +190,7 @@ def rl_actions(*_): extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: + if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and not dir_path is None: write_dict_to_csv(trajectory_table_path, extra_info, not j) extra_info.clear() From 57469374e24d413762f531006ed797758a4e1e60 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Thu, 18 Jun 2020 15:34:17 -0700 Subject: [PATCH 273/335] Metadata Configuration (#957) --- .gitignore | 2 + flow/core/experiment.py | 25 +++--- flow/core/util.py | 67 +++++++-------- flow/data_pipeline/README.md | 12 +++ flow/data_pipeline/data_pipeline.py | 45 +++++++++- flow/data_pipeline/lambda_function.py | 4 +- flow/data_pipeline/leaderboard_utils.py | 7 +- flow/data_pipeline/query.py | 108 ++++++++++++++++++------ flow/envs/base.py | 10 +++ flow/visualize/i210_replay.py 
| 13 +-- flow/visualize/visualizer_rllib.py | 56 ++++++++++++ 11 files changed, 267 insertions(+), 82 deletions(-) create mode 100644 flow/data_pipeline/README.md diff --git a/.gitignore b/.gitignore index 29d788c27..6d9ff7a71 100644 --- a/.gitignore +++ b/.gitignore @@ -111,3 +111,5 @@ flow.ang *.ang.old *.sang +# local configuration file for data pipeline +**/data_pipeline_config diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 7f5352c25..ceb8c7b61 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,6 +1,6 @@ """Contains an experiment class for running simulations.""" from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration from flow.data_pipeline.leaderboard_utils import network_name_translate from collections import defaultdict from datetime import datetime, timezone @@ -158,12 +158,18 @@ def rl_actions(*_): cur_datetime = datetime.now(timezone.utc) cur_date = cur_datetime.date().isoformat() cur_time = cur_datetime.time().isoformat() + # collecting information for metadata table metadata['source_id'].append(source_id) metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(is_baseline)) + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) + + if convert_to_csv and self.env.simulator == "traci": + dir_path = self.env.sim_params.emission_path - dir_path = self.env.sim_params.emission_path if not dir_path is None: trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) @@ -172,6 +178,8 @@ def rl_actions(*_): ret = 0 vel = [] custom_vals = {key: [] for 
key in self.custom_callables.keys()} + run_id = "run_{}".format(i) + self.env.pipeline_params = (extra_info, source_id, run_id) state = self.env.reset() for j in range(num_steps): t0 = time.time() @@ -185,9 +193,7 @@ def rl_actions(*_): ret += reward # collect additional information for the data pipeline - get_extra_info(self.env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id] * len(veh_ids)) - extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) + get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) # write to disk every 100 steps if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and not dir_path is None: @@ -230,8 +236,7 @@ def rl_actions(*_): emission_path = os.path.join(dir_path, emission_filename) # convert the emission file into a csv - # FIXME(@Brent): produce seg fault with large CSV - # emission_to_csv(emission_path) + emission_to_csv(emission_path) # Delete the .xml version of the emission file. os.remove(emission_path) @@ -241,12 +246,12 @@ def rl_actions(*_): if to_aws: upload_to_s3('circles.data.pipeline', - 'metadata_table/date={0}/partition_name={1}_METADATA/' - '{1}_METADATA.csv'.format(cur_date, source_id), + 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, + source_id), metadata_table_path) upload_to_s3('circles.data.pipeline', 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), trajectory_table_path, - {'network': metadata['network'][0]}) + {'network': metadata['network'][0], 'is_baseline': metadata['is_baseline'][0]}) return info_dict diff --git a/flow/core/util.py b/flow/core/util.py index 1821a76a5..cd269e6af 100755 --- a/flow/core/util.py +++ b/flow/core/util.py @@ -47,42 +47,39 @@ def emission_to_csv(emission_path, output_path=None): path to the csv file that will be generated, default is the same directory as the emission file, with the same name """ - parser = 
etree.XMLParser(recover=True) - tree = ElementTree.parse(emission_path, parser=parser) - root = tree.getroot() - - # parse the xml data into a dict + context = etree.iterparse(emission_path, recover=True) out_data = [] - for time in root.findall('timestep'): - t = float(time.attrib['time']) - - for car in time: - out_data.append(dict()) - try: - out_data[-1]['time'] = t - out_data[-1]['CO'] = float(car.attrib['CO']) - out_data[-1]['y'] = float(car.attrib['y']) - out_data[-1]['CO2'] = float(car.attrib['CO2']) - out_data[-1]['electricity'] = float(car.attrib['electricity']) - out_data[-1]['type'] = car.attrib['type'] - out_data[-1]['id'] = car.attrib['id'] - out_data[-1]['eclass'] = car.attrib['eclass'] - out_data[-1]['waiting'] = float(car.attrib['waiting']) - out_data[-1]['NOx'] = float(car.attrib['NOx']) - out_data[-1]['fuel'] = float(car.attrib['fuel']) - out_data[-1]['HC'] = float(car.attrib['HC']) - out_data[-1]['x'] = float(car.attrib['x']) - out_data[-1]['route'] = car.attrib['route'] - out_data[-1]['relative_position'] = float(car.attrib['pos']) - out_data[-1]['noise'] = float(car.attrib['noise']) - out_data[-1]['angle'] = float(car.attrib['angle']) - out_data[-1]['PMx'] = float(car.attrib['PMx']) - out_data[-1]['speed'] = float(car.attrib['speed']) - out_data[-1]['edge_id'] = car.attrib['lane'].rpartition('_')[0] - out_data[-1]['lane_number'] = car.attrib['lane'].\ - rpartition('_')[-1] - except KeyError: - del out_data[-1] + for event, elem in context: + if elem.tag == "timestep": + t = float(elem.attrib['time']) + for car in elem: + out_data.append(dict()) + try: + out_data[-1]['time'] = t + out_data[-1]['CO'] = float(car.attrib['CO']) + out_data[-1]['y'] = float(car.attrib['y']) + out_data[-1]['CO2'] = float(car.attrib['CO2']) + out_data[-1]['electricity'] = float(car.attrib['electricity']) + out_data[-1]['type'] = car.attrib['type'] + out_data[-1]['id'] = car.attrib['id'] + out_data[-1]['eclass'] = car.attrib['eclass'] + out_data[-1]['waiting'] = 
float(car.attrib['waiting']) + out_data[-1]['NOx'] = float(car.attrib['NOx']) + out_data[-1]['fuel'] = float(car.attrib['fuel']) + out_data[-1]['HC'] = float(car.attrib['HC']) + out_data[-1]['x'] = float(car.attrib['x']) + out_data[-1]['route'] = car.attrib['route'] + out_data[-1]['relative_position'] = float(car.attrib['pos']) + out_data[-1]['noise'] = float(car.attrib['noise']) + out_data[-1]['angle'] = float(car.attrib['angle']) + out_data[-1]['PMx'] = float(car.attrib['PMx']) + out_data[-1]['speed'] = float(car.attrib['speed']) + out_data[-1]['edge_id'] = car.attrib['lane'].rpartition('_')[0] + out_data[-1]['lane_number'] = car.attrib['lane']. \ + rpartition('_')[-1] + except KeyError: + del out_data[-1] + elem.clear() # sort the elements of the dictionary by the vehicle id out_data = sorted(out_data, key=lambda k: k['id']) diff --git a/flow/data_pipeline/README.md b/flow/data_pipeline/README.md new file mode 100644 index 000000000..65aeb8d49 --- /dev/null +++ b/flow/data_pipeline/README.md @@ -0,0 +1,12 @@ +To run a simulation with output stored locally only: + + `python simulate.py EXP_CONFIG --gen_emission` + +To run a simulation and upload output to pipeline: + + `python simulate.py EXP_CONFIG --to_aws` + +To run a simulation, upload output to pipeline, and mark it as baseline: + + `python simulate.py EXP_CONFIG --to_aws --is_baseline` + diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 50c2c8422..74070cc7a 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -4,6 +4,8 @@ from flow.data_pipeline.query import QueryStrings from time import time from datetime import date +import csv +from io import StringIO def generate_trajectory_table(data_path, extra_info, partition_name): @@ -77,7 +79,7 @@ def upload_to_s3(bucket_name, bucket_key, file_path, metadata={}): return -def get_extra_info(veh_kernel, extra_info, veh_ids): +def get_extra_info(veh_kernel, extra_info, veh_ids, 
source_id, run_id): """Get all the necessary information for the trajectory output from flow.""" for vid in veh_ids: extra_info["time_step"].append(veh_kernel.get_timestep(vid) / 1000) @@ -103,6 +105,32 @@ def get_extra_info(veh_kernel, extra_info, veh_ids): extra_info["edge_id"].append(veh_kernel.get_edge(vid)) extra_info["lane_id"].append(veh_kernel.get_lane(vid)) extra_info["distance"].append(veh_kernel.get_distance(vid)) + extra_info["relative_position"].append(veh_kernel.get_position(vid)) + extra_info["source_id"].append(source_id) + extra_info["run_id"].append(run_id) + + +def get_configuration(): + """Get configuration for the metadata table.""" + try: + config_df = pd.read_csv('./data_pipeline_config') + except FileNotFoundError: + config_df = pd.DataFrame(data={"submitter_name": [""], "strategy": [""]}) + + if not config_df['submitter_name'][0]: + name = input("Please enter your name:").strip() + while not name: + name = input("Please enter a non-empty name:").strip() + config_df['submitter_name'] = [name] + + strategy = input( + "Please enter strategy name (current: \"{}\"):".format(config_df["strategy"][0])).strip() + if strategy: + config_df['strategy'] = [strategy] + + config_df.to_csv('./data_pipeline_config', index=False) + + return config_df['submitter_name'][0], config_df['strategy'][0] def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): @@ -114,6 +142,21 @@ def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): s3.delete_object(Bucket=bucket, Key=key) +def update_baseline(s3, baseline_network, baseline_source_id): + obj = s3.get_object(Bucket='circles.data.pipeline', Key='baseline_table/baselines.csv')['Body'] + original_str = obj.read().decode() + reader = csv.DictReader(StringIO(original_str)) + new_str = StringIO() + writer = csv.DictWriter(new_str, fieldnames=['network', 'source_id']) + writer.writeheader() + writer.writerow({'network': baseline_network, 'source_id': baseline_source_id}) 
+ for row in reader: + if row['network'] != baseline_network: + writer.writerow(row) + s3.put_object(Bucket='circles.data.pipeline', Key='baseline_table/baselines.csv', + Body=new_str.getvalue().replace('\r', '').encode()) + + class AthenaQuery: """Class used to run queries. diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 00cf0fba5..f7a32d5db 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,7 +1,7 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data +from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline from flow.data_pipeline.query import tags, tables, network_using_edge from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS @@ -48,6 +48,8 @@ def lambda_handler(event, context): if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: loc_filter = EDGE_FILTER + if 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': + update_baseline(s3, response["Metadata"]['network'], source_id) query_dict = tags[table] diff --git a/flow/data_pipeline/leaderboard_utils.py b/flow/data_pipeline/leaderboard_utils.py index afc2fd8bc..dd7055f8b 100644 --- a/flow/data_pipeline/leaderboard_utils.py +++ b/flow/data_pipeline/leaderboard_utils.py @@ -5,7 +5,8 @@ from io import StringIO -network_name_map = {"highway-single": "Single-Lane Straight Road", +network_name_map = {"highway": "Single-Lane Straight Road", + "highway_single": "Single-Lane Straight Road", "ring": "Single-Lane Ring Road", "I-210_subnetwork": "I-210 without Ramps", "I_210_subnetwork": "I-210 without Ramps"} @@ -64,10 +65,12 @@ def get_table_disk(table_name="fact_vehicle_trace", bucket="circles.data.pipelin keys = [e["Key"] for e in response["Contents"] if 
e["Key"].find(table_name) == 0 and e["Key"][-4:] == ".csv"] names = [key_to_name(k) for k in keys] existing_results = os.listdir("./result/{}".format(table_name)) + updated = False for index in range(len(keys)): if names[index] not in existing_results: + updated = True s3.download_file(bucket, keys[index], "./result/{}/{}".format(table_name, names[index])) - if table_name == "leaderboard_chart_agg": + if table_name == "leaderboard_chart_agg" and updated: for p in existing_results: os.remove("./result/{}/{}".format(table_name, p)) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 184c7217a..d538e7d62 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -65,26 +65,46 @@ X_FILTER = "x BETWEEN 500 AND 2300" -EDGE_FILTER = "edge_id <> ANY (VALUES 'ghost0', '119257908#3')" +EDGE_FILTER = "edge_id <> ALL (VALUES 'ghost0', '119257908#3')" WARMUP_STEPS = 600 * 3 * 0.4 HORIZON_STEPS = 1000 * 3 * 0.4 -VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT = """ +VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT = """ SELECT id, time_step, speed, acceleration, road_grade, - GREATEST(0, 1200 * speed * (( + GREATEST(0, 2041 * speed * (( CASE WHEN acceleration > 0 THEN 1 WHEN acceleration < 0 THEN 0 ELSE 0.5 - END * (1 - {0}) + {0}) * acceleration + 9.81 * SIN(road_grade) - ) + 1200 * 9.81 * 0.005 * speed + 0.5 * 1.225 * 2.6 * 0.3 * POW(speed,3)) AS power, + END * (1 - {0}) + {0}) * acceleration + 9.807 * SIN(road_grade) + ) + 2041 * 9.807 * 0.0027 * speed + 0.5 * 1.225 * 3.2 * 0.4 * POW(speed,3)) AS power, + \'{1}\' AS energy_model_id, + source_id + FROM {2} + ORDER BY id, time_step + """ + +VEHICLE_POWER_DEMAND_PRIUS_FINAL_SELECT = """ + SELECT + id, + time_step, + speed, + acceleration, + road_grade, + GREATEST(-2.8 * speed, 1663 * speed * (( + CASE + WHEN acceleration > 0 THEN 1 + WHEN acceleration < 0 THEN 0 + ELSE 0.5 + END * (1 - {0}) + {0}) * acceleration + 9.807 * SIN(road_grade) + ) + 1663 * 9.807 * 0.007 * speed + 0.5 * 1.225 * 2.4 * 
0.24 * POW(speed,3)) AS power, \'{1}\' AS energy_model_id, source_id FROM {2} @@ -122,9 +142,9 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL', - 'regular_cte')) + {}""".format(VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL', + 'regular_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL = """ WITH denoised_accel_cte AS ( @@ -142,9 +162,9 @@ class QueryStrings(Enum): AND date = \'{{date}}\' AND partition_name=\'{{partition}}\' ) - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL', - 'denoised_accel_cte')) + {}""".format(VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL', + 'denoised_accel_cte')) POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL = """ WITH lagged_timestep AS ( @@ -175,9 +195,9 @@ class QueryStrings(Enum): source_id FROM lagged_timestep ) - {}""".format(VEHICLE_POWER_DEMAND_COMBUSTION_FINAL_SELECT.format(1, - 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', - 'denoised_speed_cte')) + {}""".format(VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT.format(1, + 'POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL', + 'denoised_speed_cte')) FACT_NETWORK_THROUGHPUT_AGG = """ WITH min_time AS ( @@ -305,6 +325,7 @@ class QueryStrings(Enum): FROM min_max_time_step WHERE 1 = 1 AND min_time_step >= {start_filter} + AND min_time_step < {stop_filter} GROUP BY 1, 2 ), outflows AS ( SELECT @@ -313,11 +334,14 @@ class QueryStrings(Enum): 60 * COUNT(DISTINCT id) AS outflow_rate FROM min_max_time_step WHERE 1 = 1 + AND max_time_step >= {start_filter} AND max_time_step < {stop_filter} GROUP BY 1, 2 ) SELECT - COALESCE(i.time_step, o.time_step) AS time_step, + COALESCE(i.time_step, o.time_step) - MIN(COALESCE(i.time_step, o.time_step)) + OVER (PARTITION BY COALESCE(i.source_id, o.source_id) + ORDER BY COALESCE(i.time_step, o.time_step) ASC) AS time_step, 
COALESCE(i.source_id, o.source_id) AS source_id, COALESCE(i.inflow_rate, 0) AS inflow_rate, COALESCE(o.outflow_rate, 0) AS outflow_rate @@ -434,7 +458,8 @@ class QueryStrings(Enum): SELECT vt.id, vt.source_id, - vt.time_step, + vt.time_step - FIRST_VALUE(vt.time_step) + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS time_step, energy_model_id, et.speed, et.acceleration, @@ -528,17 +553,44 @@ class QueryStrings(Enum): ;""" LEADERBOARD_CHART_AGG = """ + WITH agg AS ( + SELECT + l.date AS submission_date, + m.submission_time, + l.source_id, + m.submitter_name, + m.strategy, + m.network, + m.is_baseline, + l.energy_model_id, + l.efficiency_meters_per_joules, + l.efficiency_miles_per_gallon, + l.throughput_per_hour, + b.source_id AS baseline_source_id + FROM leaderboard_chart AS l, metadata_table AS m, baseline_table as b + WHERE 1 = 1 + AND l.source_id = m.source_id + AND m.network = b.network + AND (m.is_baseline='False' + OR (m.is_baseline='True' + AND m.source_id = b.source_id)) + ) SELECT - l.date AS submission_date, - l.source_id, - m.network, - m.is_baseline, - l.energy_model_id, - l.efficiency_meters_per_joules, - l.efficiency_miles_per_gallon, - l.throughput_per_hour - FROM leaderboard_chart AS l, metadata_table AS m - WHERE 1 = 1 - AND l.source_id = m.source_id - ORDER BY l.date, m.submission_time, l.source_id ASC + agg.submission_date, + agg.source_id, + agg.submitter_name, + agg.strategy, + agg.network, + agg.is_baseline, + agg.energy_model_id, + agg.efficiency_meters_per_joules, + agg.efficiency_miles_per_gallon, + 100 * (1 - baseline.efficiency_miles_per_gallon / agg.efficiency_miles_per_gallon) AS percent_improvement, + agg.throughput_per_hour + FROM agg + JOIN agg AS baseline ON 1 = 1 + AND agg.network = baseline.network + AND baseline.is_baseline = 'True' + AND agg.baseline_source_id = baseline.source_id + ORDER BY agg.submission_date, agg.submission_time ASC ;""" diff --git a/flow/envs/base.py b/flow/envs/base.py index 
1e739faba..9dec30025 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -26,6 +26,8 @@ from flow.core.kernel import Kernel from flow.utils.exceptions import FatalFlowError +from flow.data_pipeline.data_pipeline import get_extra_info + class Env(gym.Env, metaclass=ABCMeta): """Base environment class. @@ -578,6 +580,14 @@ def reset(self): # perform (optional) warm-up steps before training for _ in range(self.env_params.warmup_steps): observation, _, _, _ = self.step(rl_actions=None) + # collect data for pipeline during the warmup period + try: + extra_info, source_id, run_id = self.pipeline_params + veh_ids = self.k.vehicle.get_ids() + get_extra_info(self.k.vehicle, extra_info, veh_ids, source_id, run_id) + # In case the attribute `pipeline_params` is not added to this instance + except AttributeError as e: + pass # render a frame self.render(reset=True) diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index b2e22d5b3..fb6792c11 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -32,7 +32,7 @@ from examples.exp_configs.rl.multiagent.multiagent_i210 import flow_params as I210_MA_DEFAULT_FLOW_PARAMS from examples.exp_configs.rl.multiagent.multiagent_i210 import custom_callables -from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration from flow.data_pipeline.leaderboard_utils import network_name_translate import uuid @@ -221,6 +221,9 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) i = 0 while i < args.num_rollouts: @@ -231,6
+234,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= completed_vehicle_avg_energy = {} completed_vehicle_travel_time = {} custom_vals = {key: [] for key in custom_callables.keys()} + run_id = "run_{}".format(i) + env.pipeline_params = (extra_info, source_id, run_id) state = env.reset() initial_vehicles = set(env.k.vehicle.get_ids()) for _ in range(env_params.horizon): @@ -260,10 +265,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= veh_ids = env.k.vehicle.get_ids() vel.append(np.mean(env.k.vehicle.get_speed(veh_ids))) - # Collect information from flow for the trajectory output - get_extra_info(env.k.vehicle, extra_info, veh_ids) - extra_info["source_id"].extend([source_id] * len(veh_ids)) - extra_info["run_id"].extend(['run_{}'.format(i)] * len(veh_ids)) + # collect additional information for the data pipeline + get_extra_info(env.k.vehicle, extra_info, veh_ids, source_id, run_id) # Compute the results for the custom callables. 
for (key, lambda_func) in custom_callables.items(): diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 0ab658f75..059cabbbd 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -33,6 +33,11 @@ from flow.utils.rllib import get_rllib_config from flow.utils.rllib import get_rllib_pkl +from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration +from flow.data_pipeline.leaderboard_utils import network_name_translate +from collections import defaultdict +from datetime import datetime, timezone +import uuid EXAMPLE_USAGE = """ example usage: @@ -207,6 +212,23 @@ def visualizer_rllib(args): if not sim_params.restart_instance: env.restart_simulation(sim_params=sim_params, render=sim_params.render) + # data pipeline + extra_info = defaultdict(lambda: []) + source_id = 'flow_{}'.format(uuid.uuid4().hex) + metadata = defaultdict(lambda: []) + # collect current time + cur_datetime = datetime.now(timezone.utc) + cur_date = cur_datetime.date().isoformat() + cur_time = cur_datetime.time().isoformat() + # collecting information for metadata table + metadata['source_id'].append(source_id) + metadata['submission_time'].append(cur_time) + metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) + metadata['is_baseline'].append(str(args.is_baseline)) + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) + # Simulate and collect metrics final_outflows = [] final_inflows = [] @@ -216,6 +238,8 @@ def visualizer_rllib(args): std_speed = [] for i in range(args.num_rollouts): vel = [] + run_id = "run_{}".format(i) + env.pipeline_params = (extra_info, source_id, run_id) state = env.reset() if multiagent: ret = {key: [0] for key in rets.keys()} @@ -246,6 +270,10 @@ def visualizer_rllib(args): else: action = agent.compute_action(state) state, reward, done, _ = 
env.step(action) + + # collect data for data pipeline + get_extra_info(vehicles, extra_info, vehicles.get_ids(), source_id, run_id) + if multiagent: for actor, rew in reward.items(): ret[policy_map_fn(actor)][0] += rew @@ -341,6 +369,22 @@ def visualizer_rllib(args): # delete the .xml version of the emission file os.remove(emission_path) + # generate datapipeline output + trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) + metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + write_dict_to_csv(trajectory_table_path, extra_info, True) + write_dict_to_csv(metadata_table_path, metadata, True) + + if args.to_aws: + upload_to_s3('circles.data.pipeline', + 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, + source_id), + metadata_table_path) + upload_to_s3('circles.data.pipeline', + 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), + trajectory_table_path, + {'network': metadata['network'][0]}) + def create_parser(): """Create the parser to capture CLI arguments.""" @@ -394,6 +438,18 @@ def create_parser(): '--horizon', type=int, help='Specifies the horizon.') + parser.add_argument( + '--is_baseline', + action='store_true', + help='specifies whether this is a baseline run' + ) + parser.add_argument( + '--to_aws', + type=str, nargs='?', default=None, const="default", + help='Specifies the name of the partition to store the output' + 'file on S3. Putting not None value for this argument' + 'automatically set gen_emission to True.' 
+ ) return parser From 2d964603c47f4a27904a7089a44e43ac927586d2 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 16:18:17 -0700 Subject: [PATCH 274/335] Cleanup to train.py --- examples/train.py | 54 +------------------ .../imitation_learning/imitation_trainer.py | 2 +- 2 files changed, 2 insertions(+), 54 deletions(-) diff --git a/examples/train.py b/examples/train.py index dd07f7c38..87853e6c4 100644 --- a/examples/train.py +++ b/examples/train.py @@ -193,9 +193,7 @@ def setup_exps_rllib(flow_params, config["use_gae"] = True config["lambda"] = 0.97 config["kl_target"] = 0.02 - # TODO: restore this to 10 - config["num_sgd_iter"] = 1 - # config["num_sgd_iter"] = 10 + config["num_sgd_iter"] = 10 if flags.grid_search: config["lambda"] = tune.grid_search([0.5, 0.9]) config["lr"] = tune.grid_search([5e-4, 5e-5]) @@ -335,56 +333,6 @@ def on_train_result(info): register_env(gym_name, create_env) return alg_run, gym_name, config -# def train_rllib_with_imitation(submodule, flags): -# """Train policies using the PPO algorithm in RLlib, with initiale policy weights from imitation learning.""" -# import ray -# from flow.controllers.imitation_learning.ppo_model import PPONetwork -# from ray.rllib.models import ModelCatalog -# -# flow_params = submodule.flow_params -# flow_params['sim'].render = flags.render -# policy_graphs = getattr(submodule, "POLICY_GRAPHS", None) -# policy_mapping_fn = getattr(submodule, "policy_mapping_fn", None) -# policies_to_train = getattr(submodule, "policies_to_train", None) -# -# alg_run, gym_name, config = setup_exps_rllib( -# flow_params, flags.num_cpus, flags.num_rollouts, flags, -# policy_graphs, policy_mapping_fn, policies_to_train) -# -# -# -# config['num_workers'] = flags.num_cpus -# config['env'] = gym_name -# -# # create a custom string that makes looking at the experiment names easier -# def trial_str_creator(trial): -# return "{}_{}".format(trial.trainable_name, trial.experiment_tag) -# -# if flags.local_mode: -# 
ray.init(local_mode=True) -# else: -# ray.init() -# -# exp_dict = { -# "run_or_experiment": alg_run, -# "name": gym_name, -# "config": config, -# "checkpoint_freq": flags.checkpoint_freq, -# "checkpoint_at_end": True, -# 'trial_name_creator': trial_str_creator, -# "max_failures": 0, -# "stop": { -# "training_iteration": flags.num_iterations, -# }, -# } -# date = datetime.now(tz=pytz.utc) -# date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") -# s3_string = "s3://i210.experiments/i210/" \ -# + date + '/' + flags.exp_title -# if flags.use_s3: -# exp_dict['upload_dir'] = s3_string -# tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) - def train_rllib(submodule, flags): """Train policies using the PPO algorithm in RLlib.""" import ray diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index afa6680cc..2f2d8f8df 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -5,7 +5,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class -import custom_ppo +import flow.controllers.imitation_learning.custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ From 25e623ab6d3d0a8baf22ac91d2c68f2711127194 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 17:55:55 -0700 Subject: [PATCH 275/335] timespace diagram merge bug fix --- flow/utils/rllib.py | 3 +- flow/visualize/time_space_diagram.py | 122 ++------------------------- 2 files changed, 9 insertions(+), 116 deletions(-) diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index e3404a61f..4d2d8553f 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -95,7 +95,8 @@ def get_flow_params(config): if type(config) == dict: flow_params = json.loads(config['env_config']['flow_params']) else: - flow_params = json.load(open(config, 'r')) + 
config = json.load(open(config, 'r')) + flow_params = json.loads(config['env_config']['flow_params']) # reinitialize the vehicles class from stored data veh = VehicleParams() diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index fe66c1089..cc0f388bd 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -169,31 +169,7 @@ def _merge(data): return segs, data - # compute the absolute position - for veh_id in data.keys(): - data[veh_id]['abs_pos'] = _get_abs_pos(data[veh_id]['edge'], - data[veh_id]['pos'], edgestarts) - - # prepare the speed and absolute position in a way that is compatible with - # the space-time diagram, and compute the number of vehicles at each step - pos = np.zeros((all_time.shape[0], len(data.keys()))) - speed = np.zeros((all_time.shape[0], len(data.keys()))) - for i, veh_id in enumerate(sorted(data.keys())): - for spd, abs_pos, ti, edge in zip(data[veh_id]['vel'], - data[veh_id]['abs_pos'], - data[veh_id]['time'], - data[veh_id]['edge']): - # avoid vehicles outside the main highway - if edge in ['inflow_merge', 'bottom', ':bottom_0']: - continue - ind = np.where(ti == all_time)[0] - pos[ind, i] = abs_pos - speed[ind, i] = spd - - return pos, speed, all_time - - -def _highway(data, params, all_time): +def _highway(data): r"""Generate position and speed data for the highway subnetwork. Parameters @@ -220,61 +196,11 @@ def _highway(data, params, all_time): time step. Set to zero if the vehicle is not present in the network at that time step. """ - junction_length = 0.1 - length = params['net'].additional_params["length"] - num_edges = params['net'].additional_params.get("num_edges", 1) - edge_starts = {} - # Add the main edges. 
- edge_starts.update({ - "highway_{}".format(i): i * (length / num_edges + junction_length) - for i in range(num_edges) - }) - - if params['net'].additional_params["use_ghost_edge"]: - edge_starts.update({"highway_end": length + num_edges * junction_length}) - - edge_starts.update({ - ":edge_{}".format(i + 1): (i + 1) * length / num_edges + i * junction_length - for i in range(num_edges - 1) - }) - - if params['net'].additional_params["use_ghost_edge"]: - edge_starts.update({ - ":edge_{}".format(num_edges): length + (num_edges - 1) * junction_length - }) - - # compute the absolute position - for veh_id in data.keys(): - data[veh_id]['abs_pos'] = _get_abs_pos_1_edge(data[veh_id]['edge'], - data[veh_id]['pos'], - edge_starts) - - # track only vehicles that were around during this time period - # create the output variables - pos = np.zeros((all_time.shape[0], len(data.keys()))) - speed = np.zeros((all_time.shape[0], len(data.keys()))) - observed_row_list = [] - for i, veh_id in enumerate(sorted(data.keys())): - for spd, abs_pos, ti, edge, lane in zip(data[veh_id]['vel'], - data[veh_id]['abs_pos'], - data[veh_id]['time'], - data[veh_id]['edge'], - data[veh_id]['lane']): - # avoid vehicles not on the relevant edges. 
Also only check the - # second to last lane - if edge not in edge_starts.keys() or ti not in all_time: - continue - else: - if i not in observed_row_list: - observed_row_list.append(i) - ind = np.where(ti == all_time)[0] - pos[ind, i] = abs_pos - speed[ind, i] = spd - - pos = pos[:, observed_row_list] - speed = speed[:, observed_row_list] - - return pos, speed, all_time + data.loc[:, :] = data[(data['distance'] > 500)] + data.loc[:, :] = data[(data['distance'] < 2300)] + segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) + + return segs, data def _ring_road(data, params, all_time): @@ -566,6 +492,7 @@ def plot_tsd(ax, df, segs, args, lane=None): for lane, df in traj_df.groupby('lane_id'): ax = plt.subplot(nlanes, 1, lane+1) + plot_tsd(ax, df, segs[lane], args, lane) else: # perform plotting operation @@ -574,41 +501,6 @@ def plot_tsd(ax, df, segs, args, lane=None): plot_tsd(ax, traj_df, segs, args) - for indx_car in range(pos.shape[1]): - unique_car_pos = pos[:, indx_car] - - if flow_params['network'] == I210SubNetwork or flow_params['network'] == HighwayNetwork: - indices = np.where(pos[:, indx_car] != 0)[0] - unique_car_speed = speed[indices, indx_car] - points = np.array([time[indices], pos[indices, indx_car]]).T.reshape(-1, 1, 2) - else: - - # discontinuity from wraparound - disc = np.where(np.abs(np.diff(unique_car_pos)) >= 10)[0] + 1 - unique_car_time = np.insert(time, disc, np.nan) - unique_car_pos = np.insert(unique_car_pos, disc, np.nan) - unique_car_speed = np.insert(speed[:, indx_car], disc, np.nan) - # - points = np.array( - [unique_car_time, unique_car_pos]).T.reshape(-1, 1, 2) - segments = np.concatenate([points[:-1], points[1:]], axis=1) - lc = LineCollection(segments, cmap=my_cmap, norm=norm) - - # Set the values used for color mapping - lc.set_array(unique_car_speed) - lc.set_linewidth(1.75) - cols.append(lc) - - plt.title(args.title, fontsize=25) - plt.ylabel('Position (m)', fontsize=20) - 
plt.xlabel('Time (s)', fontsize=20) - - for col in cols: - line = ax.add_collection(col) - cbar = plt.colorbar(line, ax=ax, norm=norm) - cbar.set_label('Velocity (m/s)', fontsize=20) - cbar.ax.tick_params(labelsize=18) - ########################################################################### # Note: For MergeNetwork only # if flow_params['network'] == 'MergeNetwork': # From 2e76a4c1e02112aab24dcf21be6c9dcb9ea7187b Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:13:40 -0700 Subject: [PATCH 276/335] reduce time-bins to 10s --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d538e7d62..a60b524fd 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -494,7 +494,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - STDDEV(speed) AS speed_lower_bound, @@ -512,7 +512,7 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY time_step ASC) AS energy_start, From 0c7de605ca86e28ecc731a2d93797151217b9d79 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:15:30 -0700 Subject: [PATCH 277/335] reduce time-bins in more places --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a60b524fd..beccf9c99 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -514,10 +514,10 @@ class QueryStrings(Enum): id, CAST(time_step/10 AS 
INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( From 71dee847ee5e8d59c4a09193b0d5639895c0cafc Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 21:16:32 -0700 Subject: [PATCH 278/335] docstring fix --- flow/visualize/time_space_diagram.py | 35 ++++++++++++---------------- 1 file changed, 15 insertions(+), 20 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index cc0f388bd..d8ae7cd73 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -170,31 +170,26 @@ def _merge(data): return segs, data def _highway(data): - r"""Generate position and speed data for the highway subnetwork. + r"""Generate time and position data for the highway. + + We generate plots for all lanes, so the segments are wrapped in + a dictionary. Parameters ---------- - data : dict of dict - Key = "veh_id": name of the vehicle \n Elements: - * "time": time step at every sample - * "edge": edge ID at every sample - * "pos": relative position at every sample - * "vel": speed at every sample - params : dict - flow-specific parameters - all_time : array_like - a (n_steps,) vector representing the unique time steps in the - simulation + data : pd.DataFrame + cleaned dataframe of the trajectory data + Returns ------- - as_array - n_steps x n_veh matrix specifying the absolute position of every - vehicle at every time step. Set to zero if the vehicle is not present - in the network at that time step. - as_array - n_steps x n_veh matrix specifying the speed of every vehicle at every - time step. 
Set to zero if the vehicle is not present in the network at - that time step. + dict of ndarray + dictionary of 3d array (n_segments x 2 x 2) containing segments + to be plotted. the dictionary is keyed on lane numbers, with the + values being the 3d array representing the segments. every inner + 2d array is comprised of two 1d arrays representing + [start time, start distance] and [end time, end distance] pairs. + pd.DataFrame + modified trajectory dataframe """ data.loc[:, :] = data[(data['distance'] > 500)] data.loc[:, :] = data[(data['distance'] < 2300)] From dfb1c0790f57603a098ef1b5745c992c6231f89e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 21:29:38 -0700 Subject: [PATCH 279/335] add query to count vehicles in domain at every timestep --- flow/data_pipeline/query.py | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d538e7d62..e1c3faa31 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -14,9 +14,13 @@ ], "fact_network_inflows_outflows": [ "FACT_NETWORK_INFLOWS_OUTFLOWS" + ], + "fact_vehicle_counts_by_time": [ + "FACT_VEHICLE_COUNTS_BY_TIME" ] }, "fact_energy_trace": {}, + "fact_vehicle_counts_by_time": {}, "POWER_DEMAND_MODEL_DENOISED_ACCEL": { "fact_vehicle_fuel_efficiency_agg": [ "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" @@ -552,6 +556,22 @@ class QueryStrings(Enum): ORDER BY time_seconds_bin ASC ;""" + FACT_VEHICLE_COUNTS_BY_TIME = """ + SELECT + vt.source_id, + vt.time_step - FIRST_VALUE(vt.time_step) + OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS time_step, + COUNT(DISTINCT vt.id) AS vehicle_counts + FROM fact_vehicle_trace vt + WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} + GROUP BY 1, 2 + ; + """ + LEADERBOARD_CHART_AGG = """ WITH agg AS ( SELECT From 23c55fefe168dfc7b95645ae328b27fabef5f35f Mon Sep 17 
00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 21:31:59 -0700 Subject: [PATCH 280/335] fix typo in window function --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index e1c3faa31..61dda1212 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -560,7 +560,7 @@ class QueryStrings(Enum): SELECT vt.source_id, vt.time_step - FIRST_VALUE(vt.time_step) - OVER (PARTITION BY vt.id, vt.source_id ORDER BY vt.time_step ASC) AS time_step, + OVER (PARTITION BY vt.source_id ORDER BY vt.time_step ASC) AS time_step, COUNT(DISTINCT vt.id) AS vehicle_counts FROM fact_vehicle_trace vt WHERE 1 = 1 From 18c0d9ed8bc8f00f1ca269d0af9d7dce14908b7f Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 18 Jun 2020 22:44:14 -0700 Subject: [PATCH 281/335] add imitation custom models --- flow/visualize/visualizer_rllib.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 75a8c5c8b..312c1dbb4 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -172,7 +172,7 @@ def visualizer_rllib(args): checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) - # agent.import_model('/Users/akashvelu/Desktop/latest_run3/ppo.h5', 'av') + agent.import_model('/Users/akashvelu/Desktop/combined_test3/ppo_model.h5', 'av') if hasattr(agent, "local_evaluator") and \ From 024cb9360671d0300d805dae45d6e2ce9512f703 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 19 Jun 2020 10:22:14 -0700 Subject: [PATCH 282/335] code cleanup --- examples/train.py | 2 +- flow/controllers/imitation_learning/imitation_trainer.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/train.py b/examples/train.py index 87853e6c4..05ebb3fe3 100644 --- 
a/examples/train.py +++ b/examples/train.py @@ -176,7 +176,7 @@ def setup_exps_rllib(flow_params, alg_run = flags.algorithm.upper() if alg_run == "PPO": - from custom_ppo import CustomPPOTrainer + from flow.controllers.imitation_learning.custom_ppo import CustomPPOTrainer from ray.rllib.agents.ppo import DEFAULT_CONFIG config = deepcopy(DEFAULT_CONFIG) diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/imitation_trainer.py index 2f2d8f8df..7db18d005 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/imitation_trainer.py @@ -5,7 +5,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class -import flow.controllers.imitation_learning.custom_ppo +import flow.controllers.imitation_learning.custom_ppo as custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ From 7ac4c32f4b9c0eed92a9ca0369f97c6c05f7f0c6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 10:44:25 -0700 Subject: [PATCH 283/335] implement _get_abs_pos() for HighwayNetwork --- flow/visualize/time_space_diagram.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index d8ae7cd73..93a3d713f 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -360,6 +360,8 @@ def _get_abs_pos(df, params): 'bottom_to_top': intersection / 2 + inner, 'right_to_left': junction + 3 * inner, } + elif params['network'] == HighwayNetwork: + return df['x'] else: edgestarts = defaultdict(float) From 5de54b7999410856dc193fd24dec44ce0fc64ae8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:40:49 -0700 Subject: [PATCH 284/335] remove trailing whitespaces --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py 
b/flow/data_pipeline/query.py index d538e7d62..d303341cf 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -571,9 +571,9 @@ class QueryStrings(Enum): WHERE 1 = 1 AND l.source_id = m.source_id AND m.network = b.network - AND (m.is_baseline='False' - OR (m.is_baseline='True' - AND m.source_id = b.source_id)) + AND (m.is_baseline='False' + OR (m.is_baseline='True' + AND m.source_id = b.source_id)) ) SELECT agg.submission_date, From 38a6d707148bfc5b89281ae6f26ddbb900d541c6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:41:58 -0700 Subject: [PATCH 285/335] remove unused import --- flow/core/util.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/core/util.py b/flow/core/util.py index cd269e6af..c0c31f811 100755 --- a/flow/core/util.py +++ b/flow/core/util.py @@ -4,7 +4,6 @@ import errno import os from lxml import etree -from xml.etree import ElementTree def makexml(name, nsl): From 379104893c3bfdfe5d9ef56f620cc0d56b324dcb Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:49:34 -0700 Subject: [PATCH 286/335] fix flake8 issues --- flow/core/experiment.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index ceb8c7b61..8ede367a7 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -170,7 +170,7 @@ def rl_actions(*_): if convert_to_csv and self.env.simulator == "traci": dir_path = self.env.sim_params.emission_path - if not dir_path is None: + if dir_path: trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) @@ -196,7 +196,7 @@ def rl_actions(*_): get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and not dir_path is None: + if convert_to_csv and self.env.simulator == 
"traci" and j % 100 == 0 and dir_path: write_dict_to_csv(trajectory_table_path, extra_info, not j) extra_info.clear() @@ -230,17 +230,6 @@ def rl_actions(*_): # wait a short period of time to ensure the xml file is readable time.sleep(0.1) - # collect the location of the emission file - emission_filename = \ - "{0}-emission.xml".format(self.env.network.name) - emission_path = os.path.join(dir_path, emission_filename) - - # convert the emission file into a csv - emission_to_csv(emission_path) - - # Delete the .xml version of the emission file. - os.remove(emission_path) - write_dict_to_csv(trajectory_table_path, extra_info) write_dict_to_csv(metadata_table_path, metadata, True) From ed0135748100bfbd81f5a9f42deba934e72e80c4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:51:19 -0700 Subject: [PATCH 287/335] remove unused error variable --- flow/envs/base.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/envs/base.py b/flow/envs/base.py index 9dec30025..8a36d6a47 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -586,7 +586,7 @@ def reset(self): veh_ids = self.k.vehicle.get_ids() get_extra_info(self.k.vehicle, extra_info, veh_ids, source_id, run_id) # In case the attribute `pipeline_params` if not added to this instance - except AttributeError as e: + except AttributeError: pass # render a frame From b9fd3be9393f5bed6f0b83a6550cae7136747468 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:51:58 -0700 Subject: [PATCH 288/335] add expected blank line before function --- flow/visualize/time_space_diagram.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 93a3d713f..e9c43e0ed 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -169,6 +169,7 @@ def _merge(data): return segs, data + def _highway(data): r"""Generate time and position data for the highway. 
From 62ee8a04fe04180dba0958708bc106d9a7ca7eee Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 11:58:36 -0700 Subject: [PATCH 289/335] add specified exception to try --- flow/algorithms/centralized_PPO.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py index 57fdd7e33..d30442773 100644 --- a/flow/algorithms/centralized_PPO.py +++ b/flow/algorithms/centralized_PPO.py @@ -228,7 +228,8 @@ def centralized_critic_postprocessing(policy, try: central_obs_batch = np.hstack( (sample_batch["obs"], np.hstack(central_obs_list))) - except: + except Exception as e: + print(‘Error in centralized PPO: ’, e) # TODO(@ev) this is a bug and needs to be fixed central_obs_batch = sample_batch["obs"] max_vf_agents = policy.model.max_num_agents From 885ab6f13eae864909fd79ba3143c2ade48d4c32 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 19 Jun 2020 12:07:13 -0700 Subject: [PATCH 290/335] custom ppo for vf plotting edits --- .../imitation_learning/custom_ppo.py | 436 +++++++----------- .../custom_ppo_tf_policy.py | 283 ++++++++++++ 2 files changed, 437 insertions(+), 282 deletions(-) create mode 100644 flow/controllers/imitation_learning/custom_ppo_tf_policy.py diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py index cf0def369..fdbc073a8 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -1,301 +1,162 @@ -from __future__ import absolute_import -from __future__ import division -from __future__ import print_function - import logging -import ray -from ray.rllib.evaluation.postprocessing import compute_advantages, \ - Postprocessing -from ray.rllib.policy.sample_batch import SampleBatch -from ray.rllib.policy.tf_policy import LearningRateSchedule, \ - EntropyCoeffSchedule, ACTION_LOGP -from ray.rllib.policy.tf_policy_template import build_tf_policy 
-from ray.rllib.utils.explained_variance import explained_variance -from ray.rllib.utils.tf_ops import make_tf_callable -from ray.rllib.utils import try_import_tf - +from ray.rllib.agents import with_common_config +from flow.controllers.imitation_learning.custom_ppo_tf_policy import CustomPPOTFPolicy from ray.rllib.agents.trainer_template import build_trainer -from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG -from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales -from ray.rllib.agents.impala.vtrace_policy import BEHAVIOUR_LOGITS +from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer +from ray.rllib.utils import try_import_tf tf = try_import_tf() logger = logging.getLogger(__name__) - -class PPOLoss: - def __init__(self, - dist_class, - model, - value_targets, - advantages, - actions, - prev_logits, - prev_actions_logp, - vf_preds, - curr_action_dist, - value_fn, - cur_kl_coeff, - valid_mask, - entropy_coeff=0, - clip_param=0.1, - vf_clip_param=0.1, - vf_loss_coeff=1.0, - use_gae=True): - """Constructs the loss for Proximal Policy Objective. - - Arguments: - dist_class: action distribution class for logits. - value_targets (Placeholder): Placeholder for target values; used - for GAE. - actions (Placeholder): Placeholder for actions taken - from previous model evaluation. - advantages (Placeholder): Placeholder for calculated advantages - from previous model evaluation. - prev_logits (Placeholder): Placeholder for logits output from - previous model evaluation. - prev_actions_logp (Placeholder): Placeholder for action prob output - from the previous (before update) Model evaluation. - vf_preds (Placeholder): Placeholder for value function output - from the previous (before update) Model evaluation. - curr_action_dist (ActionDistribution): ActionDistribution - of the current model. - value_fn (Tensor): Current value function output Tensor. 
- cur_kl_coeff (Variable): Variable holding the current PPO KL - coefficient. - valid_mask (Optional[tf.Tensor]): An optional bool mask of valid - input elements (for max-len padded sequences (RNNs)). - entropy_coeff (float): Coefficient of the entropy regularizer. - clip_param (float): Clip parameter - vf_clip_param (float): Clip parameter for the value function - vf_loss_coeff (float): Coefficient of the value function loss - use_gae (bool): If true, use the Generalized Advantage Estimator. - """ - if valid_mask is not None: - - def reduce_mean_valid(t): - return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) - - else: - - def reduce_mean_valid(t): - return tf.reduce_mean(t) - - prev_dist = dist_class(prev_logits, model) - # Make loss functions. - logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) - action_kl = prev_dist.kl(curr_action_dist) - self.mean_kl = reduce_mean_valid(action_kl) - - curr_entropy = curr_action_dist.entropy() - self.mean_entropy = reduce_mean_valid(curr_entropy) - - surrogate_loss = tf.minimum( - advantages * logp_ratio, - advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, - 1 + clip_param)) - self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) - - if use_gae: - vf_loss1 = tf.square(value_fn - value_targets) - vf_clipped = vf_preds + tf.clip_by_value( - value_fn - vf_preds, -vf_clip_param, vf_clip_param) - vf_loss2 = tf.square(vf_clipped - value_targets) - vf_loss = tf.maximum(vf_loss1, vf_loss2) - self.mean_vf_loss = reduce_mean_valid(vf_loss) - loss = reduce_mean_valid( - -surrogate_loss + cur_kl_coeff * action_kl + - vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) - else: - self.mean_vf_loss = tf.constant(0.0) - loss = reduce_mean_valid(-surrogate_loss + - cur_kl_coeff * action_kl - - entropy_coeff * curr_entropy) - self.loss = loss - - -def ppo_surrogate_loss(policy, model, dist_class, train_batch): - logits, state = model.from_batch(train_batch) - action_dist = dist_class(logits, model) - - 
mask = None - if state: - max_seq_len = tf.reduce_max(train_batch["seq_lens"]) - mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len) - mask = tf.reshape(mask, [-1]) - - policy.loss_obj = PPOLoss( - dist_class, - model, - train_batch[Postprocessing.VALUE_TARGETS], - train_batch[Postprocessing.ADVANTAGES], - train_batch[SampleBatch.ACTIONS], - train_batch[BEHAVIOUR_LOGITS], - train_batch[ACTION_LOGP], - train_batch[SampleBatch.VF_PREDS], - action_dist, - model.value_function(), - policy.kl_coeff, - mask, - entropy_coeff=policy.entropy_coeff, - clip_param=policy.config["clip_param"], - vf_clip_param=policy.config["vf_clip_param"], - vf_loss_coeff=policy.config["vf_loss_coeff"], - use_gae=policy.config["use_gae"], - ) - - return policy.loss_obj.loss - - -def kl_and_loss_stats(policy, train_batch): - return { - "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), - "cur_lr": tf.cast(policy.cur_lr, tf.float64), - "total_loss": policy.loss_obj.loss, - "policy_loss": policy.loss_obj.mean_policy_loss, - "vf_loss": policy.loss_obj.mean_vf_loss, - "vf_explained_var": explained_variance( - train_batch[Postprocessing.VALUE_TARGETS], - policy.model.value_function()), - "vf_preds": policy.model.value_function(), - "kl": policy.loss_obj.mean_kl, - "entropy": policy.loss_obj.mean_entropy, - "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), - } - - -def vf_preds_and_logits_fetches(policy): - """Adds value function and logits outputs to experience train_batches.""" - return { - SampleBatch.VF_PREDS: policy.model.value_function(), - BEHAVIOUR_LOGITS: policy.model.last_output(), - } - - - -def postprocess_ppo_gae(policy, - sample_batch, - other_agent_batches=None, - episode=None): - """Adds the policy logits, VF preds, and advantages to the trajectory.""" - - completed = sample_batch["dones"][-1] - if completed: - last_r = 0.0 +# yapf: disable +# __sphinx_doc_begin__ +DEFAULT_CONFIG = with_common_config({ + # Should use a critic as a baseline (otherwise don't use 
value baseline; + # required for using GAE). + "use_critic": True, + # If true, use the Generalized Advantage Estimator (GAE) + # with a value function, see https://arxiv.org/pdf/1506.02438.pdf. + "use_gae": True, + # The GAE(lambda) parameter. + "lambda": 1.0, + # Initial coefficient for KL divergence. + "kl_coeff": 0.2, + # Size of batches collected from each worker. + "rollout_fragment_length": 200, + # Number of timesteps collected for each SGD round. This defines the size + # of each SGD epoch. + "train_batch_size": 4000, + # Total SGD batch size across all devices for SGD. This defines the + # minibatch size within each epoch. + "sgd_minibatch_size": 128, + # Whether to shuffle sequences in the batch when training (recommended). + "shuffle_sequences": True, + # Number of SGD iterations in each outer loop (i.e., number of epochs to + # execute per train batch). + "num_sgd_iter": 30, + # Stepsize of SGD. + "lr": 5e-5, + # Learning rate schedule. + "lr_schedule": None, + # Share layers for value function. If you set this to True, it's important + # to tune vf_loss_coeff. + "vf_share_layers": False, + # Coefficient of the value function loss. IMPORTANT: you must tune this if + # you set vf_share_layers: True. + "vf_loss_coeff": 1.0, + # Coefficient of the entropy regularizer. + "entropy_coeff": 0.0, + # Decay schedule for the entropy regularizer. + "entropy_coeff_schedule": None, + # PPO clip parameter. + "clip_param": 0.3, + # Clip param for the value function. Note that this is sensitive to the + # scale of the rewards. If your expected V is large, increase this. + "vf_clip_param": 10.0, + # If specified, clip the global norm of gradients by this amount. + "grad_clip": None, + # Target value for KL divergence. + "kl_target": 0.01, + # Whether to rollout "complete_episodes" or "truncate_episodes". + "batch_mode": "truncate_episodes", + # Which observation filter to apply to the observation. 
+ "observation_filter": "NoFilter", + # Uses the sync samples optimizer instead of the multi-gpu one. This is + # usually slower, but you might want to try it if you run into issues with + # the default optimizer. + "simple_optimizer": False, + # Use PyTorch as framework? + "use_pytorch": False +}) +# __sphinx_doc_end__ +# yapf: enable + + +def choose_policy_optimizer(workers, config): + if config["simple_optimizer"]: + return SyncSamplesOptimizer( + workers, + num_sgd_iter=config["num_sgd_iter"], + train_batch_size=config["train_batch_size"], + sgd_minibatch_size=config["sgd_minibatch_size"], + standardize_fields=["advantages"]) + + return LocalMultiGPUOptimizer( + workers, + sgd_batch_size=config["sgd_minibatch_size"], + num_sgd_iter=config["num_sgd_iter"], + num_gpus=config["num_gpus"], + rollout_fragment_length=config["rollout_fragment_length"], + num_envs_per_worker=config["num_envs_per_worker"], + train_batch_size=config["train_batch_size"], + standardize_fields=["advantages"], + shuffle_sequences=config["shuffle_sequences"]) + + +def update_kl(trainer, fetches): + # Single-agent. + if "kl" in fetches: + trainer.workers.local_worker().for_policy( + lambda pi: pi.update_kl(fetches["kl"])) + + # Multi-agent. 
else: - next_state = [] - for i in range(policy.num_state_tensors()): - next_state.append([sample_batch["state_out_{}".format(i)][-1]]) - last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1], - sample_batch[SampleBatch.ACTIONS][-1], - sample_batch[SampleBatch.REWARDS][-1], - *next_state) - batch = compute_advantages( - sample_batch, - last_r, - policy.config["gamma"], - policy.config["lambda"], - use_gae=policy.config["use_gae"]) - return batch - -def clip_gradients(policy, optimizer, loss): - variables = policy.model.trainable_variables() - if policy.config["grad_clip"] is not None: - grads_and_vars = optimizer.compute_gradients(loss, variables) - grads = [g for (g, v) in grads_and_vars] - policy.grads, _ = tf.clip_by_global_norm(grads, - policy.config["grad_clip"]) - clipped_grads = list(zip(policy.grads, variables)) - return clipped_grads + def update(pi, pi_id): + if pi_id in fetches: + pi.update_kl(fetches[pi_id]["kl"]) + else: + logger.debug("No data for {}, not updating kl".format(pi_id)) + + trainer.workers.local_worker().foreach_trainable_policy(update) + + +def warn_about_bad_reward_scales(trainer, result): + if result["policy_reward_mean"]: + return # Punt on handling multiagent case. + + # Warn about excessively high VF loss. + learner_stats = result["info"]["learner"] + if "default_policy" in learner_stats: + scaled_vf_loss = (trainer.config["vf_loss_coeff"] * + learner_stats["default_policy"]["vf_loss"]) + policy_loss = learner_stats["default_policy"]["policy_loss"] + if trainer.config["vf_share_layers"] and scaled_vf_loss > 100: + logger.warning( + "The magnitude of your value function loss is extremely large " + "({}) compared to the policy loss ({}). This can prevent the " + "policy from learning. 
Consider scaling down the VF loss by " + "reducing vf_loss_coeff, or disabling vf_share_layers.".format( + scaled_vf_loss, policy_loss)) + + # Warn about bad clipping configs + if trainer.config["vf_clip_param"] <= 0: + rew_scale = float("inf") else: - return optimizer.compute_gradients(loss, variables) - - -class KLCoeffMixin: - def __init__(self, config): - # KL Coefficient - self.kl_coeff_val = config["kl_coeff"] - self.kl_target = config["kl_target"] - self.kl_coeff = tf.get_variable( - initializer=tf.constant_initializer(self.kl_coeff_val), - name="kl_coeff", - shape=(), - trainable=False, - dtype=tf.float32) - - def update_kl(self, sampled_kl): - if sampled_kl > 2.0 * self.kl_target: - self.kl_coeff_val *= 1.5 - elif sampled_kl < 0.5 * self.kl_target: - self.kl_coeff_val *= 0.5 - self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) - return self.kl_coeff_val - - -class ValueNetworkMixin: - def __init__(self, obs_space, action_space, config): - if config["use_gae"]: - - @make_tf_callable(self.get_session()) - def value(ob, prev_action, prev_reward, *state): - model_out, _ = self.model({ - SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]), - SampleBatch.PREV_ACTIONS: tf.convert_to_tensor( - [prev_action]), - SampleBatch.PREV_REWARDS: tf.convert_to_tensor( - [prev_reward]), - "is_training": tf.convert_to_tensor(False), - }, [tf.convert_to_tensor([s]) for s in state], - tf.convert_to_tensor([1])) - return self.model.value_function()[0] - - else: - - @make_tf_callable(self.get_session()) - def value(ob, prev_action, prev_reward, *state): - return tf.constant(0.0) - - self._value = value - - -def setup_config(policy, obs_space, action_space, config): - # auto set the model option for layer sharing - config["model"]["vf_share_layers"] = config["vf_share_layers"] - - -def setup_mixins(policy, obs_space, action_space, config): - ValueNetworkMixin.__init__(policy, obs_space, action_space, config) - KLCoeffMixin.__init__(policy, config) - 
EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], - config["entropy_coeff_schedule"]) - LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) - - -CustomPPOTFPolicy = build_tf_policy( - name="PPOTFPolicy", - get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, - loss_fn=ppo_surrogate_loss, - stats_fn=kl_and_loss_stats, - extra_action_fetches_fn=vf_preds_and_logits_fetches, - postprocess_fn=postprocess_ppo_gae, - gradients_fn=clip_gradients, - before_init=setup_config, - before_loss_init=setup_mixins, - mixins=[ - LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin, - ValueNetworkMixin - ]) + rew_scale = round( + abs(result["episode_reward_mean"]) / + trainer.config["vf_clip_param"], 0) + if rew_scale > 200: + logger.warning( + "The magnitude of your environment rewards are more than " + "{}x the scale of `vf_clip_param`. ".format(rew_scale) + + "This means that it will take more than " + "{} iterations for your value ".format(rew_scale) + + "function to converge. If this is not intended, consider " + "increasing `vf_clip_param`.") def validate_config(config): - """Check that the config is set up properly.""" if config["entropy_coeff"] < 0: raise DeprecationWarning("entropy_coeff must be >= 0") if isinstance(config["entropy_coeff"], int): config["entropy_coeff"] = float(config["entropy_coeff"]) + if config["sgd_minibatch_size"] > config["train_batch_size"]: + raise ValueError( + "Minibatch size {} must be <= train batch size {}.".format( + config["sgd_minibatch_size"], config["train_batch_size"])) if config["batch_mode"] == "truncate_episodes" and not config["use_gae"]: raise ValueError( "Episode truncation is not supported without a value " @@ -309,13 +170,24 @@ def validate_config(config): logger.warning( "Using the simple minibatch optimizer. 
This will significantly " "reduce performance, consider simple_optimizer=False.") - elif tf and tf.executing_eagerly(): + elif config["use_pytorch"] or (tf and tf.executing_eagerly()): config["simple_optimizer"] = True # multi-gpu not supported + +def get_policy_class(config): + if config.get("use_pytorch") is True: + from ray.rllib.agents.ppo.ppo_torch_policy import PPOTorchPolicy + return PPOTorchPolicy + else: + return CustomPPOTFPolicy + + CustomPPOTrainer = build_trainer( - name="CustomPPOTrainer", + name="PPO", default_config=DEFAULT_CONFIG, default_policy=CustomPPOTFPolicy, + get_policy_class=get_policy_class, make_policy_optimizer=choose_policy_optimizer, validate_config=validate_config, + after_optimizer_step=update_kl, after_train_result=warn_about_bad_reward_scales) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py new file mode 100644 index 000000000..0dc381b55 --- /dev/null +++ b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py @@ -0,0 +1,283 @@ +import logging + +import ray +from ray.rllib.agents.impala.vtrace_policy import BEHAVIOUR_LOGITS +from ray.rllib.evaluation.postprocessing import compute_advantages, \ + Postprocessing +from ray.rllib.policy.sample_batch import SampleBatch +from ray.rllib.policy.policy import ACTION_LOGP +from ray.rllib.policy.tf_policy import LearningRateSchedule, \ + EntropyCoeffSchedule +from ray.rllib.policy.tf_policy_template import build_tf_policy +from ray.rllib.utils.explained_variance import explained_variance +from ray.rllib.utils.tf_ops import make_tf_callable +from ray.rllib.utils import try_import_tf + +tf = try_import_tf() + +logger = logging.getLogger(__name__) + + +class PPOLoss: + def __init__(self, + dist_class, + model, + value_targets, + advantages, + actions, + prev_logits, + prev_actions_logp, + vf_preds, + curr_action_dist, + value_fn, + cur_kl_coeff, + valid_mask, + 
entropy_coeff=0, + clip_param=0.1, + vf_clip_param=0.1, + vf_loss_coeff=1.0, + use_gae=True): + """Constructs the loss for Proximal Policy Objective. + + Arguments: + dist_class: action distribution class for logits. + value_targets (Placeholder): Placeholder for target values; used + for GAE. + actions (Placeholder): Placeholder for actions taken + from previous model evaluation. + advantages (Placeholder): Placeholder for calculated advantages + from previous model evaluation. + prev_logits (Placeholder): Placeholder for logits output from + previous model evaluation. + prev_actions_logp (Placeholder): Placeholder for action prob output + from the previous (before update) Model evaluation. + vf_preds (Placeholder): Placeholder for value function output + from the previous (before update) Model evaluation. + curr_action_dist (ActionDistribution): ActionDistribution + of the current model. + value_fn (Tensor): Current value function output Tensor. + cur_kl_coeff (Variable): Variable holding the current PPO KL + coefficient. + valid_mask (Optional[tf.Tensor]): An optional bool mask of valid + input elements (for max-len padded sequences (RNNs)). + entropy_coeff (float): Coefficient of the entropy regularizer. + clip_param (float): Clip parameter + vf_clip_param (float): Clip parameter for the value function + vf_loss_coeff (float): Coefficient of the value function loss + use_gae (bool): If true, use the Generalized Advantage Estimator. + """ + if valid_mask is not None: + + def reduce_mean_valid(t): + return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) + + else: + + def reduce_mean_valid(t): + return tf.reduce_mean(t) + + prev_dist = dist_class(prev_logits, model) + # Make loss functions. 
+ logp_ratio = tf.exp(curr_action_dist.logp(actions) - prev_actions_logp) + action_kl = prev_dist.kl(curr_action_dist) + self.mean_kl = reduce_mean_valid(action_kl) + + curr_entropy = curr_action_dist.entropy() + self.mean_entropy = reduce_mean_valid(curr_entropy) + + surrogate_loss = tf.minimum( + advantages * logp_ratio, + advantages * tf.clip_by_value(logp_ratio, 1 - clip_param, + 1 + clip_param)) + self.mean_policy_loss = reduce_mean_valid(-surrogate_loss) + + if use_gae: + vf_loss1 = tf.square(value_fn - value_targets) + vf_clipped = vf_preds + tf.clip_by_value( + value_fn - vf_preds, -vf_clip_param, vf_clip_param) + vf_loss2 = tf.square(vf_clipped - value_targets) + vf_loss = tf.maximum(vf_loss1, vf_loss2) + self.mean_vf_loss = reduce_mean_valid(vf_loss) + loss = reduce_mean_valid( + -surrogate_loss + cur_kl_coeff * action_kl + + vf_loss_coeff * vf_loss - entropy_coeff * curr_entropy) + else: + self.mean_vf_loss = tf.constant(0.0) + loss = reduce_mean_valid(-surrogate_loss + + cur_kl_coeff * action_kl - + entropy_coeff * curr_entropy) + self.loss = loss + + +def ppo_surrogate_loss(policy, model, dist_class, train_batch): + logits, state = model.from_batch(train_batch) + action_dist = dist_class(logits, model) + + mask = None + if state: + max_seq_len = tf.reduce_max(train_batch["seq_lens"]) + mask = tf.sequence_mask(train_batch["seq_lens"], max_seq_len) + mask = tf.reshape(mask, [-1]) + + policy.loss_obj = PPOLoss( + dist_class, + model, + train_batch[Postprocessing.VALUE_TARGETS], + train_batch[Postprocessing.ADVANTAGES], + train_batch[SampleBatch.ACTIONS], + train_batch[BEHAVIOUR_LOGITS], + train_batch[ACTION_LOGP], + train_batch[SampleBatch.VF_PREDS], + action_dist, + model.value_function(), + policy.kl_coeff, + mask, + entropy_coeff=policy.entropy_coeff, + clip_param=policy.config["clip_param"], + vf_clip_param=policy.config["vf_clip_param"], + vf_loss_coeff=policy.config["vf_loss_coeff"], + use_gae=policy.config["use_gae"], + ) + + return 
policy.loss_obj.loss + + +def kl_and_loss_stats(policy, train_batch): + return { + "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), + "cur_lr": tf.cast(policy.cur_lr, tf.float64), + "total_loss": policy.loss_obj.loss, + "policy_loss": policy.loss_obj.mean_policy_loss, + "vf_loss": policy.loss_obj.mean_vf_loss, + "vf_preds": policy.model.value_function(), + "vf_targets": train_batch[Postprocessing.VALUE_TARGETS], + "vf_explained_var": explained_variance( + train_batch[Postprocessing.VALUE_TARGETS], + policy.model.value_function()), + "kl": policy.loss_obj.mean_kl, + "entropy": policy.loss_obj.mean_entropy, + "entropy_coeff": tf.cast(policy.entropy_coeff, tf.float64), + } + + +def vf_preds_and_logits_fetches(policy): + """Adds value function and logits outputs to experience train_batches.""" + return { + SampleBatch.VF_PREDS: policy.model.value_function(), + BEHAVIOUR_LOGITS: policy.model.last_output(), + } + + +def postprocess_ppo_gae(policy, + sample_batch, + other_agent_batches=None, + episode=None): + """Adds the policy logits, VF preds, and advantages to the trajectory.""" + + completed = sample_batch["dones"][-1] + if completed: + last_r = 0.0 + else: + next_state = [] + for i in range(policy.num_state_tensors()): + next_state.append([sample_batch["state_out_{}".format(i)][-1]]) + last_r = policy._value(sample_batch[SampleBatch.NEXT_OBS][-1], + sample_batch[SampleBatch.ACTIONS][-1], + sample_batch[SampleBatch.REWARDS][-1], + *next_state) + batch = compute_advantages( + sample_batch, + last_r, + policy.config["gamma"], + policy.config["lambda"], + use_gae=policy.config["use_gae"]) + return batch + + +def clip_gradients(policy, optimizer, loss): + variables = policy.model.trainable_variables() + if policy.config["grad_clip"] is not None: + grads_and_vars = optimizer.compute_gradients(loss, variables) + grads = [g for (g, v) in grads_and_vars] + policy.grads, _ = tf.clip_by_global_norm(grads, + policy.config["grad_clip"]) + clipped_grads = 
list(zip(policy.grads, variables)) + return clipped_grads + else: + return optimizer.compute_gradients(loss, variables) + + +class KLCoeffMixin: + def __init__(self, config): + # KL Coefficient + self.kl_coeff_val = config["kl_coeff"] + self.kl_target = config["kl_target"] + self.kl_coeff = tf.get_variable( + initializer=tf.constant_initializer(self.kl_coeff_val), + name="kl_coeff", + shape=(), + trainable=False, + dtype=tf.float32) + + def update_kl(self, sampled_kl): + if sampled_kl > 2.0 * self.kl_target: + self.kl_coeff_val *= 1.5 + elif sampled_kl < 0.5 * self.kl_target: + self.kl_coeff_val *= 0.5 + self.kl_coeff.load(self.kl_coeff_val, session=self.get_session()) + return self.kl_coeff_val + + +class ValueNetworkMixin: + def __init__(self, obs_space, action_space, config): + if config["use_gae"]: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + model_out, _ = self.model({ + SampleBatch.CUR_OBS: tf.convert_to_tensor([ob]), + SampleBatch.PREV_ACTIONS: tf.convert_to_tensor( + [prev_action]), + SampleBatch.PREV_REWARDS: tf.convert_to_tensor( + [prev_reward]), + "is_training": tf.convert_to_tensor(False), + }, [tf.convert_to_tensor([s]) for s in state], + tf.convert_to_tensor([1])) + return self.model.value_function()[0] + + else: + + @make_tf_callable(self.get_session()) + def value(ob, prev_action, prev_reward, *state): + return tf.constant(0.0) + + self._value = value + + +def setup_config(policy, obs_space, action_space, config): + # auto set the model option for layer sharing + config["model"]["vf_share_layers"] = config["vf_share_layers"] + + +def setup_mixins(policy, obs_space, action_space, config): + ValueNetworkMixin.__init__(policy, obs_space, action_space, config) + KLCoeffMixin.__init__(policy, config) + EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], + config["entropy_coeff_schedule"]) + LearningRateSchedule.__init__(policy, config["lr"], config["lr_schedule"]) + + +CustomPPOTFPolicy = 
build_tf_policy( + name="PPOTFPolicy", + get_default_config=lambda: ray.rllib.agents.ppo.ppo.DEFAULT_CONFIG, + loss_fn=ppo_surrogate_loss, + stats_fn=kl_and_loss_stats, + extra_action_fetches_fn=vf_preds_and_logits_fetches, + postprocess_fn=postprocess_ppo_gae, + gradients_fn=clip_gradients, + before_init=setup_config, + before_loss_init=setup_mixins, + mixins=[ + LearningRateSchedule, EntropyCoeffSchedule, KLCoeffMixin, + ValueNetworkMixin + ]) From 01676d9e8a29449b1fc9a3e5ad028eb0ff36f6e2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 19 Jun 2020 16:09:36 -0700 Subject: [PATCH 291/335] correct some docstring inconsistencies --- flow/visualize/time_space_diagram.py | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index e9c43e0ed..d194597e4 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -142,7 +142,7 @@ def get_time_space_data(data, params): def _merge(data): - r"""Generate position and speed data for the merge. + r"""Generate time and position data for the merge. This only include vehicles on the main highway, and not on the adjacent on-ramp. @@ -173,9 +173,6 @@ def _merge(data): def _highway(data): r"""Generate time and position data for the highway. - We generate plots for all lanes, so the segments are wrapped in - a dictionary. - Parameters ---------- data : pd.DataFrame @@ -183,11 +180,9 @@ def _highway(data): Returns ------- - dict of ndarray - dictionary of 3d array (n_segments x 2 x 2) containing segments - to be plotted. the dictionary is keyed on lane numbers, with the - values being the 3d array representing the segments. every inner - 2d array is comprised of two 1d arrays representing + ndarray + 3d array (n_segments x 2 x 2) containing segments to be plotted. 
+ every inner 2d array is comprised of two 1d arrays representing [start time, start distance] and [end time, end distance] pairs. pd.DataFrame modified trajectory dataframe @@ -199,8 +194,8 @@ def _highway(data): return segs, data -def _ring_road(data, params, all_time): - r"""Generate position and speed data for the ring road. +def _ring_road(data): + r"""Generate time and position data for the ring road. Vehicles that reach the top of the plot simply return to the bottom and continue. @@ -237,7 +232,7 @@ def _i210_subnetwork(data): Returns ------- - dict of ndarray + dict < str, np.ndarray > dictionary of 3d array (n_segments x 2 x 2) containing segments to be plotted. the dictionary is keyed on lane numbers, with the values being the 3d array representing the segments. every inner @@ -262,7 +257,7 @@ def _i210_subnetwork(data): def _figure_eight(data): - r"""Generate position and speed data for the figure eight. + r"""Generate time and position data for the figure eight. The vehicles traveling towards the intersection from one side will be plotted from the top downward, while the vehicles from the other side will From 85fdd6334be153f7d740fa7bd3b3fbebc195e978 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Fri, 19 Jun 2020 23:44:49 -0700 Subject: [PATCH 292/335] i210 imitation model file --- .../model_files/ppo_model_i210.h5 | Bin 0 -> 53208 bytes 1 file changed, 0 insertions(+), 0 deletions(-) create mode 100644 flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 diff --git a/flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 b/flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 new file mode 100644 index 0000000000000000000000000000000000000000..f93f6df56568b627dedd5a87e98480dc3a013734 GIT binary patch literal 53208 zcmeEv2|SkD);J-NiYB2D%}Gjw;eFPAWk_jKX`m7^m3c~o%tDe1r3q1?L4|~Oul>@T z=2>Y@sc2T3|HOOm&8d6tz2E=d@7(Y7{?4(Vz1MoyUVA-juV?SI)_#gHH!*7`t1K(l z_-oTfOj1n#+okBw*XG}~vrR3cx(GhE`9i$8jBYNoS~XpWi?tSOgmr6fmv5%8F45G` 
zt@*~OlTA&<8ZbrlMP>7CVq*Q8uG;*i;n&5VNdo4kMw1%HJEM8fB7HjEtY;!wxO>iX zbhBOP=<4j^=iAUI(o4Pw;h!uOn)S7%UdxG@H_Vkr`t8L!i?tD3;OJxLYa8I`FG}37N{cK*F_iut9ZyspVyl(Z=d95?3 z3E!)!5DPilbaAk`{;T-EEdkNIp4+T1$D4=yqPXcoyg}0%L8AFCnvX7SP4MW($=2`_ zZknJ6e#oxASvwv2D7~*Tv4O zkp~+Oe>XQRb=}zwz0C7l^sP;!VFY%5xo~=j2c)oEtyP@^3oO*e>xjHPeb#a|H&(Xv7+gNOe{FZN%2I)DrU|H1LPf7Us z*tvSx*}FBe=;!0#$l;HM;qT$<*T|Xf&;|rM2S3*UJHLi^QbUV|pPh$GW3T?cj<)u$ zc8yrggc@F79u4nJ*M@st?c7`!Hwd)x34Q+zN4t6Yc=}m+xH@_IxVPZm*U@oa!!1oB z{w-fYjk0U(@^`whcJ%S|Z9)1sgZxIUnPVSEXMZ<4pJtAmyZ_eF4e;v^`lD`4`P$O(?8ni^DusT}m4gW0(nmzk_MG0{FA1TQXZ~vtr&6D~6 zf`a_W416$uYZT>2YS8#X`A6-_KdCY;?aD88X?O+vWs&g5w<|55 z-``V}AF0+4)>!|nh51I}mxcMSwlIy;{vWk4|D?*a)X-n*^51P?{+_BdTNqD&zsBY8 zx82aUwf%Q{xPQRDv?TCPT9@B$gxp*`9PNA>dHJ;!`ZsTde)g96JLaY3s z<<0-3t_=CjrumnyG`kW0{oCdry&L{kPi8kfoTjbyU%f;vH`}utk^WW*zIkbEG=6N0 z8mTwjV59L{&G`nJ9Te2$-)i*rH~xO+|B`9~i(KboBEMO4vsjv(=;~MbO9H32iO9Hy6zol1E zUP?=O$%)lB=XJ?7$hezWQ%XQ%tiLfkO-!s)gP^-Kl;Yw|5|{bbDkdf>nsoN6sTT;mncG6;EE%wiU;rIVXUR$FI{xz@df1YpG zv{#q^3y*($(Uh+y_S1Z|zs3D^{pXWFlf9Jui@mIGwu>TrDL=4DChE=kVInPC-&_~r z)i>vvb!*P&;x~(O?zpB45ngn2J!?W!yH_*b@fQ82g*5?$5zPpH@IquSTgvON^Sm*C zwVPN|Chxa2vTp`cL7gbq2C=XmTVDaXI8se>*Bp}KQRc|Kq z+j_Tew<@TseH)*cn7Wu^8!^S;W_+oJ(Kb?Q)vAeL;~J{*Cm|l#EZUH!j=r^uiFItY z4vmcpqiW}fiMcdG{xikz*%h6UO?>>aEB~42@ki_BXLco_MV_{3W{UV1%|(&2|Fd?b zOd{99w!A94d~Xp!WmZDl-YBMs;LcS6&6hPAY^<8OaWF0hpJf$0{_- z@%2Fovgq|Sczj8P%`8wS!|yJ@@TYq*!|fJ4P*P!sHFrZ_?k;RKH$=sSpTSMqfCO0H zggIL!$@KLXL35)TStq;>w?9v3{q?Q6eODh~ywwR<9#Bg6dCsIc0sCBD zos0bnl5yn6=aAED1z=xu!vdqL(6uxHH3~8yWAa$&rtgX`o~qGIl~(M@VpFgQ=nc9P zHbUruAhahTkUq&89IG;TIiADF<}u4@wnN_R zuYl|c+tK;t2^==37@qGlg-bi~sOj*A>v`CDGhIv*BFP zy`8+;NE>?S*h=)y@?-mQpIT5`T<7LGb4mE ztvi#lg5@Q43VllFJ5Li19BV>+qerkMZufD6axXGu(ICF<0|WNKP}!&={3cuZ^Z*Q6 zzMO<#izF{z29vo`{xtH?K;gXLBC7jw3p;t?1s*W0W;{)Kez&NloT~OgQhy_n9g>TJ z%ITf>BkR@p+KbMhyNNa%J4aFYdj26eFO=tp^EQzE>tuvsDIW!N@V`wI2J!{s$l=blMwdqc*(N7_GrB?j9RvuhW6COo>GSlJ^s#$euCnbIu3*AJUe1>d0^et+ zxKq{f)ac6&dL~%KV95DnCF8eDMCYBJfGQpM 
zyp$o8?7$T#27|51B%g)oy=EWOjJV2_JdCkw+z}XZVH|t6V-N{8>qt6}Il!EB-$SP{ z?TLEFx8S&RAFeQb2ik*Ti_3d2rQ!0^L1KX(h8cKs?n;-ri2JR$%Tfg;#;26ItHSew zgjcsaZU~2_5_6`>)Z!n`OzDqf-`x(K}Rr3uV zII9TA+h|VhwJRLD{h{Ptz#tkiFor(iXY+Cn9^|a{cR-L?0fBvn5yzRg=-5aOheT~; zfqSD#yjv2=+Aks}_r8YfZ!f@BsRhJotpQwLK#5P}5SaPIn~dK$p2azDBL;TiEZ@nE z9qW3Al)4m=4rkkfsrz>LygrA-9@mB!dac34au0UyXO0@yxx{n1DO)psCk|XPf=!rJ zD3~pF1SXs5L9cP@Ec22gd23wFk%rYldH)rwKy63RXCmnZM z<$=1e1V2j1;lx%WFs_?EE_n9>diu^|Pt>KLN@5VCmDItK&)HDAT)-_(+6q>a6`?5K zmpfGxi6?z@G5%^ExSTBFGM=mP4pv+M`J+{^Gc*S5mwv{zJH?n!LLUek-<6%4b&#`j zZcARZt)%wT9l5Cw&ceP9YXnKxo`Y$JOM(~AZSkGOTbO@pFl>|%$JK>rVY>GmIQc?I znWQR-?h;1L-%ZA1*W;X08DVti9`1e7S5Q!BP0s4n(z@i;v}@5aqv40cjjH)RrIMjb zN==3FrJC6*jLh^`l@6T|Yjhzrz)0i53Zu17D~upD+z2997%93$8Y>P9ZVu7WDr>`gtny|{~ zP3Vf!L1%qRseD9fLF7uK|BZFWZ}*K&`#RD7RlS)hQ7O@|;`+9y`=|Gb_4-YuLYgTy z6~Eml27U+sPgj0+-qYNqiIc2mhJJ5Y6ZL<~x>kK+6We>6x&OWQ zOWKqsM3LToH|SrL|ML>~+56>qb6$i<06&UkHoD$L?}L9fZX#OOw#>u$cf@0d{$*|Z z!%+sDXPotd!)zVhC1xselSd9ft6*;2$^R9a0f zt`L0J-yJeLw4!Iz=CPCww>Ysj{i$nOGR|3X2x_-!@Pf`6z{Tg23{%cT!tzraAuj6- zt{>k9RPOrH_Y%I)&fXIXY)`@S>=%Ml(dq&w@sWG_A|8rn4#4R`15lQapcgxI!Q?p( zII+4t)|h6a{k1Z9DeZ*2(l+3+jah=u;cK`F6W!_MH{IaX<;NhS89{YYyYiH_E#rb# zZ|C)KyC_Im*b`&jpBP^AzebC!U%_ZDjs}hFfS1*Wg7pC>+N;ZC93Y5;E!k~Zbzlt` z4eCKEDq5FZS9;1l6(0=iJaoZLO#qLxdt$F1b-W>gPwBn%J@^d8d43Vekfy4~4tDMg zD%(|nFA;@F6@9osxntCM#(dOUz8e<@Dv|cpmYntDhj>}@6I@@CgQMH$!lQ1V=pkis za^5Tw+K4~IB{6gGYmeh#JVISC>ZJ=zh<(N5tx4eSS#3go-XUdduq)DlNO)EJ~?+_Cpd8f)TEjAu_RFKmyDb5t>DF{?#8V8yEvlG2D^0`fT?^% z9Hr0)E*)--Cx)itPJ>u(myH)EoU#UN%i9sdqC`$Vtrf2Go&=v>C(^uF7o5H1FdCU? zksA-@-~&M{aEx5FuwlBqnR)*`r2q>E$wR;mV!3 zZqrt9y?UDV9OF+PjmUtI9BtS<@ha$)x`4QqEWE08#jz)sW9`Iiy!rM=>BsssIMg-? 
zrmVY-`8HytrfvcbuDgnxBvR<$C^tcISrL`YUx!;&Gcme%C(hpF758+_NI}A#uK3lh zGs3f5oUpAnDX|*>MQ5&X`svz`J+vLXJQj>M4u8hu-o-HH{1fzWYmG-j*TQtN2Sc~N zgZ$2WuZYiZV4PsIC`gxuT-I3a=77p=DwqCTA5wo3RPBkGBT+-13AG6`|nyY9|D3d<~VX8={fL(@>m@+B##o!xn-P(~8@iUxzke9Mz69){BShd1vW}@(%d+dLb&B z+(Vf;FKL{Z0c+Lz5Zcz{G}!DO)MV3bL6;7nX)llG;Aqy0y_Rw0&L6WNb;0kkd|xmq za{J*yvKrFXo4{#yIp*1$B7462G~L;$H6))iAl273S=Fi|a7spx z>GxKLl&|Yy-?buKvRfau>jy#c(yq9XAB*MI(O|ly3OI*4E?DLuc#N>epf0^hjK>TJ zzc`gd-dHQ>e&Y+Bb}fc&_|%`d%dViU_2bcduLdmKWDTI+gZIJ04F;7ZgR%cj!JRL6 zP-o3lTpuwE=n{E$X8js)yFLIOs?A3Q^&HUMFpXT#PkJ#AO%d81ab0)Qqpx zu1h(V_nC(p$-D5}z@uE-qDR;z8DYwO8MylJ0TiD-25J|?V8++!+{Je4U{W`gh6lgn zl=mORi{s8hh;ADw+A)u-N{XdbqbDQZsXb5G%nT{%l59jl22Wzaei)!U z8ss1A<7j0LGW&#crrz=#-8cuQY(Bxs9Fd3aq37|%5lien+n86a*OQeU>qU2s9)oVX zv+Zv)Dd2gj3i%pp9=x)R%{Un+jNt0zp@pOKN zPE0p9kqaqqPmWL658*ct@@90jf&RG@*^twA4A!oOHa$Y{5Y2$aBjeC}Wm|T*T!ZDV zv*VumO{b1u(_#C4X<~Qi9Xe|b6f7T-%)N7MkK)s|qp!0ns;fnS)wmUcd&S~JZE-G6 zcdCRqJSR9ft~EIs*Awm!9*1;mCwOGE2i|mg1@q2j!ItD5bke#?se;1Y_K zhWEktVkfNMpM~ewe#OI;ZE=KIHRcD|LZ2XC*yJ-5AM+QXV`?$Cc*Q!*pUfA?TIcfY zUFXuVBhKKdE;(S_ZZI6t><4$V_re1WIXbkJ1<$sFfM@cd0@L~|1GNtA;lS-SFmAyk z2-toF9CXwmRcZuOosWRKv&Gx8=xb--hVF+7c{g=lE_nVvtQ01wT> zv6t~xY-jTY#P2PPp+1i$QJx%}z!!i`p*azAv(rUR{^U$Z7?Z-ynco95411A@UHXw$8pVP!r#z|qXG^r1n-8f) z_86LEOqRJM@aA7E!GjycNT(g0X(vB3*dVS>-*9a)=$tCcTWrG0Ld@Zbn;*WYiJ}+j zK|E)>0o7j2!cuW>+JPrY&JUkReBV5VYkNH4jPh#yOowCX-W&MDlX3g*xw5fx;%xh| z5L)k43_F%4La0F@CV$o-2|4|7%ezD@Sf3B(tc)gBc7^@&fs}lR1&@!*;99~%kWtwM z674Bnt5Xfm;XUA0PIp)llK^#3&cHyQLAYk-G4xdFhy6zT;q*I+pw)dQjH)Vz-3Ei< z=!-f!IVqA$x==%lzI=rv)6#L}D<8U9?II@#uBY;}idN+BgX)hZoL0Z_m{Bqk;`gq^ z$~$>FYpf&HJC}&Z*La|qUR&%q-hi{Lk_YUiLuV!UVZ3A~n15dq z^9BvX%?n>~q3=sz5Y|Uk zg0(>w#@;i;wD1CSg;rR;;|3mdd&8}EYmaM$FK}D=bPTl0;ubvV2ErXzXnRjXj2NCq zvo$0kpQiwqj+nsV!zPe^WLZOwSRZ(?dq3BubPO$cmWXo=5(H!9uYg_~d4YHOeH?Y_ zK7F?$7c%oNbDDMTFbgi@>Dxt6EvLkW9#J5-*W`n-;5e^rjW`<>Bu=Ua1;dCN;SfkC z(jgW*aChN7oY5}pTbG{Kd(euhC#*b^aj6>R%Wjt!f-uB7E<-Kx%=Pgb`ZBJq6 zSHDBSq=rN zJy0P<0mJs>z|FooTvXpxc<NHm9V@jKk( 
zeX?ljy%%8GI3x}YqcLzk{N6ju^K=#sZ6&v83f4bnf+IYC%DGrt?O0Ypa zYB|wsnvqmqCT? z)JVjc5++3G^NCs%MsND9UmP$eaX zr^6J9y-yfDe?J`poF&PyZZc%S_ARJfH5w!{&(rjeg}nb&eif;fASwQ zZ)@-TCbqMhx&J-?vY2NRqR8*`-JpL}{?AL`Xa40Z*YAhkGV4intVHAw{#WB$GY&TO z6Ik3-{v&x?-5XGUmbcX$!TQ?|@wJR@eFyt%Z0o!5(f5OIsEOinF3wG$=w|u+BmDj| zd1il&#r;O&fABb5f_Jmfz7ync?WW6bWc%-I_&xh)?$y-$FZ=iJd`!`GJSTNcxk7 zBxsNZe~emJ67#G(9=lXQy!7>0;tXy6t7pSl_XGA&Kja3?bJ$Lf`R`~ruNDO%Pli#O z1v6O`Ye%?{eMC4g7G9D*nXQe>*?J&CjD8I-!MLI=*y;&+(hD3n?@o_tn$ftSf5 zrdl-6sQaTt+(UJi~(V%k=rrFGm0tTVlm@N#XYUq5S&S>MUqSD>PrTgSUzs z1igf7g}rtCNS8Y^IYU`**0b;o%>b7y5kJN=(RpR`B!*ly_BNZMh+&QbnJgOY9;H4JhVY ze{$eYm|9a3oxh#9_i-V%c|<6mCScyBBZyV_YjU8IJk#hO0T%5Bk~8Nuh=TlCa$HMC zm^eR#&D`jKNh37a7fF=|{2wbMHWZ?Il8M=Q8qW(+5`MhD5g3 zioIF3g;+aI6y`fB3mxlv@&hKR^A(;O3tw!OC0Rj3h-ZIOw#7|Hc+IE{IaN4-TzFZ= zhW0wd+A1pXy-zV{Um8H-MrDAh(wG zgY8H{oGgWNl(WE1QjdsT*hW@wIZK1J&-2%m_JF4Y&x7jXtB|l&7AyB(UuI@{+O%?c&q(9%&ijeD%Y}mQpZODaT>C8R;EYpkjWv`=p z3g~W(Qhf3Kq!A%RipxTwcRPro)V=hHhuMg)hUNsYw%QwRd zs|UJN=l0l4%pi(ws_-Rq z1i5%OLzk#lMhbgg+|CXyU&O$ru47T{IER$KIG-olHM%VD%0$*hW&;iz>x`C;iFAOw z1W~eX1L>hhI5&;*hP{6_{j9i|&0QhOl!SY@$8|HoO?E8lyty-(M^}{G{!-6fO}j$^ zI*ed8+8nF9@*2D7og;ypZgAC>DRlY;QDSeN+C4znC@X0{Ss5o{jQk@B9YeA=W7&)wP58k+^un7(t{Pdz7a)twYoa4??hD76( z3tEO(NT1Su{;4>iUnU+tc%0o&2_{(@BlwkN>#2Q}BuTO}BN5hnNmMdlc+yQ$xZ6sV zt%^Fpj%myj&fN8ot=WAOEbNR}2f1U+ctbf^+-neDtb;ZGs!D5;n(|ptt>H;-uU8`z z49$7ljW%(d-AH1#`WD<+B994geOTKpO?FJ;3KyWXhxCZo&KpCe5u359 zC<<&c+p(RAO2WCP_u|%33qh*%7W49n7J9uO!t8g*u#0*b%xiiHTl09oFnL-gF`4t3 zTE-t?o%F-8-o6jJBb5ljx#!^Q(50kydpLPK|1Qc*P~(4eAI-x02ap=)Cp2_O4cmPw zm(%B0kyE!~$r6>hRTth>rY?-g@L?}r zY-ARFR}#%Ad4B7;XIRHp-oi_IM@atA&j-7I4Pnqa?5HijTjX|uB-InyF5@~wyy4Ed+Yl+;A;o6erOca)4Kr~7bS(gJUeo4 z1HXc@^dVN<<1y3pwZt>h1IQ3#8KKFOR5rP^7uHFskcKD+8LZ>QY-@WPX5LxN&i1GV zu?Qz(GT)etR2V`O#+AYGmjn5-GjiBbXI*CcC6PQmsw`CObsoA&*O8ckK)R1kXS3b< z2xs-PNBwT8tjD;8P*W&m{GKcE4L_gNxR*fw&hBi&@Es&}b2dA;QHxpGrSU%JI}zSj zYmiGu)=lv__-qN~ooF=@_}a^Hg-=i7Dmj`&JL?N$s*TAeCu#1q#~xv1a4~s`>)7^f 
zM?kwW2Z~Kg$Znpgu=PAeE_2*?zND0p*lgX%PpC?Rr!y=`*CQp&Lvt&cke&77QX)4z28_IcxVnYgpDwsm89{*bD6wtVz14j4ma<`&)L5bfnv6o`%g!T?)TZ0$ip&{0+z54&ppJB+TIU@a}1)$=}lU~{xez$oo?9( zRKh-SsrJsolcUUpr!&Lh@(3Y6U9XIoP8q@XGwH*+IeU=Qi3;TO=`SSYX$%u@@Hvlq zV8W^-PZHg2iwJ+dkPx2?rqJF8J`F|UvQe9LKGT-nD!h%x5kugRrvf{+OomO!G~#Ra zdx;N5DU(%GGWaoxec0#(Mzk$e*qVc??BSbr%<@$^`D)*T<&;hlI;%&pfghE4^S16J z1rh^EN7I!C|EA|21Bk&M%-EOhX7vZG@(GTpvp z)|P!FcToy^xVy6v%ANU{>U?o^`nTNATL_Vzi%u2UsTiFIN{PvlAdb@K+>HiPAuzK2=kRtWDu4`c)S zJ%apYV}uS;er(rqd$Q+vI(x6#0j6J7pf^&6kSxO^EbQPkMs_WR+j1w^mGY@<%bOym z;im%}5R(A_#?{A2arx=r$?ZpZ49Y@B5F*bEF#htxd@hx({YF_=2q$UL)_PYOu%Y$1!KOJZojumM@s_ z9A~E62q)yGvEk-v#m~co$u0E(WaX=4Tw2$2ocXY+WS-m#SU0rc`y)C@Y^}92>oaR3 z)XY3V&gg~l4&T-UsW=yKej`o>uHQ)Z^(kY@)g57GxEK=`-$Cw=5dhQ6$Iy-^A;Qgx zkya&47}}3)^^Rl*)Rmb}-;WSgr%3b;XJYP+aZo$54I7f41Lso)um%2NY<7_ZYvXte zd#mN+_|7qKtZ*e0Uo6IYY&eJpdMg>v(T=S)?@6|MajbWx9n%S0NoJ@#;*|S(atZr$ znc0;Z>^#pLRrd~NHD3BmeXBlE;pefjwU$I{Oe8rznq$WHg>-JUDzTDFCSs~d&Pwk zVaz=KoD)&}FFNbNHKd+BF9~B7ejWKI4^k4Rxe48#tYyZD%OQ2XFWWL{70kTXj(PiU zV-E#t{Jc)eq)1X4=gqH1o??5l@{B$9U9f}o+_DQ3_e~(P9Od!aQA_g7#)1W_7IX6D zM_FF%F4*}rfn9BXkAQa-xwfh=gt_^#Fz;xtM8S=%esKjXlcj|tx0#T!EALS8K}Dow z`As~m(v^vQPNHRFK9Z%enegoTL$=T24!Kyb3hiWM$*WymNw$d$QE3%H%+0geBo{GeKa1?3(N|ME_IKe)wR3&zsZ;;FkC1N;#Eeum!1-Bkg z1K&$KP@>IP^o@?=Y+u`_0*Quvj-X3)bsJb!8O;|awA?)3HvT48`Z5`fXZK(uunw&EYop4_q2$Ju6yfN8{rI_?4wVef z?+HuuP1v>@!yw?cI`iu|hX49y4;|B`o z@+NNi&35JY?2p)rCj4Lar}-)W$%>!ZpSjKYBNE5IJ8o4U)zp*7p0(6t5g)(G?vJ3zm0a1HjR)nV*n8K}uy>3es_7L8_|gfy4im1^kc4o< zf@1<$Q0NU?_11D8eZ7D@ZHvRF^#u*1iQw%2p3}N|l3ukD$M@aOf>$dS*nGwee1b!; zEFm6T(#3Jt-$-Sr`H&*8mT84}+8P%kj3_M>S8ys(pC zbfPTF?AigvD+W-vY&p=7%D^tOeQuUSq+a!N{ zU9lFonu>8kc}05E&=|9$6X28S2r8y(j$`Ie0`8$9o+;Xgr}zZw<}IL=Ia+Xfx+hqb zoaDmGO|Wz4Yup6!6z=BpScJO{4d;#%$@2Arq-R9 zi>k69gLQ_$uR&PivJqxQS<~~+{BgNk67)3)0k_lp(5^JDA!oNpu*f@YO+o+E^s9m$;;%9m4yp>x8BJf&TP+l#Yp2>9$~!|YB4o20Zcxxr5z8vpsAV` zFh{!;R-7D#cDKx6*5-~R;chrr;PniRE?@IH0|_-u>Be!X{raz7L5SS^S19^_*2(-LBR=OwMs7zUU1)-vV0 
zvgDHuuuq4N3t~gNllimLG30iYKy$Dqrz*D`9IXejo%Nxxbo4^BbkIceF*Yb|v>X(@ z&(arzwBbR`63p6Dg|5f#(f&*!ZV)WM_71z?=y3}=eSJ4lcEBG3Ixa)oTRq@yow?w3 zt0`>6+g3P3Ya8|`u11M69m31?VmDt`aR$yl#OYxUcktOAOfr;)^%mRF{ZT0EPshQo z>(02!Mvab|norFV{UDbPq`^5!*!@#5*ZI;kw)AxnlsX*X4N6PJ?%P{Kkh&&54eLWV zUSC`=NEeCZbzUE%qtJI!ErZ-oBcv8&Qb0b*rgXV1wQ^ZKH5z8WT!SgehpA?7RTy?j4jp=V@XSefxZ1rJhTT#D*@5*uhj#br<2SLm)OaFw zSf!78GZ(iIGl+`TXI-q@i5O2$oi6xaaUfSGYO~(n2_?6uT9V}IEx2#zF0T4Z z1eh)2@PX`I{M^f$>uk3gllFAss^6$U+}XAGvFkGI-F7vmo>PY1Oomy0Oa*R8IKE!I z85QD}!?T%&n6cn3tUnV9U(GA&D~-vxDsC`zTrr9*GQNqktxgNXhV22x1y9l5`4Vhh z6^A3cOR%aVU#Z!N5pdB-hRaxUk@j4wLY}@JhI(nPINMGIRB#A-Tslcp`?%umV=36% zrz5>n)rTy48VBtcu7h)0{o&|`gIxP0O`dI+J!m>26kfjV040FcWF5<`|M>hK{INSIu3^od8tdj#L!vrdl&H zu>0X4R6MXB?vA~|CVjQTwokfXr#tZ_ugjLfif(6%S_QqPD&x!Hy_+U??8R=*J1rBJ zJuyLk{au$Xt~z7>oUOM+L60bb3!h540nX#KJcKAmHZ zuX)44V&@EQma;VrRMLbUs}i{FzAkj{f)l)#R$H)-HU)<>@=&6>ja(bE4u=JAg&r2} z5ZZS%KGyPv6YF=v-YK0xdi^te*7knG*~Q0P{LviT-cc1tMNH!?wyARVB^m%Rn+$JO zPR7FnZ*vBdZ883R5*<)nMOTfui(Q`f5==C`fqt=fxru`Yf$zH*%zCvOuhkDFQw@`W z@3jIuPDiF{o($*0T+za$6Tb1v;%v?x#{st!v2?Zzl^7cX?k8h0+iwL7=sXQhHsqMk zdi{>>UEg3w=t5|fWyBr68V+wKl8fJPr+M*H*%dOJciNIS0K}*3ny#ehPphk z!-4(Q;63xLxNz1suJo}wJRLCw)a(s-cOGq_bq}2|%tHYNBxR#lQ6O}XdQUTg2LW`c z$EATgVDXDD@Ww9`eIDAvSt6Z6QLC-Ly`zy@yTmkIR9 z@I;X6k_g%lExD5}I#gTE2M-TlhWiI;VaV8>+~&*#y3+PBciNy0CSTX$#79O`x2`4h z$%X*xvuzh@cUHiDS`xI?l=m1aSp*-xUg55-oPZn3%4nN@x2TlrD3~|#zTpG46jZF6 z$bGruOvd^Zf<|cqEgIqorwfnM1M6DBX(w;Y%k`q~pN_!~H(SF@lVZVax}$iNbtt}) zlZVr@;^~t%D@!Cz^EhM0Ffd%T6t#sNc&VEP(C`t9VR9tH{4e`>MlLtGJDbkZS*1Rl zRfsnTHck~NUOvy290&z9lZ8BYsRXboQG;<8l5o<8Ei`Wa2G}-&aVq*ZxskQW5PdhP zWYL+foY5+4s9H1%0}uF6gL@AR)>a$grpY%s`$s}dEZ@l6Ff|?~OX@;y)>Og3;%Iyo zdy59&UB|f(zfI2@rQ)5qTJG-oQ}nL(1{yH@B2Dfp^Kvf-UdUuw2`8D(@z6zgD9yP% ziQ9&IXwaP(G@;i)>{&PfB&OSQN3MEfJ6m7KD}Kpk-VK3AAKp`oy55|R`@RyVrRH$- zYZhG`JDYR7UC!C_ictKVHV%yNpc$MY&bjrHR*j3II_DPio?WtFostW%-MJND)vCc} z%e7;hKO2GTr$m8*Yin-bEYD9KF+y!|(yP&&bHB1?}2mPrloPF6FZcFEvhHE-jX>e4v17ZPmc(O-jSMGmq0qxz6j{M;sFT^FvzGJHcbwtW1hdK69VRA 
zT8$}-u#&`t_ySOken@%Jai}xhoqWA_o^u?+UugtpuwTX_fFta~)&U3(n zL#$BakR_MNKM!&mUC9>D0}#}+VSddy$;nz4TWu+UT~$X z1|*iJH+&bpBSsyRArf0s@p3^i5m#V33I#A}mZv~QeFmJ*E5W@- z?a9!-!E|HW?yy>36^CCLK!o{o(X8k?{k%LH*kEbA7}O40PfDQr=Tty)yAB2?+d$^b z_HeOw6ppU1ecEGTSh>RzH~Tl7w;$6U11smkqzy|sUJXGHosU#?s47v_ z909SWo3U%6D`<|J4crY+Hs!`f=vZ-$rWl`P2QRlpC!1I-sEdW-p*!KC*-5-^FdV9O z214+tICw56pr>kb;X|MCDAQ&-_Hs?8i{_rDBTU}YkT$ynM-@Wwv)AJ54T-&`Hd6PHk()%_c_-K7BRdUS~#F!6ZE+!vf%R@cZ ze`;GA?Dhqu54B_4E`7o0^Cjs-=K$`b_z)KQE*j=c>qoP*BT?<@OJ3aNI_|2b1@|`l zG-bW4gZv%ea`dHT!I<;Trvsn!ZGHZ10K^;RXV&Dx>U zr(@h57ayGW={jxeF`3b4r|>@)w+iH%JS`#LIb42wzTuB~F05Nx6WdwM-2a|`IU%zN zQRH`OzU^1>Cy~I<{L8}B?*-8Elu~1CtMMoD2mh<*!a_DT(MfCW@BjBXtG~v!elvnU zcw9?;N0Vg4{wv~I_32IAc5AjLzh`%@ZEM2+Wp|pN@}I2uncdNA)*q2L{@rn{bIp1y zvSTgvSj5M#@_PxijBBYEeh+V16U}pBT1&{EyT1O*0FYEY2CEmG=giW_;HnBK_R89V zEP7sp(;ssrLaPUiFp1zw?kkf6!Sf*_^fU=e-h%y~YZA|W(^=^0c|=Oti^U{`;}N$6 z(ldKG>0)+-vp*zBrY))>qi=8IBzDAOf-Ax!lVO~)`EF8mH;E0E{lt~Kw_`SbnnK9` zif=Lb~oJQuE&lU4rXgNS(3z$C2Wk!NWSvD+hpuC zL$2R9)lY}9DQ>>J$%!ACp8j68%%Lwi zgezB4eU@@?QksDT#DEi??PvQ}_Ef@Vc=K zo2x8GbVjXZvA6Tdl>`0Bs#)vs&EWwUe9(%_p8Epd_1gi9-U4wH3TfYlQ&|2tV>pX= zJ;7P2G9ZIYiK)CFX`OGz!p+>tsF9W^n`%PJw({7@B|K8O=q0R}Wr$htd$P#IGug?> zQl$2BBw;7*FiT-PoZb8oHp)9=d&{p_v2hxEVYd(F3%Zjm|IzH!VhiRQJdNdc&Bl)N zk73%PDTdQ4rOC9mrfgb6oKwDJ4HSB(LvBN6`?{LY~<0+yw)9h3El4i29MQbhffTJi3P^| ze&v(cjoJRpcjnv@n}=Vp+ZK7|_ofS7?5W3ew^S1S4LwNi|7-8r<7&>fKPh@pB9tB~ zq(o1T3Fo`ksYc2u&q;X{8j3vfi1L^o>YMPSq z9Jz{tuO5HtEC9!i z2k5C5RjOv!pN0h3qM>m8E43+!Sfv#)eeWHm&d>Q&xKV~ajZLNP-x$(GOZKw!y#v@B zvW3Te&+AHWe&w;p6pyDDv&COy=ciVQ$N09@jA-ohyAd2$yd+p}Px%FwF1? 
zT)ryIA=r{ec4Y|n=tY{NM?n~C_F^ht%?g2@#-rh$-*I|+zc7E=41Jov>on?|=Ahra z3-G99FFME-vjsbXP$kNgjGL>C0XYe@Gb9{yCkIj68@e#@RVpfOW6^TeJTi9D6(&8y z58WHvCn!}9Btu?2B}@8cv#E2VFm$sE4V)PS>f;_06%$3;bB_WYTsMsicH=>+5IZy8*9+)oCS2!FO|%~ zo_ciS@K&NLFv@pcky}dF#-?x%x4~oDN%R3}9Kb6)uts+?7N^Sh6Qf1~L44h*w zP#L(Kj@3vatv+>7W>7>uFTZ7FotMJor&F-{U@=ZM9SJJ!>SWl{RkVffqT_15C5DB` z*xY`Ec1#ocmNxORKJWqgdH)D(T(uv6JQq*0glnM+{`=`vcVql6sg}6iNdj)OEN^By zgGsgncxyIn!qx7pDAsmhs_Pku^*9c5XDp*LXB+bLl-*dr6eoNJGSo@mO3?k+DtOuc z3m95%WlwKQfTzZ)_@?3n>HW$a=Qt|SY#2aK8Jl1e8%PEFgXm2!dz7)&!0~}+SugX2 zFex^OTrz57b>z>(dL2)wFWspf5b-$D#D42)NRA85Cmj*eG@pUb5It!qoh+ zm$nSWYfhl4s(>8lF{t!y8tL|MYC)}67!+CGfwAXrGK!HiXy(R5xc%lKY%#mR1}l%G zCPH}_H%gxJSNr4XqDlFcd$OLU(Du_30pe@hK3VM4yBddRmqLWuu)8{TWFkaw~%iS|*>B zrYDVOmvzI+`K9D=`d(5KH3Il8Y0$_k1HZ++A=2FhZ7bDKwvQTaY?LLzVRwjmp)1Pf zo`I^wZJ<_r6%L&=p{vKp(L;hMa9~X|;Z&Rfn3_hunec#7ysk^S&ss5_dvz$?GZ|Um z8^!Q?_IzfFK9|}Ayd>=n%eYRf8`$l)2h+2>6n0^NJ$Ao!jP0rXigbLR4(EfOgZz#W zB(o}qQ5o+}+G4bs!cti%b?eWl+sNW+$4vG{aL|O&d9B=M8Ds+A#~%(2a)3vwk?gig zM~2&N6eidz(?){}#92WbyBixaVPhSc7KwU^-xuxC1M*MRo3 zI+)}P_VLMJwSqygboJj3_JNEd-gH#wK zM1C+BIYH0lKIlKHU`;0_vw^O@bfSeXqvd2p&0F;7t+!F+Sik@f`l2YWT9dxqcL5TAZwyF;|A^XkbbF_4qc#2S_4BMUw0y-rnQUc2iMUa zZr3nykrDT;qV>7i^N^l68AArxxihV4-H_ul2)z#~5gE<9q^?$z9yjr#yviCG<_7X z`Acs^YcjGXcL!Cs}SaAQ!5{*k0{R z>I7RsyVVfG=kLOO_Y)wt!wR?NxHI)v7BQP|UuO<&d`9Xu&8X6n4JfkWI# zy&)w2HVN)~gN-~4Xkgn)v@9+%&*C&NN;@31g}x%*V@*_-^CR~BXY8$g^-!MK!tOS1 zAf-O1Fm_ZnWGePAcrA4e76+;^Qw^SQ`|4X!Kb1yC?a^a6-Z7S1jc#PiXGPQErIyHl zu!Y7pDAC4)xeU4I4pDj!C@-;)=-gP1ezF76-=QxxaEN3TZoPrm{S)AUq7*nfg|e;E zy0k=n4AE>eqX~I2sB7;*TZipnvc5hCP2LCSnx9qZtht_G_w`w}rC}mNM;nulou2Tt zPdBPrw+GHhE3;Sb-s2AOS0qQ%LSav9BF`xF5U(iLNO0{`tiZD@o|Ygru1L|^2^eKSVrOOT@ct1B?8c^K8n9$4T$_!w@uHnq& z>*y`?M-^|wb(S+}c2*d~SO-xx`D7C3JdsW~u!AOy(A=fH3y=GDU&IagmE>StYI8qh9=Z!68Dx=P`l2Dc$Eo&LrK1xfflag zk7PMjYr*c!6^K4E1qVMoMvmOtgu2t#lcL~O;uW@w+-}Zgj|?I!6sc}3vS zza7I)wHi`iUkM^Ga^RX6OK$?qh&IaTy(rxGzZnL-W=_3WLqyP0z90XWlH1u=dgoAn?Mo!WB1 
zu*n`%cGtjRjay6Q)o{$jx6y%sKrDYk0G}4vDX|hR}O0}5%Jx@ZftrT~* z)q>H0L(Ik@y4(nN3y?mw8|Cjx)1iIJNkw}<`uY4?FaBTqwK)G`zC#^B*SeB;-RZBr z%JkRk{ioKKV@3G~#Ut_?c?91ViPwYwRlkx@SZcNZe25`BjQth zzON+y$Il=00a4Xr(Rh!392ejrBKthxQ~S(zvR&#5VKU*uG z(w8M7`4RK`@Ak(n5Xr6hA(ND2F&|&xha({Ak2`VU!*WSX{G3lKkSzIY;~(1>wf6(^ zsXWC02YZ5DBQ2hZF6?7_;`1M!5Gjt#`~L!6qredcqjZI%loWTdl`P`!_w0*Bc^Bb_ zy%pX31^m~KfTVp9%acLThh6{F|4`nCWdCcdOD}fkB#Z7a&Fi`l+ZV~aOVH`kxipAz%^k4zMjzyDtT z_K)>;*Gx*j7l_AS=N)sxj~M^sdfZ>6$Nk^eC;s|ak9&xVJ`4QRdqHw(SM!3hF8F(T z+y6=zo!n>P`b#5!wh@rj+d3i!>qPP+=J((IUf?5=Tk-QMDaT?yzQ7MhK=Qqy28_b8Dp`D^1J+n0A%?Vol{ESF;V$L$G+_rBuyR<-Nu-S@}##OI%d|ADK&1NeE} A?*IS* literal 0 HcmV?d00001 From dd24fb06094d6c0bca33bca86095b82a468cb13f Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 20 Jun 2020 22:38:01 -0700 Subject: [PATCH 293/335] Add query to produce max score line in leaderboard --- flow/data_pipeline/query.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d303341cf..a7a0b794f 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -44,6 +44,11 @@ "leaderboard_chart_agg": [ "LEADERBOARD_CHART_AGG" ] + }, + "leaderboard_chart_agg": { + "fact_top_scores": [ + "FACT_TOP_SCORES" + ] } } @@ -594,3 +599,30 @@ class QueryStrings(Enum): AND agg.baseline_source_id = baseline.source_id ORDER BY agg.submission_date, agg.submission_time ASC ;""" + + FACT_TOP_SCORES = """ + WITH curr_max AS ( + SELECT + network, + submission_date, + 1000 * MAX(efficiency_meters_per_joules) + OVER (PARTITION BY network ORDER BY submission_date ASC + ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS max_score + FROM leaderboard_chart_agg + WHERE 1 = 1 + AND is_baseline = FALSE + ), prev_max AS ( + SELECT + network, + submission_date, + LAG(max_score, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score + FROM curr_max + ), unioned AS ( + SELECT * FROM curr_max + UNION ALL + SELECT * 
FROM prev_max + ) + SELECT DISTINCT * + FROM unioned + ORDER BY 1, 2, 3 + ;""" From 4e6a9b2f62d2a1617c796ef051865e784430675c Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sun, 21 Jun 2020 22:07:37 -0700 Subject: [PATCH 294/335] Add I210 edgestarts --- flow/visualize/time_space_diagram.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index d194597e4..ad8443da3 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -358,6 +358,22 @@ def _get_abs_pos(df, params): } elif params['network'] == HighwayNetwork: return df['x'] + elif params['network'] == I210SubNetwork: + edgestarts = { + '119257914': -5.0999999999995795, + '119257908#0': 56.49000000018306, + ':300944379_0': 56.18000000000016, + ':300944436_0': 753.4599999999871, + '119257908#1-AddedOnRampEdge': 756.3299999991157, + ':119257908#1-AddedOnRampNode_0': 853.530000000022, + '119257908#1': 856.7699999997207, + ':119257908#1-AddedOffRampNode_0': 1096.4499999999707, + '119257908#1-AddedOffRampEdge': 1099.6899999995558, + ':1686591010_1': 1198.1899999999541, + '119257908#2': 1203.6499999994803, + ':1842086610_1': 1780.2599999999056, + '119257908#3': 1784.7899999996537, + } else: edgestarts = defaultdict(float) From 3a5508cdf7ed9ab002294d4130ec9c04d4c2766f Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Mon, 22 Jun 2020 10:02:36 -0700 Subject: [PATCH 295/335] Replace strategic mode with the new name, sumo_default --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- examples/exp_configs/rl/multiagent/multiagent_i210.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 671e18d5a..5c07aadf0 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -76,7 +76,7 @@ "human", num_vehicles=0, 
lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + lane_change_mode="sumo_default", ), acceleration_controller=(IDMController, { "a": 1.3, diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index 433489780..f468d37c6 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -136,7 +136,7 @@ speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + lane_change_mode="sumo_default", ), ) else: @@ -152,7 +152,7 @@ speed_mode=19 if ALLOW_COLLISIONS else 'right_of_way' ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="strategic", + lane_change_mode="sumo_default", ), ) vehicles.add( From 3725e834d4b2d08f0010305cebd3e056a1026a8e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 19:10:36 -0700 Subject: [PATCH 296/335] remove trailing whitespace --- flow/data_pipeline/query.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 717d49a02..fae7355c0 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -225,9 +225,9 @@ class QueryStrings(Enum): vt.id, vt.time_step, COALESCE(( - value_lower_left*(headway_upper-headway)*(rel_speed_upper-leader_rel_speed) + - value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) + - value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) + + value_lower_left*(headway_upper-headway)*(rel_speed_upper-leader_rel_speed) + + value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) + + value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) + value_upper_right*(headway-headway_lower)*(leader_rel_speed-rel_speed_lower) ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200) 
AS safety_value, vt.source_id From f87f67a817112525eb6401d3803f9860040680c8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 19:18:51 -0700 Subject: [PATCH 297/335] fix CASE syntax error --- flow/data_pipeline/query.py | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index fae7355c0..02149f204 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -25,17 +25,6 @@ "FACT_SAFETY_METRICS_AGG" ] }, - # @brent: are these needed? Is there a race condition here that may break things? - # "fact_safety_metrics_agg": { - # "leaderboard_chart": [ - # "LEADERBOARD_CHART" - # ] - # }, - # "fact_network_throughput_agg": { - # "leaderboard_chart": [ - # "LEADERBOARD_CHART" - # ] - # } "POWER_DEMAND_MODEL_DENOISED_ACCEL": { "fact_vehicle_fuel_efficiency_agg": [ "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" @@ -246,7 +235,7 @@ class QueryStrings(Enum): FACT_SAFETY_METRICS_AGG = """ SELECT source_id, - SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0) * 100 / COUNT() safety_rate, + SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0 END) * 100 / COUNT() safety_rate, MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 From 54ce4ec4e66d23646ab4d2f7c75a278ee81c07c7 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:13:40 -0700 Subject: [PATCH 298/335] reduce time-bins to 10s --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 02149f204..d3c80e496 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -545,7 +545,7 @@ class QueryStrings(Enum): ), binned_cumulative_energy AS ( SELECT source_id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, AVG(speed) AS speed_avg, AVG(speed) + STDDEV(speed) AS speed_upper_bound, AVG(speed) - 
STDDEV(speed) AS speed_lower_bound, @@ -563,7 +563,7 @@ class QueryStrings(Enum): SELECT DISTINCT source_id, id, - CAST(time_step/60 AS INTEGER) * 60 AS time_seconds_bin, + CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 ORDER BY time_step ASC) AS energy_start, From be5b853b0a59ba8c676692b50f979654ee86a2a1 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 18 Jun 2020 20:15:30 -0700 Subject: [PATCH 299/335] reduce time-bins in more places --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index d3c80e496..480c945c5 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -565,10 +565,10 @@ class QueryStrings(Enum): id, CAST(time_step/10 AS INTEGER) * 10 AS time_seconds_bin, FIRST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_start, LAST_VALUE(energy_joules) - OVER (PARTITION BY id, CAST(time_step/60 AS INTEGER) * 60 + OVER (PARTITION BY id, CAST(time_step/10 AS INTEGER) * 10 ORDER BY time_step ASC) AS energy_end FROM cumulative_energy ), binned_energy AS ( From e3de3db5168fe0c03742a856dda173da4e5b4147 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 20:28:44 -0700 Subject: [PATCH 300/335] fix groupby/window fn error --- flow/data_pipeline/query.py | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index b5452206c..fbc258af0 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -608,18 +608,25 @@ class QueryStrings(Enum): ;""" FACT_VEHICLE_COUNTS_BY_TIME = """ + WITH counts AS ( + SELECT + vt.source_id, + vt.time_step, + COUNT(DISTINCT vt.id) AS vehicle_counts + FROM 
fact_vehicle_trace vt + WHERE 1 = 1 + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' + AND vt.{loc_filter} + AND vt.time_step >= {start_filter} + GROUP BY 1, 2 + ) SELECT - vt.source_id, - vt.time_step - FIRST_VALUE(vt.time_step) - OVER (PARTITION BY vt.source_id ORDER BY vt.time_step ASC) AS time_step, - COUNT(DISTINCT vt.id) AS vehicle_counts - FROM fact_vehicle_trace vt - WHERE 1 = 1 - AND vt.date = \'{date}\' - AND vt.partition_name = \'{partition}\' - AND vt.{loc_filter} - AND vt.time_step >= {start_filter} - GROUP BY 1, 2 + source_id, + time_step - FIRST_VALUE(time_step) + OVER (PARTITION BY source_id ORDER BY time_step ASC) AS time_step, + vehicle_counts + FROM counts ; """ From 7f406cbfbe187874ffc090ffd33414bcf74464be Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 21:12:51 -0700 Subject: [PATCH 301/335] fix is_baseline data type --- flow/data_pipeline/query.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index a7a0b794f..cce4d984d 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -610,7 +610,7 @@ class QueryStrings(Enum): ROWS BETWEEN UNBOUNDED PRECEDING and CURRENT ROW) AS max_score FROM leaderboard_chart_agg WHERE 1 = 1 - AND is_baseline = FALSE + AND is_baseline = 'False' ), prev_max AS ( SELECT network, From 5d897afd644c94467d6b017050bb9882742d4aaa Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 22 Jun 2020 22:18:55 -0700 Subject: [PATCH 302/335] change schema, vehicle_counts -> vehicle_count --- flow/data_pipeline/query.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 21cf05cca..22a0e734e 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -617,7 +617,7 @@ class QueryStrings(Enum): SELECT vt.source_id, vt.time_step, - COUNT(DISTINCT vt.id) AS vehicle_counts + COUNT(DISTINCT vt.id) AS 
vehicle_count FROM fact_vehicle_trace vt WHERE 1 = 1 AND vt.date = \'{date}\' @@ -630,7 +630,7 @@ class QueryStrings(Enum): source_id, time_step - FIRST_VALUE(time_step) OVER (PARTITION BY source_id ORDER BY time_step ASC) AS time_step, - vehicle_counts + vehicle_count FROM counts ; """ From 7ddf890fef982bb2da7562774cf2fe2b6ba5b2c9 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Tue, 23 Jun 2020 18:14:09 -0700 Subject: [PATCH 303/335] fix some query bugs --- flow/data_pipeline/lambda_function.py | 10 ++++++---- flow/data_pipeline/query.py | 14 ++++++++++---- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index f7a32d5db..97f625eab 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -2,7 +2,7 @@ import boto3 from urllib.parse import unquote_plus from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline -from flow.data_pipeline.query import tags, tables, network_using_edge +from flow.data_pipeline.query import tags, tables, network_using_edge, summary_tables from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS s3 = boto3.client('s3') @@ -31,7 +31,7 @@ def lambda_handler(event, context): queryEngine.repair_partition(table, query_date, partition) # delete obsolete data - if table == "leaderboard_chart_agg": + if table in summary_tables: delete_obsolete_data(s3, key, table) # add table that need to start a query to list @@ -43,12 +43,14 @@ def lambda_handler(event, context): stop_filter = WARMUP_STEPS + HORIZON_STEPS for bucket, key, table, query_date, partition in records: source_id = "flow_{}".format(partition.split('_')[1]) - response = s3.head_object(Bucket=bucket, Key=key) + metadata_key = "fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(query_date, source_id) + response = s3.head_object(Bucket=bucket, Key=metadata_key) loc_filter = X_FILTER 
if 'network' in response["Metadata"]: if response["Metadata"]['network'] in network_using_edge: loc_filter = EDGE_FILTER - if 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': + if table == 'fact_vehicle_trace' \ + and 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': update_baseline(s3, response["Metadata"]['network'], source_id) query_dict = tags[table] diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 22a0e734e..302048632 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -67,6 +67,7 @@ tables = [ "fact_vehicle_trace", "fact_energy_trace", + "fact_vehicle_counts_by_time", "fact_safety_metrics", "fact_safety_metrics_agg", "fact_network_throughput_agg", @@ -77,9 +78,12 @@ "fact_network_fuel_efficiency_agg", "leaderboard_chart", "leaderboard_chart_agg", + "fact_top_scores", "metadata_table" ] +summary_tables = ["leaderboard_chart_agg", "fact_top_scores"] + network_using_edge = ["I-210 without Ramps"] X_FILTER = "x BETWEEN 500 AND 2300" @@ -234,8 +238,8 @@ class QueryStrings(Enum): AND vt.leader_rel_speed BETWEEN sm.rel_speed_lower AND sm.rel_speed_upper AND vt.headway BETWEEN sm.headway_lower AND sm.headway_upper WHERE 1 = 1 - AND vt.date = \'{{date}}\' - AND vt.partition_name = \'{{partition}}\' + AND vt.date = \'{date}\' + AND vt.partition_name = \'{partition}\' AND vt.time_step >= {start_filter} AND vt.{loc_filter} ; @@ -248,8 +252,8 @@ class QueryStrings(Enum): MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 - AND date = \'{{date}}\' - AND partition_name = \'{{partition}}\' + AND date = \'{date}\' + AND partition_name = \'{partition}_FACT_SAFETY_METRICS\' GROUP BY 1 """ @@ -649,6 +653,8 @@ class QueryStrings(Enum): l.efficiency_meters_per_joules, l.efficiency_miles_per_gallon, l.throughput_per_hour, + l.safety_rate, + l.safety_value_max, b.source_id AS baseline_source_id FROM leaderboard_chart AS l, 
metadata_table AS m, baseline_table as b WHERE 1 = 1 From 9dd65c88fe1b85112ad378bad635623ce21fa09d Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 16:20:24 -0700 Subject: [PATCH 304/335] Code cleanup --- examples/train.py | 8 +- .../imitation_learning/custom_ppo.py | 6 +- .../custom_ppo_tf_policy.py | 5 ++ ...itation_trainer.py => custom_trainable.py} | 3 - .../imitating_controller.py | 13 ++- .../imitation_learning/imitating_network.py | 19 +---- .../imitation_learning/keras_utils.py | 3 - .../imitation_learning/ppo_model.py | 10 +-- .../imitation_learning/replay_buffer.py | 6 ++ .../imitation_learning/replay_script.py | 80 ------------------- flow/controllers/imitation_learning/run.py | 10 ++- .../controllers/imitation_learning/trainer.py | 3 +- 12 files changed, 40 insertions(+), 126 deletions(-) rename flow/controllers/imitation_learning/{imitation_trainer.py => custom_trainable.py} (91%) delete mode 100644 flow/controllers/imitation_learning/replay_script.py diff --git a/examples/train.py b/examples/train.py index 05ebb3fe3..9445e81e0 100644 --- a/examples/train.py +++ b/examples/train.py @@ -75,7 +75,7 @@ def parse_args(args): '--checkpoint_freq', type=int, default=20, help='How often to checkpoint.') parser.add_argument( - '--num_rollouts', type=int, default=1, + '--num_rollouts', type=int, default=20, help='How many rollouts are in a training batch') parser.add_argument( '--rollout_size', type=int, default=1000, @@ -115,6 +115,9 @@ def run_model_stablebaseline(flow_params, stable_baselines.* the trained model """ + from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv + from stable_baselines import PPO2 + if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -200,7 +203,7 @@ def setup_exps_rllib(flow_params, if flags.load_weights_path: from flow.controllers.imitation_learning.ppo_model import PPONetwork - from 
flow.controllers.imitation_learning.imitation_trainer import Imitation_PPO_Trainable + from flow.controllers.imitation_learning.custom_trainable import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog # Register custom model @@ -356,7 +359,6 @@ def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) if flags.local_mode: - print("LOCAL MODE") ray.init(local_mode=True) else: ray.init() diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py index fdbc073a8..0075741d3 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -1,3 +1,7 @@ +""" +Copied from RLLib's PPO, but uses CustomPPOTFPolicy, which tracks value function predictions in Tensorboard. +""" + import logging from ray.rllib.agents import with_common_config @@ -190,4 +194,4 @@ def get_policy_class(config): make_policy_optimizer=choose_policy_optimizer, validate_config=validate_config, after_optimizer_step=update_kl, - after_train_result=warn_about_bad_reward_scales) \ No newline at end of file + after_train_result=warn_about_bad_reward_scales) diff --git a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py index 0dc381b55..680b7cf76 100644 --- a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py +++ b/flow/controllers/imitation_learning/custom_ppo_tf_policy.py @@ -1,3 +1,8 @@ +""" +Copied from RLLIb's ppo_tf_policy, but additionally tracks value function predictions in kl_and_loss_stats. Used +to evaluate the value function learned after imitation. 
+""" + import logging import ray diff --git a/flow/controllers/imitation_learning/imitation_trainer.py b/flow/controllers/imitation_learning/custom_trainable.py similarity index 91% rename from flow/controllers/imitation_learning/imitation_trainer.py rename to flow/controllers/imitation_learning/custom_trainable.py index 7db18d005..b41728f11 100644 --- a/flow/controllers/imitation_learning/imitation_trainer.py +++ b/flow/controllers/imitation_learning/custom_trainable.py @@ -1,6 +1,4 @@ from ray import tune -from flow.controllers.imitation_learning.ppo_model import * -from ray.rllib.agents import ppo try: from ray.rllib.agents.agent import get_agent_class except ImportError: @@ -18,7 +16,6 @@ def _setup(self, config): """ env_name = config['env'] - # agent_cls = get_agent_class(config['env_config']['run']) self.trainer = custom_ppo.CustomPPOTrainer(env=env_name, config=config) policy_id = list(self.trainer.get_weights().keys())[0] self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 4d912179d..39fd2421e 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -24,9 +24,9 @@ def __init__(self, veh_id, action_network, multiagent, car_following_params=None """ BaseController.__init__(self, veh_id, car_following_params, delay=time_delay, fail_safe=fail_safe, noise=noise) - self.action_network = action_network # neural network which specifies action to take - self.multiagent = multiagent # whether env is multiagent or singleagent - self.veh_id = veh_id # vehicle id that controller is controlling + self.action_network = action_network + self.multiagent = multiagent + self.veh_id = veh_id def get_accel(self, env): @@ -51,11 +51,10 @@ def get_accel(self, env): if not self.multiagent and 
self.action_network.action_dim > 1: # get_sorted_rl_ids used for singleagent_straight_road; use get_rl_ids if method does not exist - try: + if hasattr(env, 'get_sorted_rl_ids'): rl_ids = env.get_sorted_rl_ids() - except: - print("Error caught: no get_sorted_rl_ids function, using get_rl_ids instead") - rl_ids = env.k.vehicle.get_rl_ids() + else: + rl_ids = env.get_rl_ids() assert self.veh_id in rl_ids, "Vehicle corresponding to controller not in env!" diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 30eec3696..81642883a 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -1,11 +1,6 @@ import numpy as np import tensorflow as tf -from time import time -from tensorflow.python.keras.callbacks import TensorBoard -import tensorflow_probability as tfp -from flow.controllers.imitation_learning.utils_tensorflow import * -from flow.controllers.imitation_learning.keras_utils import * -from flow.controllers.base_controller import BaseController +from flow.controllers.imitation_learning.keras_utils import build_neural_net_deterministic, build_neural_net_stochastic, get_loss, negative_log_likelihood_loss from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer @@ -55,7 +50,6 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, # load network if specified, or construct network if load_model: self.load_network(load_path) - else: self.build_network() self.compile_network() @@ -127,10 +121,12 @@ def get_accel_from_observation(self, observation): mean, log_std = network_output[:, :self.action_dim], network_output[:, self.action_dim:] var = np.exp(2 * log_std) + # track variance norm on tensorboard variance_norm = np.linalg.norm(var) summary = tf.Summary(value=[tf.Summary.Value(tag="Variance norm", simple_value=variance_norm), ]) self.writer.add_summary(summary, 
global_step=self.action_steps) + # var is a 1 x d numpy array, where d is the dimension of the action space, so get the first element and form cov matrix cov_matrix = np.diag(var[0]) action = np.random.multivariate_normal(mean[0], cov_matrix) @@ -256,12 +252,3 @@ def save_network_PPO(self, save_path): # save the model (as a h5 file) ppo_model.save(save_path) - - - - - - - - - diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/controllers/imitation_learning/keras_utils.py index 59928affc..f5d9924b8 100644 --- a/flow/controllers/imitation_learning/keras_utils.py +++ b/flow/controllers/imitation_learning/keras_utils.py @@ -119,9 +119,6 @@ def compare_weights(ppo_model, imitation_path): ppo_weights = ppo_layer.get_weights() im_weights = im_layer.get_weights() for i in range(len(ppo_weights)): - print("\n\n") - print(type((ppo_weights[i] == im_weights[i]))) - print("\n\n") assert (ppo_weights[i] == im_weights[i]).all(), "Weights don't match!" ppo_layer = ppo_model.get_layer('policy_output_layer') diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/controllers/imitation_learning/ppo_model.py index cbc51c6c4..85a7c841e 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/controllers/imitation_learning/ppo_model.py @@ -1,9 +1,5 @@ -import numpy as np -import json -import h5py -from ray.rllib.models.tf.misc import normc_initializer + from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -import tensorflow as tf from flow.controllers.imitation_learning.keras_utils import * @@ -90,9 +86,6 @@ def setup_model(self, obs_space, action_space, model_config, num_outputs, imitat # build model from layers self.base_model = tf.keras.Model(inp_layer, [output_layer_policy, output_layer_vf]) - - - def forward(self, input_dict, state, seq_lens): """ Overrides parent class's method. Used to pass a input through model and get policy/vf output. 
@@ -110,7 +103,6 @@ def forward(self, input_dict, state, seq_lens): (outputs, state) Tuple, first element is policy output, second element state """ - # print(self.base_model.get_weights()) policy_out, value_out = self.base_model(input_dict["obs_flat"]) self.value_out = value_out return policy_out, state diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/controllers/imitation_learning/replay_buffer.py index 4e02a52c8..47ebebaa6 100644 --- a/flow/controllers/imitation_learning/replay_buffer.py +++ b/flow/controllers/imitation_learning/replay_buffer.py @@ -7,6 +7,12 @@ class ReplayBuffer(object): """ Replay buffer class to store state, action, expert_action, reward, next_state, terminal tuples""" def __init__(self, max_size=100000): + """ + Parameters + __________ + max_size: int + maximum size of replay buffer + """ # max size of buffer self.max_size = max_size diff --git a/flow/controllers/imitation_learning/replay_script.py b/flow/controllers/imitation_learning/replay_script.py deleted file mode 100644 index 9d41afea8..000000000 --- a/flow/controllers/imitation_learning/replay_script.py +++ /dev/null @@ -1,80 +0,0 @@ -import time -import numpy as np -import gym -import os -from flow.utils.registry import make_create_env -from i210_multiagent import flow_params as flow_params -from utils import * -from imitating_network import * -from utils_tensorflow import * -from flow.core.experiment import Experiment -from flow.core.params import SimParams - - - -def run_experiment(): - create_env, _ = make_create_env(flow_params) - env = create_env() - - obs_dim = env.observation_space.shape[0] - action_dim = (1,)[0] - - sess = create_tf_session() - action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models8_vdes14/') - - def get_rl_actions(state): - rl_actions = {} - for vehicle_id in state.keys(): - obs = state[vehicle_id] - action = 
action_network.get_accel_from_observation(obs) - rl_actions[vehicle_id] = action - return rl_actions - - exp = Experiment(flow_params) - exp.run(num_runs=1, rl_actions=get_rl_actions, convert_to_csv=True) - - - -def run_rollout(): - - create_env, _ = make_create_env(flow_params) - env = create_env() - - obs_dim = env.observation_space.shape[0] - action_dim = (1,)[0] - - sess = create_tf_session() - action_network = ImitatingNetwork(sess, action_dim, obs_dim, None, None, None, None, load_existing=True, load_path='/Users/akashvelu/Documents/models2/') - - init_state = env.reset() - - test_state = np.array([[1.0,1.0,1.0]], dtype='float32') - - reward = 0 - while(True): - rl_vehicles = env.k.vehicle.get_rl_ids() - if len(rl_vehicles) == 0: - observation_dict, reward_dict, done_dict, _ = env.step(None) - reward += sum(reward_dict.values()) - if done_dict['__all__']: - break - continue - - rl_actions = {} - observations = env.get_state() - - for vehicle_id in rl_vehicles: - obs = observations[vehicle_id] - action = action_network.get_accel_from_observation(obs) - rl_actions[vehicle_id] = action - - - observation_dict, reward_dict, done_dict, _ = env.step(rl_actions) - reward += sum(reward_dict.values()) - if done_dict['__all__']: - break - - print("Final Reward: ", reward) - -if __name__ == "__main__": - run_experiment() diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 924e1a400..6adc04199 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -1,6 +1,10 @@ -import os -import time -import numpy as np +""" +Runner file for imitation learning. This script performs imitation learning using DAgger and also configures the trained +model to conduct further training with Reinforcement Learning (see train_with_imitation.py). 
+ +Usage: + python run.py EXP_CONFIG +""" from flow.controllers.imitation_learning.trainer import Trainer diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 786444cd2..2c951ac5b 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -254,7 +254,8 @@ def learn_value_function(self, num_samples, num_iterations, num_grad_steps): next_observations = np.concatenate([traj['next_observations'] for traj in trajectories]) # iterate over data multiple times (labels change every iteration) - for _ in range(num_iterations): + for i in range(num_iterations): + print("Iteration: ", i) # form labels next_state_value_preds = vf_net.predict(next_observations).flatten() next_state_value_preds[np.isnan(next_state_value_preds)] = 0 From 739c2ca6346e38b8d97bda1fbf41c51bd51b1a32 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 16:54:54 -0700 Subject: [PATCH 305/335] test files synced to i210_dev --- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 19590 -> 10209 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 210 -> 180 bytes tests/data/rllib_data/multi_agent/params.json | 54 ++- tests/data/rllib_data/multi_agent/params.pkl | Bin 17746 -> 17562 bytes .../single_agent/checkpoint_1/checkpoint-1 | Bin 27018 -> 582 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 210 -> 180 bytes .../data/rllib_data/single_agent/params.json | 42 +- tests/data/rllib_data/single_agent/params.pkl | Bin 10890 -> 6414 bytes .../fast_tests/test_environment_base_class.py | 78 +--- tests/fast_tests/test_examples.py | 31 -- tests/fast_tests/test_files/i210_emission.csv | 2 +- tests/fast_tests/test_scenarios.py | 183 +------- tests/fast_tests/test_vehicles.py | 9 +- tests/fast_tests/test_visualizers.py | 404 +++++++++--------- 14 files changed, 273 insertions(+), 530 deletions(-) diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 
b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 index bc68b0b99d2171f688662b7b1e4c841a36f8ffe0..0693ed4b62a9cabcdbecb267201ea862144f212c 100644 GIT binary patch literal 10209 zcmXYX2{cvT`@RSv$&g4&6b*)=TK$u^&epqmDkiULJcu??8{r^Kr zJ8if5A6zHi})gh%fPln#~qUq3f44xMZ-Vm~e|RDM;c!d!bX&v9d&VL}z1oSf|b zjg|j3_9Ee-O8yxZ1==DaeSW`D&|)fV4|xp33)XX$o^KU)HV5K9!5r?@#EkBW{ zt;51e?%WmwH{qbqAVG&{VbP0e+{U7Q407?|(vFufct}zBZ^L|;ck~Z#KxyuqIqUJG zOdOoH@(}v*`FN>=5e}@&!^Zv7&8(;B3UjPfg>#zag<#SL>Z*~jxcLe=L|>w@Rt+>f z*_*3?I{ zxqEV=xaVhGCz?zNCdWqz*Iv;PuADSZ=rX#T8}~$!D?8tWdn7!Nd*i@+h#U|H#fO_w zs@hy=+odIJ$aChVl#HNURx6IKc!6EAzd=Vo3qN+0lJ{5rxl&OsIHN5M-Sn4n?Yd*g z4~u@dbI?qQouf$Ng2*hN)xi3zy5WWfI;Ik-W?yhyV&=)g~+p_yCgsN!@ zYYGP-;iDq=m6M(Dskt3jdVeV=wf8VDS@{@e@tI=IjlvSn$fi8r*YaZCiql0L^-t!U zKG8Z(=dN;&s(&TVqNJ49o_?5j@M}HqYep$&^~ExtMOZ0MC$E~9J?$Xp^M_o{zOX!w znq~>-idzDY8dY&(FPHMpR+REKIv(ac-B`veytJOfP0TbozcZ7!Kg> z!CED7kU9>dZiiW~ICs3_xD$SSn~jppCR7=>kjT5#;pp*h{8q1uZ~9hY$1@L*^m;^Q ze$rv(zMW&c%Z<@Q`8>`ZT|)gM7vO_Ksjx@S2nIS^h_X=yJ+vW~OzAiZ4lfl*b(Aq| z@=8Tr=MpyS#SgkhgwpdjQb?SNH<)ZN0mnl(snX8CU#G?y6~2?68x@nn~*E9Nd;hv^@e;OvE`A#P1FtsVa!{goU++AEv% z$f!e>!hFzd3}$~T%!Xxk*O)2xv z<8vu=W!RDBrN&5SEu9b=a#rP{sC2bTSC8>&BNa&;?%R;N$@u`jpmz3WB2T5q{`Nr>>a*M zj`Nm6=m%dMihf7VTQ4KVHH`Mthk4 zk69$w>mGetD~@X)QZ!q$6p}Q{@k-DnFy4L?(^a!^8Ak~gzBos!Zi|6L^mA%`!2*l3 zwIKbaDDZAokP?k#dfHP7KW|ATF7YBz=wnXMrklO>Zwl&3CgI+;0$O@s1-?b`!75%7 zE8PEMX3D0J5WgzC>^wqb$NLh_)KYjUohZ0FO$n=y6_P_ex~Qfuju+mS!=g78aIbzl zU8l@MvT|9`@2g@;Q_S z&zcQ}Z6Z+Sq)Yx?%B0gnSFznwe-Mi~9?n?`v zRhy{Xn{KkeGZ3_!*W&Qgc_1Zn4d(@1zz-uUY1>FC`Mmuc-tJ!nB~zEcl%)W=b~=zz z^N374H5I?ioQiq!hIrFu5e9h=(z8#wRe)tY%-itE! 
z^jtYsJZvkoRWc9VzIM=P+i!IFHF@m4kjg}tPQ(6$QS#X08}nOK1RXuOuycnBof)En zFFdTlA|Qtr{!GFpjmg;EX3Lr$V~KtZLq*G{0vjxb6+OZ1?o&agBU?UV?&l`@`G*{8 zq_i^Z%em~!G6NjJFjAux5c{nxKW3IF$sub4j@6L?-!fjS^Zi$?W_q=7NW|6lVn@z1*)}lki@(A(4&K!!LIxg8zZ?2hwpZh2TP|@)$)82y;qe~ zemP6!t_6^Ou_~N!C=JF%?ZullE6CC`6~-l`3QcvNQa{yx;v!m#WlOwZ{?0sXjo}b$ z;(%#uW$C+JEZR&nWxBR5!gi_Sg8S35$){J_aN{)_YF;&q4BKXzWoY&uryC zd)0|o&nQ1mT_nD7AI1b{k=R%{a&Jx@E*ouS-F$P%cTN=PvD`;H zl(k7tWCL^`SO$_UGO$?lIrIFR5o%?(lXESP$RFcbpsbP%lC`>!-1?bxt<}I^4O-As z^PPHg_)sw36Sh3`#pj)IV0(5hY&Y?Q+8|5p+PfVaKP`dpKev*eNHb={IYTqCpr#Yfz3gSb*~HV!?w#a6xyi77;R&=qXoAAV zGV<#)#nlrUVeLj+^jejU;bt#b_nAnN{QX!5Ra@#+u#ZaXj;&Rzby#ONg_>R8z)W;H z0(NQzn7j8nbK<=*O5_HU?5GYB+oTKqpUyE}m-UH(+*-^?XTkZXD!q2B1bzM)z_4f! zy_N2Pg8Y04&{;&ZziPocjyC=}xCTpR8PGi|CqwMlPUymV4EXjA7gjBXx^-u%Xth6j zZ%IUcjUA4XENszG0?pq>n0&OA8tBM^ET)<92Jca|$qCejxk)svbkTulk0P3mpcQ); zQ%ddd;ss?G*P#iv=9H+}mlJn)W%3}r#q^A0li>Em2zZ{_MMd5|r|jQQ^xT{XVJ3#K zdgTQ0IPO48= z)cHT9ZvzgH#dT?%$`8rZPHZx;oHEub(FgMGH!-_5tpoFO8t}5Rz-&qQRt!J20*fj> zbC|^vwEIa4&W($~)}}jTn~N#7x#qE(d)4uCmk%8Kk^;RYE(Et{!iSh45-jk**^_Ru zp8xJro-G5eJ;P?3zb8XjTmn6IVIp#ze~^v>NxI8H7qmk2apg%3QX=Jre_W=Z{G0}i z+q#q*-ndSEzeHf;ZwqRq-AUG|)G+TXkC4sbktDq4GaZ*P8IKtmQ5)%bAZ+CkY%PTT zoEdOCX%_|(1u{uT0Tg~7!IjDa@}wl1HQKq4xqNpeabH_Ovt{H#WobRFNQ^;u6<@Y* zUL9&nDkDz#Kmx9d;O6VM=+%c7WXH(K)}_aclR1*#$ee9`cv={`18-A|13# z?*ty#CrFIlw9s4>f!I+qyP z%8)>P26m@kVg5dIrLraoRKHPpCUTX3kn|R-!;c{W3F+gIL%pW`C&Nw6FFadjH(#Te}A%f*n?4BBF*3Av+l z;klavgt;hSZ0|LCZ^t_}#kGvdcpymydNLR}s|jWMt*GAV8K9v%7b`_;NR?(VtG=m; zEIt{Iu8~)0rjj3=2$4r~;e6bud=H}NPqV1zjWGCikS^S`37R&Fv;4>@Xr;Ih-3^?O zDG-HEuQ!leKA}+1R13!wPBYfOl(Bc@BqUF`O4`RakchUKkeqCeCTr&~i`gT1iob`h zJ6ejT9Q{~hjkoN|HTAr-h=1hu$%$l9mJe-pK1R+in~dNb0e^mqVEgA0vh>zNG8&o) zDfefA#@CzlbKN&JwVpb97L2fbU&(u;5bEA?IAy>qkI#|ldeYnfiTQkG7 zQ{2ULma382sPr#>qjQ+4-`RbFQ7OhOK>dWNCog`{KV!a9i7t=ayE_ ze{$)i>C(Mog8cO&rj71SW|HzneDzBKrYAI|1rpV#`ALWVaJtv&nf+I#ZT5+C%Jh7L zoFF6e5x=_kCa*-iiJ$a#h1o(w`{n>K%8wBL%(HrH#`~{iaJt75NT(yiJRDHkdYL|K%TyIml0qlN3aIzcsDb 
zkTpGZ_ybQt!19e=z2SsJOc1y^Z!%Rd@a272=wKT8uGn;`hpFjX{Q&+h%W_ULM%Q`JNeEsh=j?e!D8NF#=#!U$n4%f*Lo4w}pHA)l6>IzR!5& z@Co3_Z$rf!j4;d73c4C&xH~5~awmUZhkHJS3p3`+2yI$!!=(-hs8Vqj-Zg9k_xg9V zYlZ~(rukIv6~pzyRQo{g!ryV!Li;(WW$AL={(5ky<_Iy^lPi2tsK-^?q|L4K+%3e{ zKIp1yEBx~J7;>Y>3Fmg~#bjO$)TUSovn>}0%VtFj8x<|FUBwt@$?xEbpUl9|Z4SaB z%lX1RKmTG=iX`{XZB_2KW!5e+5vJd~3V)Wr0>4^u zu6glJjD0MDTRqIV$_{^-&h@~2+c6GDZGADyOojWs-$VHJus_$MQ$^U_be5*|iNYQ3 zL0aBZ#_QZy$}9G&;B}AYbDU}lIllA|C*tr?jyPM+@sX+Gz5I~L5wEG_B>y_ZvllPs zlzk}Y`N@}YN<6AKX%>0B2ajENnro_f@6<|pG0_z~Wz{N9Xw%!~rt-1%L(6#U50>%P zDJF5$>Z&+@eD-nl=T~xK6-s$CjVd`&$J=N_-U~tNuq9pg-+snM(*j-7Z*f%3WoUF2 zLmXGw!C;Ub%4yd@-n|TgOw}=PU6F>|Gq$YNrTwU{`+?eo>?e|;j#NqeG}FO3$`r>M z;==R=G;qRZ#$QStb06E|z$FDx)LIL6nwI2(n#mVi^FpK`d=j`C;CE0Lm+Z`x1xAvJ<(A9|hJ9p(0ho*L_b9foE`r#G4 zw=j^IwX2qX9Mk+hE?5lq-i~BOff?#_Wuu^aJ9sgZX_re98kgvSs8s>|xu=sV=xM?^ zvvfH2^cxWy+sjiO{78I=A7o7*`#xHCj~;nBM6J#&L&MRd{LrF6h&(WlEPA|;COI6y z^z~J+5fu?3xB4e>YFLBuRsm% z>Pn;Stt~i9*PYxj8`H=72s7c9Jn|rvi5kBeIy1MBkOPNl@Uwj+&1(m-t@=$*bo8)w z?th4n@)G#FkBb9)$05jNv*E7K$;F)WwEV(Tl72-FvPB;=lCNDDJ)d-BWX0jKWhy*Q zSAl`qh7i5Pn610YMRS`qQ52ogp6H>10rMkQxVS&{xaSalwn- z)PI#bsynqIZ&?s&zLP{IZ2d_MpKIU}DRo@E8KaraWN!Om;#0+EIOPk;os1cTDTspUHa}Sp zyF#LokWP*y9>kTo!$j=%ezWhl%m}~uBo$sa20>3S>=RqgPCPgOh08>#-)b9B4thj0 zpUJXQXH%HBVT8W?84nJX8{vxDT8unhOCs-2B_XQ1aO(I}`oU~7$|tyzoH2b?EOQ$y zNONQYgSsH$?MAXSCy~B;f0iv!eM0=a=i-t<9XxH8M`sj2qZ$V1iEXzrr6 z@EZdxVD!K{Z!@))%47BRF>rih1OEJY5N_Q+2t~%vSj(rkDChfq`oJNSO11=(m7_{D z_0w!vIO7mwq}GPyLx*5Ae+t|>8c(G+J*UQdIc$%fB3z`2RA?1U>(^J3xQL06*=CQK z4S6JA_B(%G`!G4;t4MWkzM!Q$jL4#q7|fG%fj`CPsdfbmi|xy>>7X?+h&}+?^M=W} zQA6nF4Zyt^eR!HAiun~DG}h0A%9t-8K3|=oqB5WT_a%c&YST6Q^yG}-y81=pk(?_K z(|u1wy6vf;znKY?I>ZiZ??cz!vrvI+0Rdkd$>2%_nEL294c?N1^PRnE&z{RP*D@cR zCYF;XpGRbQ>UsLIH^eOJ+A0E4b{MtuIPG1`AoH%5F0W94t>MKWFIS5PE9>d5%GIR# ziUAm3wx(@?3t-ZM3~YMcL9B|C+2jX~Oq{3({0UiWX6IQ+5A`&W4yDa>!`>}q>dJWX zeS6(lZe58nqMM0P*E72LS~03VRKb^bZ_}->7trSxxwPi+M5H7W!y@`=ZvSjp)T9B^ zuU%nZoaiRG4{yTMVOgA8_<-)3yB{xfCW23WIb6A{hYO8%jIbk((J68oPCSWQo% 
z-N6J+la~|aE=d?{K0*?&w+kL^J4DwlSB9F&8}U-@GsdJx3{zKRgZth>rX(T5w7u3D ze7`l(<2TZv|Lr7HoS%SqKi0tUj)`cx;|A0E^%MC$Qbn#jdW^jTj>yh*f}5|$oo`P_IL= zKQil4a+rZ?(NA#@V20b!90QvhHu|oDaRE+n<*}1;Brpq3eB8ziRPNjjz z$MW*13VfHEfvk!s_qby=)*lvynvA)yu3;INzOG?iTZic3sXrj+#Sst&HjxR{18kqo z7e*!X6qGEkg<$Z-JKg(LeV2gY%y}GcAj=o(D0ly9L{ApuMK6ei8*|pHWOT=itQx_3$vtsOCWnfsOEKW7L zBEYxf@kv2C8g1=|Z&OY}OGrLj`1J%{{CdN5Z>9)7d}0avYZSO63Q1(|7*{qHXo7{D z6#n)bhyFq0f{Vjtg1qww=HD#k=r7)ab55$G#cmTQoqhyk6ypWg+NRP@ zjwVEU$iUa0CgwqGAFXh1z(=Ac?1_bU;bwrrm^K*-Tl3_omMX!^(h_KS*_ZBXjDxuj zJ4v{GDt62|2#UVHsdSMMEU1%Ya;uGTO~eK~<|bf5KbEsiI-#bqK50 z2KGk~$$v2e71o`mrV){tE|zcBAC-w07Hy;#^V}d)ejSdRT|&-mQ^8@a9n>kV38pz| zU{kyq)c$)wQw;~{lSL}fV%N#Ud$zIXU-#1a-rf8!OJ&&4zieQ-`W$?xv7dCgx(k~3 zY=n-kC(J5vg=St3qxxPRwmZyX7mw>>VkT&jymDXGbj2gS=ujJxx%rL^Jn+XGzk2B= z5odaGx;^Fv9b%I@>&e|TK12=c!(DfGqViK61|p?k>gF`OA(KdJglf#xd5?(+2jF9s zJN(>x2&PE}vrz|c(&G~=Nr(O>JaTP>k>YA##79rGvO7dRU(yEpqyols`=+tS|)j*bc-~O z4F9I-XGLM@EH!A%&Z4Q2LV8^+5a#p)evD26`^jBIw!j-T?)Ss}i`8Vmv=^*Um4lWL0{5V?f0E}IT$eq>BqSW7>K3P&(`)#& zxMe;`2~WnH_MgPEHk<5xQx3ePd#T=ue?+7C3(f!Aj+67J5Uns@6*WXNr5p76g>$5@{|J45(VoZzY{9SH2^e6~i$bx@@Ig(9!u?1XI2;b0 zYfWi|$9IxaI}1l9%fpj2E&8E;2Xxm|!rCQYnV`#*e*6&+&Nli#tG;QZ4D|B z;DgUcTG*fiYvIKERRFbnLFE24FgSC8lpe_--@`R%X4EvS_*#t#i|-KK;-{>k&s`KzY(HgD(Sz!Had03yZ9HLBqkP+J}g5UGx zvHRV6`p~@>-%DI1&Fyb_9_iv;ik!Titub%qnD#PcBPxMZuGqdh#A$+&G zNW)un@Z>gEIC;MjzHVL(!VAjW$Es=6J@yxK&1;ww!i@PC%Fjd8v=?M$!?-a`{~FaW zlVc(V`Jf&AnAYvcr0YKg(qBdSu%dY<2D(iE{*G^KSr}mR!vip*Zoo`pLn8k3t0&(w zHc}uxozv~FUrB$bj^@05tn+SQPGazANMjK$QB@ANG#X1|Y) z!@MXD)c%x4e{Vg=>UbrBLCsh8@C_AU`C%wObO;1RpRmhv5}USJftj&O9*3&a;L6yZ zv!<%lOnO=t%0IAzSie{}_9`EG43ofhOC<{9lbN1d4-#<|Q6?KRNo zvw~^3918XJtLcMnRqU&h7iK;76}(MP4)AUcRdLkJOL<;trM%ihMLd7$N}hRh2`_PJ zDQ|W`C8yrHg5&?Aio^UZ<$WfVoQ=AroVT47oHuv#dDq7LJ;}zUysxe$JoC;X-o~?a zye$30oR}?zyv>=1c};2sycWqy&c(87o>$Kij)iS0Peb7lFX>-3&(JbSu=|}7uX|d& z>FTgI{F&|(1iJSY^Cv%>CeXfgo_~G83x0jlC(|mLX7=mH9@ExYjNsb^8MD*fPfT%@ zky+V?eZ1yl0#obrgZ!$?C44O};IC+!#BW!*YN{Ifm^VJ^6#r*V9?yJ0&1{d-27cay 
z`KH#zL;SLte|avM?tDGla?XveE;43U6A7IX9;)oWGeUpNeJ3?`ynRP_(6*qMF{hrG kcj)-Jj_ZfUYM`g=*s1U^zo3x7FyH_4+llW9m59*)f66*xxBvhE literal 19590 zcmZU)c|29`*Z+@_Sy84)h76I)km>Ae8A?Kfln5ot6s3boG?2NFu`)*ql_r_a+1H|y zl1j5?5e53Iw#|$P3k&g|xj8B% zEOO@mjFMa&KL0<%CAnj#$8$v^Jw*k7h>Y@!3gC|0!4(S@oK{@lQ}#b+nH9L!*LarT zni~VcxDsnOZr-qI+sw7$5dkwdYz~VG@rw{#L((@QU~Txu$f$_TYojE&lK(SGa^t%H zIm(rC5lZ4p&lUFP%A{~*-MHiRU4&i6xM(DEC;acwby3?k1xRw`{$qNe8CTv#$VE7X zJJF3hN#A9Rr?9{jhO4l6@nYw{g5Q5OE<#aUMgKJWIKjavrHi0+oU~~AuC{sqaaqew ztyUPT7TfZ52gU1AzBpmUTUh*cxsB*!otB3`RBYBfUT9M|4s3Q?7J`%VdYid{J3!HY zQ_Dx*N*mq+@sSpBB6c>z4w>ZJQ$3q*Eqj~M zs&(jFdlj2*1-3|z*$V5H+@=qIWs@)Cf;VMOOmHj=X z|4+pDhxkQuRe~jh|Kt40!2-{#x^dP1@w`S7cS`W2w7GGmW2-IuJMUSy?s08isH4V- zJ#E3M(S2fdHC%_|QhlAL)9}Q4R<$U9a>jHHUnhdMR^Hcoy^#UGQSt%r`E5;(Scfc! z`$3nJSmtE?eocq<$m=lcC3CuYgCSFRH%`g&n>vnK#=1pXkB?U7_XP!64VMY=XAbVO zo`X%iOk+X1B}vMqUDzvHNi!{&1*3&9EU(`pp1*~-w~k_Bp_k3_NXM21x|>?e zc5!UFSKX&;7o39fcE^_Ow|3dg2w8(aQ9?SJ{gjr^61Dh4C7X1o=AlUU8?r@G3!8(CY^EyS#82MFIH2m#QZqWQqg~!;wt_sa|Lrd)UXE5tU6_qkxY~w91Zu36uC4I2# zG_0K2jq9(xhMlL&*m02?F{o`36jocbXdj<#bEn(g=G3Mro1mICxM^QhOIDwz&Ft%X zEvKu(Ta4OeZQdN!woxkgw$a~LN?w|-Zy|CA=xTV4^L|v~?I#FC*e^2@U$A|w3K)ruGsQ-@#r@L_t{x<*({}X_}-Fckp<2v~E6R&f^H>Pt+ zEwVUwX4djY{buv8TyNx~{$>6#r{(;svx4|G0SOrPc?BawxsjdrdDvCW(jlD*A$-kL)=D&u|`p59uZd~L482(SrH2Ej=geC|H-TKqX8Xc0t zk*Okh@xgN%GEEob{}NO^a}*MkD@cc;Ha;mGAhiqnspD`cIO-2Dp_dz}Mbta8Jb5pF z?34nib+rIa`6GJwUK>droC2la^3g72H_@suh09I5Aio<-f0 zSEzH`5S6Sa(DVjA9TQZ8Hv2x%DF^n_VzougRpA7*UOX2*zSP9a2}j7D=0_xB*cf8{ ztuW_n7TGbYlm^WUfErULd=t`5a<-qRm$sgxO7G_2Q=@HIH2jQp-29AY$ym`WcPZli z#Q_xl>_aE>E9BwPJ8*Wr3*DZ!leqdBAx~xx%A9!(da*KW&dPVpWzR`qMqbh%_A6+| zW|nqOON4tnTk*i}K1N9+1^NRVX~6yc)RNtUhy0DOJV%Qf-<^f+4g!&g{fAw!Y?~QR?&3+@`aYd}v=^gh<{k8IQ4)=AF@elN87QP}Y^PKb zz4FF}47weHS<3?m+g5<#?^>x`ZXPiR^~0v2Q*cKk2|l_{flvhz?2VU%RbTcAW{XK| z!>!NsTzn7?-Pnh@Mu*@)@+b0^GZXV-)}zO`?PT2fVlqE&ANjme7>v*9z!dtF#2W-* zW#UBoXiXjspSGWdC0%9K-r0fUcg>***(Hp=r8fCF=_hHb-UeeU)&r_cA;O}TaC2-X 
zo!J?T%KB3vs$RShM14gDO$UHSC%Gk19Es7RArzq3uhtYEcl0$s4zhuxK%3va#E z!8tOW2rt$|@7WrlzI`EFz4M+9&!{4Mckd?oZd&;H{0nB)sVn4%Z5-*!^ryUgLO5x0 zGP9{Y27f1qpndvRswy@Q>cuAFomv~U4 zi`ysd#SQCSF?qoOI1)3PGu~|~zBI~!2}&s>)_DiIndo6C^NtyblfW)Z9*pr)2GbRL zY4XMeXzjj{ep0I>QqT%1Wn;lVYydp>AB2|muj#&3N*J4L42rybxP5pz9iHAn^`dhz zz2guBT;kDJx5t6ro5w_Lyckq2c|<1u5hmk~3t>=%HE1&|tufdP!m62QKU&W>+AWBs zxN@*F{zeM?#HgjuO1!}_#QtzGc>Lx)`#!}U2K$n6?8g~Q&hth*E1Q8S{&}?4V+Z6a z93#IpUs|t{{Yn138AFVgYtm2G*D?y3ZA56hAbnT;pf&xgQI5?8Wv@JPVT}W7D;|J9 zb*;p_u!Bi0GY5vK(^9(&@N16-R-S32JRu2Uzda39K1KnAEXCYfF*MXL!8e*;=_ijK zqA^AYH@sK|0hconRQDq>9uFPE_cMmT0F_>pgm=sXIr8=^IjQASp`^Q+D$jXM>Ym!-Gu#X>W*?gdiVntYvPGQH6WrtKg|`Avr0e2>sD1s57g9TzJet;GQ@b8>EFo zzq0t5&%tY{JaW-&1DSV1n!L^5MpY)=M?FPzQrxZz0d{p{$u4JNDH{k~ zOgv`YjE1D&6+jy6sa;zsc+L!khVp$-mUIt?r+J`p1dkf~$yg6P2M_a0v~`eH_tD`U^JuFXgUM_k{c@VXp?%)uhIutD>3>3# z*Gl1@l42;^|Bn6~TE(WtX+iTFXBxhu3G&b#i?*$R8;z6T@0{zbcPu*&Ha>D0ZzL`++Xg*x>#$2TAP>tDmsLvrNawJ|^sh*OCT z`^X$vfr|G%n7p9`ocqoRFN_HMA;E=)Ipt*B^mx4W(-%FAQ#q0s2k294MlhewVs=E0 zFu4)4>8mb7a?je?I=p^A`}lV{M0Oa0{kw&Dcbpw)gCy9h$>DEbJ?Jwp;4fo8vI9XM zh{tdY?#g;h6q`-xfp8ZT8F)(VPR#|y>b$9VZ5qE9o##1v&RrlslFP zF}A*V#5E5RRz}0{!!qjo!5WtCRfe7~F(g$@68*?m`to5S-m#lQ-y41-am$Yrm1Eo3 zAA;B$I)9GsV!c5nQyFf0Zf35ZbwrQ(4w(4h2=+M?k`$+(Bx-mZjtc9MOzVR%^YT~f zepU*4M=P1(--z&J9k5nNNVAkk1@k| zjnnMzfJaQB_c>;csW(WynM7x;FJ=2BqhZUS1|%*QgA;XOWO76@4Adlp`zMyZyKDus zeNWIeYSoxlp^gFP9P#=_D-hb`i(`gHNS;F{U8sMB?nO7&$Bf`UsRP(hVh0!htOGga zYUpzChky(E_&Q_*ejIa;&Rx_B!B3wsk*WjKb>0W6c3zHNmY+{{UEIOweh`Q7Q@$`6 z$Kb;mc6jto9I^H^0H$#U9DcA6E|^3wFU+= z(&%I71_u?CAmzRe1gg4n8iS&AS$d%hyZd4e-aaMqkP8y=u_oS47^$@#($QrF5I(W_+SEkxkrEMvRwa(`A8U z@LTf?l-4{1$@lLujcY&B)x(V>F5Q@LmZ+iY?OYtTSqc7~Eb*W7kR2UyW$(+}!Rv1i z!{;=2*r?<{ePg7^B9jETG$9l&i0i;w=S0Z3ZjUt*3#s>MBkYy(g@D3isH7@|k1!c$ zztF>xm`_AKR}?+I?Z@l<2*Smm^tMtRRr;Ml-ug|(@6)YF+$$-ZJh&FM921~Y*%Zs9 z)UY9ZDHKW6pw~%poFcS|`Wqj?U%#5j*Nk>JlVe8Q%6dpw*9Ep~r64!v7{QNO6k>j_ z$KA`P;urtpw5xTHY-!mFFD35KS)X5{k@0ch>RlsFIR{C_+7tAz?LL}Nnulj?R^uf7 zD4&A8GZ+d}9@yYw>`r 
za=J%KE_|b3#D|E);Z*q6evlp`K9Fd(fNY(XjP8yrNOedOT!{lpqLXM`zc$9*)xyoA zvLO7Cg~V^AgunPRITITKgOb(cXV(B@X!05IMO$FttQwtcYfMBB>?ivbQqj|}hVsug z(Bjo2RKko$t?S|;d%{@KQ`SQduJS^=ldkBjXi9H4>?S5T(+PL(YZ`P5K|16FE&gy0 zbjJ)cnT09fvug=72AqQTlFj_J=Bq(|i7^)TiXx-85#O1HkP+c6^n>VRGU2N*d;gdu z-nlwcEY6in>A4{h6!XElkVII~m5@9$cQ^DYvE7d&un;!gqj+U!= zG8!9NnD1q}_(*>Y25(UyKFiKAFJ$`3+j?D+dv6lCe(gAUA?FQ0)^b4f(-p{Cx(fN* zH6T^A6Rez$qju3-QgZbh*>`U;-KcvLFTEHGe#(0II^ZpxZkJ0ka^5k=W7{z5=Ve;@ zZ~@NT|AH)`2`C<(Pwz12$cr2LkYYYSf4jNR_O6Ze-1ezh6O)TVix=bXky2W=;|Vo# zUkqojcQ9uKbFIO1DS_XcNUNkj?%i^T%F0`@FZ81^rEVVXkc)w%wRdTom=qqKT|-if z7UPP|gK%fz0{RX4IMgbN+1qW=NuwIv&rc@jK9<7VIdW)l=?~eK{*3+c*NC{vu7$NS zn|L)Y8)<5-3QXgf;B8@1sH*CQ`$u98$%%n-;*ngRPV&(TI~D&%`Z866kqf z1xAEV(~UE%Kz6MzI$kcot!Cq)ewH*|e7=WY=a|OK+G9w1|7@iz1`Ki1*PCecMh%_+ zY#{w_l)-FDBB#=}%Id0f3N$C*reOxlVfpzz^i{J13_nW&zuFvJUlvX(EV^lTbq-O? zXe3c4+4P#rWd7qPZ|U=ielUjbfkN$de3Z|_hz4cgnnrQW{x5Yine`v&G3Q@)VEzv~ zuyEsA{x9^f`VaIl_^r)XOE=|f43=8EPg}tI?mm`(29!9iPPW!%!Cc;pNL$W?wHiG6 zi4*xhjxOR2@ojj~^nbLR7n zl{j}ZE?K+GHsF`aow442ZqPdFjw=7ci5Q-$m>lOycmt1X9mVDRztru&=!5y!eDaU^ z;KsH2FZ!_k7k%UoIN+=7cjQg`Lqg_0CkgXJz@o~B{Pq&S-by<%+~IHiC(jKtL)*v- zle6Ib;*)DoCZCD(i`9Ng55x ziX)!I59!{=lZe_XaeSybOea+v!AtoSP@b%XrQP~Owd5Yey%EJFclTo2z&vQ{OoUfC z9#mICgseP19#pppp?#bz{P8ve9pVC`Ng>2(*#+7jkwf}9W+1Ywkkx(slS$|ig-cT( zQxTi_C|h`f8Q$5!98F$?8lzSatKCIjui8qr90M`PeLwW4+T((+k7$C>VfyT-3H=%K z7*8cXgq=4;91vBr-VEZ!zHaSZ% zk##(JR?`s;xtT=ox-NdWZBD|r^T>L$P^hfvqzxgu$hJwtPMzCStVa=E(mZP8F3g(s zj8d~Jf0+s6^N8M_E*Sl81fhjjnQ+%>pcY+4S5@Dj5*|im-k%)ch8zHPt~lIz+0Qu_ zWlQxhH`7mFU$Cx4uSt4$KgUyc9i3Gghwkfsa`Iyg>4)8UbkT&jOngN+Ej!pnJ$7>G zp+Xt{%A+}S=vx_`qHcm)O{Qa}-3S{Hs}Gkq%z>*H=i|s%9rzoYj_aH9@y^L1VtXuu ztr3)i<3H=N#hLRd?}#oQ?An1v_bZwF2j58RvMwU|aR`Hs?q!lN1^;}o!~;@U)c=kr zE2O>+Ht$}DYx=?%6ZKH$ZjUwf9#w^Ksh7l$nxNHcam*`o5FoML{8wL|($w~R3TBdw z^eGESSm%e`HE(I#?!ElZ>_vD$PXjOXDu6zxp4J_DOhkTVf&c4ERDH4%=)Vdj2kmbP zN|TTD?*KAkSCnCkq#w1&oetSf*HK+W3q*^J@z&q-xUBCKyj{5l%D;xgqVFB_^tvY=gL=V+59^RoC-ER$} 
zo_3hgv7dJOE2He2pJd=e0<(c9M^4}I0JX$K*x9>-UO(}X=pD|X8l9Z28rUb;8-$zkxQkYaZz>RB(kGc zmnpbV2<5zCc4Wgab!~KEUKP)#T@sGqerSmPe6K{^zRQvo+J6{##bE4cHDPBwO2cH! zAaE-=OZ{KXN8dYZtjjw-(ZUM`q|Rv@jZ8d8a)kBqujyF0N)OL@od2^3WcASlThk4X5d=HdZi-P!Z zQ5-+d8+Q0!Bl%K`aFbFrR5>{k7X=ZBJ9v=Jx$H!wUcKhI_C29LTc@K$r#*9$bBug# zACDC)8_9%yl~niQFr7cTlx*Y{AolLYw@0KI$DJR@<`vq6ygNf8RZf$2MaS5w{&Vnf z^>4O7Y8vhheGmSg&&etEW0B zDW;(gdw2%idt@~37ISv!0g;~GMw8t$a9p?s4eW~`I|eiGrtDZ|yqK8bXx z`gi&uVksQkXoKS_$HVLc1k+x{H4g?*G3_^+M&{y)*S8qyj6lx5 z!!uB2%`B43IRp=Fjqo^0#V_h%oC3dh^yb02*ePh#olQSUl~3nmr@0cCn`eQn;u@l{ zY$;JFmO)MXe4?0OLv~MDLgUxUqHc8$iYcmK@E;!gzG*WC{;8mb`bMPKLj!D&jECEy zZy|J~iy2thMBcTlQ)}6ypeWW!>wfQ~4^jqU{)jxjEzyKcVYM`zI{^-lsUY(X-GbtU z739{vOJq^WdPaPk6YbMvpbb1}_pf7g<<2-9;T?rQ&;3|j5CHjQk@!7tDm+!v!4n@=dUyB>+#oV#Gxk!{o|vg&SsiJKXmotliFMEH30E)TX6hj4z@^xe5@w%)s=^l8*8kQO2R1G0A zwZ;m=I~>5ezJ&Te(8P72SLnxBfLjvPGGZsU%m(l}V9%z+hlbQt@JgW&`=_yegSash7?0X}~o$5Rq{3i!5&OMB~r#z;U z`e)Nce({iQ8H;~7b*xc}F`Qp%OqRbXVjf6$kv8RQR2lk99$7cC)*rQDjpI194c3Lm z(Ob0Zu_?n=6_Ab9R?P1MTWP|IIM!aDCB;WilAzc?+$p{vZkL+lp;Pn0Vx2bZ)W6MV zi-o|sWHKuMY=+lgWMFXZddQ0QA;lxp!GEhSjJ(glrA>3N@atvhoTdSWDZTXDrW!hJ zZagN~s6eD-DqN4=4Z`*woL`?(*sKMSVA)hl`@;>Pcg1tC>)8xvmyCh%4bRAv@8Ohe zP{)Wx%8)R3HB?%p(8Y6g$etiGHs&f|_lP#0oo@l#e&1pfEtWCi8}*6!{(kb&ClW>i z4^W3S`(as}2_Adc2Fi1L={?mU=Jz2Ud+|vW&YgdrIG-ti4xvRbKE zaWdRoe+T9aZe_RC2~Y4P}-PBZH~dnYFHLy(O;0b4zn5&3F)&|I8G zl8c_vyNAta>BLfURCgJC=IeoC_)569wG|8XSvoOjEUxd6Z}IBeK_ynofsdt|0O7|# z7S9X+1h!yr(;ISV!yRg5(o0=(0;%rH0{HIM1^dRGfgclPh?9~CoQx2KWXY*GqePZ) zIty`8TPU~+7B(>#_R|dcbYl8=626PO0iI8@Nygm0P|XcN^micGQ;PWHJ2As8JL|Cj zK_EWjr=!t!Er{#0q$7$O=|Fr4kzJNYUzBm^^ZCB8JTw*Z;uo;DgwBxG^T&zmdqrOKa*_5}vRJrIZzF*wTaPT23@!bW^iX5SCn+a4%s9}oyI9Ox9 z8GZ-!u&2MZlL6nO(8VTV{A>%NV2}i}_Y>4Nt|J*Lx5?^TRv?+}3*%-{R%*jadjHlb z;uOe)^|L8{Y@Ekf1Ni@%Fq( zSnzTQS@e)%m*F%<-9xE_55M9(IMZ-jvRt zTZ>$~D6akgtNQp~05bPqf-vtNg5coBo&Uc8#PL4> zjzv==hr1w(>-2wBAEy7RKAitG+~ps`7rJp5{TF>K{uh17=e@&1t9If##|0;9uw-#% zD&NYm0-nTmk&9O+V)WhpuwefHy^)d(8eX?Z?`;>XZf0oQP!`!2oJ*GVdSk5f82U}8 
zi}=JSAV=stbuzNWeOJrjn_@Vc{eDPJx||?smI*wfWlsV>29e2*arAUYB)--8PQuF6 zNzvLe6q^~+u3*UyzXLrd-*7xEYXU`q6yqt9zqKrt)~xf>B82Sw`8jCI+)@9 zop?4Hp+uUX6W6nhq*wkTn4}BYUvy!*!DKR=WJ$fY`VgavGHhp~pdg;W@@WgGleHgn zjI;eg`l5@d zo#uLSUhfkrm)(OR;b*A!t4p}L{}c&*VM=QMs)1^bpvzWLPs_JTGD3GQk#ELz@H*cd zY9w-qRhAzqDf$J*Ymc(K2KPdrmL*>QJq=$k8l{JHEzxe>De|j45NCHyMa%4yq;6m; z$(KBgmO(Ng=2wqP5=~I7ldxO2Pi*ODitxB}JCv_qh3;;;#Ccm5*x+>%zEtoEGY8R3 zNEE$1ZQ;Jc6FR@e3_1)1-3Z-E;H@ekY3J(bnE9H(-mPF>?0<&2BPS_LNTR(=9%k{5 z;E(&|?Bgey;B9{#Lwn4Lb6+(cvvP*sg_LPYy2!!@9vS>o0mh~$$lQs^ApUabYwP+J4)I`jaMLM9ww#$Eh}q`@liVUSrSZwn#4G!7~!|Im-r1i=p zkk>7x>JppiwMW*lyx)+KG?T*mmmHk%`Y@TEtBuDVtYqhYUkOb|)IoY<8iolvSoord zY|hcamK()XXj(aSa-Rl6d%baQ#USY)pM#}cR!q>1Bap(xLGQcmWc-)Y)H|k}a`w&z z=My}V8Dm85zpBIotDPDBZPCn{y+!omZD;Va`aqOK`C-X&#;bDTF-yae&J7*$2zk_N=^%GG$Hw^OHK-aX7(09q3h@#s-svdv))R_I;${UymCwnvP9Pye@wW=LUXGan25ij`$fILU(t}BXrD&B8~%`4#-cDV z9K`q}tFY$B&)_VA!wY;X5ws|z_V6>Vy|zZhLd9GU3m*MYD&c-K_Bw@!UFto+JxyN zK_q{i4rmwIfyLk5nA~DZ6=Rdgjho`oTLkc-vXY6rC<=$8*O3$xL1+6x9L#<)ft_Md zN*+fSk%jZNk;7xnVSCnUkdg?a4aKQ={^4Wt@YX)qW-UTz%GkpC>@w!f-Z}JTk^ocp zPK9ViOI#v77QaQOKor?a%{GNm-)|pi#;ive8kt7EI3~KXlXSB+BVc zriF`ptW8G>Nn`0nn#WHeUnH#P+Vu)>?S&lGn&Sh@FSOCGHoa8;qZJWzm=8+N*5Upq zmN4`@1}!X3!o#oxY7=yT7SwEKb5r~=Z={*t@ZL^sPI_XE(=xO)&LAS2Hc{3qo7k1k zL#_E+1geDuVl1o6myFs*URm&f_dAhOVZIVK`P`JMzKY zu81AA%_cpy9H^cDnfBcsizI0q_Jzx>zBs75jF5R2 z)%fu9ZH#+;0Ad{ZTl0=Jml3o6QVy6LU`$mzRU8dNW6GhE@a^OI_8_d@bC?y)A zfSp7jY1wC3vAdEczx>MYEMah1RRzvn>|@qlKLoYc%t1-il3Zu*F`LrjP)y1Rf=rK- zh3A|=p>Qr|-)(JTA$*r=?W|_+y{{$fydTq|YpQ(XC+g63S&jIp7}1UCVdSlZ6xn>_ zIe6QirB*+MX}f7Q*~k=NNplJn(NxFJE5w*H;tNPDYeRSyznCjk)9H@jI68k?80ptZ zg&W5g(Iw0$qTkU;8-MFU!DLVNgtP=sS5PGGC6nof)hWF7gOnCTWsv+$BJ|=ELX($$ zCvy&%5wk7N(C^DOFzl!Y>1~p5Lfrx-%Y(_=eSpJ#k{E4qhqSEvLXST6#Cic5T^@Om zxTU(F+0#5++xm&XP#x|6B2eGeWzi0qVzzbBDC@GW25z2?fvQJ>?p>q_M65MIlYMa{ zQQ|Lg-H-(Ok*|mvio)jO0xUUEn74njpln{|K#var+UHZcWUT^ZoOOq<{3B#s%UY_a zeV=si6b3EdUYNJQ4B{3|fJM@JBrq=M^a~5(;D}glO?mD~^j#Qba%^aT 
zwir?MVzE+2m$qHs3j^H=AYY+|VbA(lwZ=Yrc~vN!>JxM;yJg7XKYG|OV;@ZU8ciy9 ziQ|bvA0XlV8ynshsz#CIK%Y}n^UMluMQ4ZS3BU?qeIqe3+#z&MI26DxE4L5r?F0IHs~GVg4Z^GB+c@n zxF=;A?s6zV{TvH=Qz4P%maAa6hXEY3y-%i_&LZpD_mF+J*VDJ{69mii{d8gII5yg} zm=s=qMK7OBhm#uPsOi08T>aP>C!S6w*&chys@PkEQ(28sBQxM#odvF05=09gyR(iF z`nV!D9`11M;bN6E+-%#4)??HJ3aDB*Pys1FvV*(~AEJUD zIa&UCE}h-G7|*r;rU!aNKt$sgtaqpXyyxA`P%~ZecH&phqaJVrwN@7 z&)K+xc{tn=K`p|P;E{ z9PG(E4y!V}k*goYUi2QII}&2)F85M0IBo;>s}$qh5l7hB_<)X9e&kM;hy_MN)RwB!mUOYkPt+~se0e6USN+PMwkwgo-5Y}hfWYX1Wg8qF8 zu;$JedbxT7oENl&!_^MJ@d-#~{{F`9nRb~x2|mK?8_J&9w+|JV)wh@-!N@BoOY&93!%?HsN)pVUlm0LLA%2pv_7pqNXi`RSWiG`Sy151=XOo^8kFj zIUty$w1`Bi78sgtC8{3kXw?(}GcIbO-o-;SWWf(&Z+isZG0`Z&aR5|{grj5UKnYla z(NGCH#cngN!nlMEDcmOZu60c7d`l*Oo*ta_^~OD~oT;kU3fyTcQ1L%pgu>Sf@uqqR zIZ)_AryL8zzr&VTsCcy8d6)hw1;QK9>H=4&44>2g}^J%l{X8xc>)w z$cenQ?$X-8GtI8BmVG13&pzU4-Ky)sJJyqF{VK7{TDV$)Cp|pUY*(#dU0x{9_fzHY zWpDQI+%1pKK_e7JpVO+99m+LvFU@pXE@n#?C_wwPk_0Vnv`_ml1{ z=;Lon9iky(Zqzni4=1q?36=$6*4TGcDgP?{QuBidDWHmrlKD z2#Zf{qFLEmVE?#|gq+Nze9g^d=FcgQAXuQ+(_kC zGh%$X3-s?ca#epLqJ{x-mIb#sx_aVdmhEG`i}8FhmFD zgYU_wi59md{I3(>r3HyTnu;KV1>+LA&1@?$=k9q$hr zC&y8Io(l&#N727?I>6!}8Zyxm-F?fjeS+T(SwSefPL6@MqkQ6G|z8DQ_M6*yqpOl&{AB2FVNFkX-% zHy8b4r%pXX9>`bm#SdnX8QjN=^cNRKE=8a3zt{$49k0OW9W8W{;Pve+J&K9%ud%rg z*5lL%)x||;ZN+qGTb)i({tw+-m_^V;2KFVyLDd9tqTl|P zq^ntg*%n!7v)IQnul7>Wgb>JDHWPa14ABR|wU9D?CS7*;GS9MF2>-r4jD04HiE2YC zIX*>&ZrUY=$0kIQz#Dteec@uFcH}HbXXXaeX0Hu;3sMhm^thc+dc8n-?$0wkZs~UBX$VT_Ft0euV2?Wd5 z;ccT=lp7IB@BLjUSZxZPtT z+Knp`l*`q0_AV_fzH5p{cE+NZc`H#4@Wi0}pARk;l5JLpX~YRLlx#bQ4)!}~Tk&T`zPy2i3??yV9=R-j zoq{i#t}spCF5q0@+w7v2kEGL)L7XRp5f>MMdG*+q$1|71-=+sND!z*h&zpo9J;5Zl zc!*x;cBI|)#lZX&lnv(}k|{N1{M0YIVJ5eoCW=+kpb=gAb=@b%_--Rc%H?B5S_d#2 zib%rmD{RhHO6yD`1msbJTr{L9Z~Laaw+uyt!2Zc~2(8)fqgZeWsq=J-i5S=-(pzw-Zov zS^;LPZe~kUuFy^6BsjaM6wS0(WWVV?p)w1;lb?S5ba2NM*#9OO_2*}UaJ)Ev+p7oj zUhgJlHjM&NC{pb~Tf$Rmrz^^nnfWFc1ZdqD9^c8uqT=%~n01lm77vm&Z*`#hi5#tC z8X?$bCzy|a4$=F5uoA}4*jv&iVBV4r{N7E1`|-wtBeRH;)CAxJ_mH|j@r;^AJFQ-< 
zjP3g`@ogq1kkF@@I8QkM6OKyY*@+tj&y&HB=tX3XpvzDZ{1d`USCaRJ;pBDeXjGw5ccT3WZl ziKe$X@>lj~z~c+mP-!?sevAkSIwbDk_g5M@bP0KVB9wrNGuTc1j4z^_cts0N(#OM9 zM0&ypNO(9AZ(I7{N3UGs;_C*wKhrQbRubOia0py|L#ccM)`w-{+&4NXKFp&*VMoxX zNtP@WsM|Kb|3iv^ntxyue-2|g&`e@$j zQGReY7wUWW;HEMSGLf4G)5KQM&(q~_Q~ybFrdggAn+lfom2;`z@yXCIt^yX8t3%Wj zRY>%bhy6YcZ2dMr5HF9wYv9b})p1#8zfEX3;~35Q?!X@z5Cz*MYVgH}2e03y)0Z#Q z;gy&t1WY$Mi(U zL*jU|4YfB{5s!u{Ch0>VjPcoodk1IYHYpRj=DayhJ2nOD3`NoQY_8RG5ei_ioJOha z#p50u;jV%KM*NK7X!lRSbFCBkb2o9B%|`RV)+UQpx)Z^VFx^KJtq)P1J?H67VR0NS zJPUyW-0ZCIn$>aZq%yZV>At39tdSjMS7%#cXjVM7#!p7)1Y6vExtkVSX+njUA~udx zGP$Rl=<@l}=s(FwP_{S06oI&SaXpW*6vX1oyh%_h*MSd4_Tv5Uw`j500zkNrPIx9w zTCEQgdv9Rxd9Q?>DQj{3Wgq;#-U4%uFT`chv&hs9H6&?KHKp%YLT$x8R`0Y5tZ2$+ z#SWar2Tg8xai$Q|$TdTnkOpjD$HlE}sdV+nG-$GRB=eu~aP101Y`ZAHOt%B^%fa28 zOz$vU_U$j(vcrMor|p3PeldM~dn+n)&g1HIhu{{)U?*D-|z3e*0a9rdET|xen0#9ejR|<9z!F6 zOOPZ=gms^H;~`gXu zJy^TdQ`37%veo-{K=NP~xzm*hgKkoE-71?ECFR46kX7(#i#~D=Q;{V}e?r_0GiWNa zLiqbJ`pn=4JEg6Ho^G9oGkd2%Nue9=JFpmL>>sDIw-e??OEaYSYvR250&H@2GaZzk zKq7%RP8!$1zWu)=eY-@MX|e;oIvxqbB30Ov;|?y#YLE~SOp(PR&>8xf?YdGYi>}S4 z`3lRiW4$tvqxEQkt^+>3wG|nRCF2dfFA=*ULslM^PWvxLflurQ$iHL{iJu$=gPo~J z?@uI0FPU&OfPz_*9d;j#!kaZ>>0oFB2DA-x#`dUqO$=VUsGeSGKMeyOLi%{m5#-%0 zkQ+H%arsSGT9@@D8j}0yZB0J|ySfZk*YX~H5m<`Ef=n`hXDW0=DBv@5T~MlfE@YXN zf?Cf4{Bchr;*M3y){b4E^`@4%Ni&No9L_+7Zxqp$dva{ycZ_CV(!w@oA$Zhc9w|0) z!LGw9cv0hQ%%zvGued~6jL#{#G1!7S6d@60@k=@g#A0e zhduhALEp7&v~R!)hs(Ut%e_j#+&n-t3WBM<$QO@xNRWvr1r=89#@iqQ$Mq-U6I~;0 zr=&=BS??pAPIiL(y z;ZsnmMvE-<;v}SFZ-|qOUGP5hr7(Y)2R7R!0YAg*bkI)=-lScps&!&)>3RdC%ha)_ zu^-O)GJ;mQHX@yMJiSLNGi(FurYm!jBP#&k?4 zmwr$lhYiNW=w(A4JG8t5kzg-ma)h9SthxBcp1&y3`4~msTZ+9dyrKLjgcnbp z1=T(luxYvvyzab%n;v#DeYpv;-Fynxs3?$~4_yy2uglT0|D2_Di&oppc53e41!uAN{_lDjey_Q%CD!S{2BzBSy@ZirxH@DXv5qt-%QM z&I-+<^ohIULZQT1g;7jd%yig#FuD=GwFk+t?O4e%+xh?*KT`j=?t!xt@jX>0G(M=t zY_v%t3-gM3hmaor2lRXGL#u8+;hQc#Z`C@cRrFteV0s0E#!rnK3EtXt05#=e+my|flw@|uUqPWP&m3Z&i3}KBmB_w-n#O(Uuvu3br~F_d+JRx7CFUo#>w8ZjPH6yxMP*^qHp5J8HQw 
zF&FOZru}XJ@1lPGH3RYcDiIS^B3-zscQa5(;o)Cw^}p@G_V?;zCe+8ea62Z{E9~atCbTOX0T<*4$U}cbg)ABo6Zl3h@u~eisU+63eMZSp5z4ypCf4 diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata index 7c2f8bfeeb78c7ffcd7131dd9a8cca69c635a3bb..7eef2ef15bba26f49eb7e79079714b5c7015bddd 100644 GIT binary patch delta 117 zcmcb_xP@_oyh4hlr9q;lk*PtFQId&iN}`2{shOprp-HNxMUqi!qUA(;ZLfq^5nK!m z3?-SlsqrQGC5btOtnLm)-yTdlz`zi}2b4)I$SlrJNiBxTvU!U{@B)R46U$Ol;uDMG NQ!yn>0jd2(W^rA1mwN|K3%d9sCJvZaY}nq`Wqp@~tdN#aC%?Ti=95nK!m z3?-SlsqrQGC5btOtnLmud;ft1Blv(sYC&dkeoAUFRF=(~F@hH;T%1^zni8K_9G{Yz nTvEv18Nm${EK015FH0>d&dkp%.on_episode_end at 0x14c429f28>", - "on_episode_start": ".on_episode_start at 0x14c3f5d90>", - "on_episode_step": ".on_episode_step at 0x14c429ea0>", - "on_train_result": ".on_train_result at 0x14c44a048>" + "on_episode_end": null, + "on_episode_start": null, + "on_episode_step": null, + "on_postprocess_traj": null, + "on_sample_end": null, + "on_train_result": null }, - "clip_actions": true, + "clip_actions": false, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, - "eager": false, - "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "MultiStraightRoad-v1", + "env": "MultiWaveAttenuationPOEnv-v0", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"control_range\": [\n 500,\n 2300\n ],\n \"headway_curriculum\": false,\n \"headway_curriculum_iters\": 100,\n \"headway_reward_gain\": 2.0,\n \"lead_obs\": true,\n \"local_reward\": true,\n \"look_back_length\": 3,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"max_num_agents\": 10,\n \"min_time_headway\": 2.0,\n \"mpg_reward\": false,\n \"mpj_reward\": false,\n \"penalize_accel\": true,\n \"penalize_stops\": true,\n \"reroute_on_exit\": true,\n \"sort_vehicles\": false,\n 
\"speed_curriculum\": true,\n \"speed_curriculum_iters\": 20,\n \"speed_reward_gain\": 1.0,\n \"target_velocity\": 6.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": true,\n \"evaluate\": false,\n \"horizon\": 1000,\n \"sims_per_step\": 3,\n \"warmup_steps\": 500\n },\n \"env_name\": \"flow.envs.multiagent.i210.MultiStraightRoad\",\n \"exp_tag\": \"multiagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 300,\n \"ghost_speed_limit\": 6.0,\n \"lanes\": 1,\n \"length\": 2500,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": true\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1993,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": 24.1,\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 221,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.4,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n 
\"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 1.3,\n \"b\": 2.0,\n \"noise\": 0.3\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"SL2015\",\n \"lcAccelLat\": \"1.0\",\n \"lcAssertive\": \"1\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcLookaheadLeft\": \"2.0\",\n \"lcPushy\": \"0\",\n \"lcPushyGap\": \"0.6\",\n \"lcSpeedGain\": \"1.0\",\n \"lcSpeedGainRight\": \"1.0\",\n \"lcStrategic\": \"1.0\",\n \"lcSublane\": \"2.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 230,\n 230\n ],\n \"target_velocity\": 4\n },\n \"clip_actions\": 
true,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"MultiWaveAttenuationPOEnv\",\n \"exp_tag\": \"lord_of_numrings1\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 20.0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"custom\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 230,\n \"num_rings\": 1,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"MultiRingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n 
\"veh_id\": \"human_0\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl_0\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.995, + "gamma": 0.999, "grad_clip": null, - "horizon": 1000, + "horizon": 3000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -34,31 +34,27 @@ "wis" ], "kl_coeff": 0.2, - "kl_target": 0.02, - "lambda": 0.97, + "kl_target": 0.01, + "lambda": 1.0, "local_tf_session_args": { "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "WARN", + "log_level": "INFO", "log_sys_usage": true, - "lr": 5e-05, + "lr": 1e-05, "lr_schedule": null, - "memory": 0, - "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, - "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ - 32, 32, 32 ], @@ -78,26 +74,24 @@ "multiagent": { "policies": { "av": [ - null, + "", "Box(3,)", "Box(1,)", {} ] }, - "policies_to_train": null, - "policy_mapping_fn": "" + "policies_to_train": [ + "av" + ], + 
"policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)" }, - "no_done_at_end": false, - "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 1, - "num_workers": 1, - "object_store_memory": 0, - "object_store_memory_per_worker": 0, + "num_sgd_iter": 30, + "num_workers": 2, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -116,7 +110,7 @@ "sgd_minibatch_size": 128, "shuffle_buffer_size": 0, "shuffle_sequences": true, - "simple_optimizer": false, + "simple_optimizer": true, "soft_horizon": false, "synchronize_filters": true, "tf_session_args": { @@ -132,7 +126,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 1000, + "train_batch_size": 60000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl index 83774e73e7f850e929d5da88d68d077099b99676..cd832aa1c3eb1713e608fef452dbe168746e4cfa 100644 GIT binary patch literal 17562 zcmeHvcUV(d*Ka~ckR~?7LM)(ku#R02&`3l;Y``%j2TnqoDKrraA^{GAO6+Z{C}8g$ zETAG-z>10rjvd6VsC;XmPy(Yf@7(wPai8aY;UVPgw$}cwve#aFlX*JJGCXP2YmUl} z)EA0HJh@n^N+Ase;s{KDN8th$sWXn{HBm(xa@Ybv7@HF*Q=!IYViAT*crq~;$7FK0 zRIXx?rY*{F2^5)V6yYK+l(dBs8Cxh3;F!7q^^!~3JP{_vWeNcrs2vnZ#4@==D(2ua z8LH;1SPHTOFG3`i;#jm;8i`A>a5hh%fKI@0Tg0#|R)#5L>;4O|P@v4{aBX`qn7lT0pz zY9k>#29x2l0Bwm$Se^dK8&21SR0Z-B(Tp2WyOBJ|AEgr=d zC_v6o6V4XMG^-jwg&KDk7Yzm(GM-R|Nx)KMa$Ew{8c;Amd4viHRfDJv8sI`P)xZ!! z9TR~bK^4Ontdci{9fgmU%W)Af0pR;h91C($^T)trB$%8Xf!YehQZ6PA$3zMt0#oMN z*oFtbg~vt$`6p3oViKkhaR?X(y1BVHY0eGjM&L4x%ah5ayf6h~36TdM)j$wQh9O{! 
z{@Q39=o~N;s{yVMyiWuhL8M6nkdrHaOM<)o4nJCqf2{RH=C;RBvpY$P3kASYv}3Ax)Ff96>AtC&Z>KPoxkCG{hsv zVZBi6KB{Sfo{WQw*ixPt2&)x!f>4en zuI|yw9vVv-A~8>fcy@7eYchw%$~i4cLRyZ=;IJX~0fxlUAnRYPfoLdD^G4SHt{K&| zak+GJ8rYItBTtB|7e&<ndUW{r}8W{=eWUrGgf2 zl5&_KZ6JZ7Emc4=O54wGBIH)IVF#`vt$(@qc8qdSL6QdL4Ahw*l-g21qXZU8(WH>( znn@j$;R#of23%PBzzuL!q?SOc@*Yw3`Jl3rs3qGqq!>`l3!4dC3K<6&v!g~a+O{-g z{bVX;8`4~(4i+1AE{8Ni&D1$MXr)COpyvpQ0v>2&J8J6#4^(18-AcuztIJWN7UeXu zOGDlXZM76qNC#@x18B|Tg~N`HD+P^~HjM`uwrCG@%o~!Au&w71>Kq&M0%@ksKx!2F z&U`_(Q9q+qSw)(|7EZ*01-=;pNNTAwnQU3Chy!?Ah{I$EH)`LY&Nu?#EU_1rd<14u zJ30VOD-P8<3MUrz~Vj%1AtH^{LEBj{1YMp;0z8rWYN1OZVN8`^ z%|?JS!=x5Z28cz&J5E|?rm?Bkh`n|V$s;Tba!4pWhcdlDNe||XNPy9)KS2c7fEXa8 z5UNwpq&C0@Y>4n^1SC+gY-o*Ao1k?tCp6%A$n5KoZD0!wiMI$OS(Lapkcb654iB>C zAXhDR6clxV)UXprAXacCJWiwl*Ziy^JA;2AV!0@^#blAN;g+L4v4Aa;sWQ=cVG2-g zNEJXDT^mg;A;!bQ|Js7=-aI@tYnhWoB1XSW zG>n-LQ$lB7UJOs9F?J$p9t25NoQ2DgIyF|4jOGy=80N-8Zj{GC>fXq>si(TEY2KNt zMF?Vj4C9J97^Y&$l2!hwqz#6FM}cWUA!*t?HpJ`#nMx-2NBxNO#`eS#V(GssOlr~H zj#xtWY$nngajY@727U?yw+~i=sfZ}j1SygL3kR2n{09;>l$v5fl$t_nKvf~|qR6(O z$CM6WjS67k0WLETX+sjs*^a5lq%rlGX3TaT5~aS>LQYfaC(^>{@!G%B=V&XnrM;Be zN?JTUUMpTZQHM@T)=Ab)riE*B>0GUa26Ob}biP(&z0y!=tTc$%Q?^mk<8|V7m3m4e zu6DW!ou<^13Y1Ol&^TOL08M4bBKF!bb$|mMrY>;cqNGVJU^ESSN;;PoX&`0rU<`~V zf{~S4iSUc5M_?tX$o3c>gWR!tZNq`vXI=l%~s$vj9LX`BN_}F+=R@jQLCCOTNA_@ zSp#o^&az|_skU%p#f7wA!!vOjTp3b6v>L;)qS22y}fSi!S zfs@?`fjA5fjgVtVAyQ>12BZdpn31(eMO+21WC9C-3{F!qbj}2t4J#RoMI=IqgUjKZ zsDUEbdsC4RxlgnRKv5SsLyqeU)q0W|7|&h+pwk<<9#Qux~NqXRAi z4BQT)Z#LMCE@fOQQm=tNsg3B6CZ1!xMo*p)hNLwH88Q3S)*go>i3iPmrb30j&1 zYv2Re5w=Yr(Lv4WOl?@{y1`0kMgc_Q(-O4;XlNacrzHYx)XE5IPaGo+nj#j$HkooC z;KXd18$9&TDq##OgmKd`Kk~9aM}hB(6d-~6lc}MGAu3jji4(S(vv&MXX5B=cQWA8L zflD2nHS2{oOj#S2_jVqPrYS=(k*Ez*21{2j_2{W_+7z4o(ZDizzkI@$)S)yTrhkGe zWp2}$eG|cdm_2_ujMr>{lrDf*g$;Vs2!Bxn#KOM@q|Yg z9k>0(S^_*Qn>SMQ=mbbj~gOgSu}^ z-C(uw1!y46DUbS3ru?_lLjZL9MGH8}S4S|7URCWdw7`0-zoz>fI$z_tMGCpOnm-w1mvSpQ>%TLqjpDI9uW7BwqeyOwzU*ZPLW*M#N&z!qCg 
z8v$b+&65+DLc)chN~R)WAwB^~@i>s%BE(!+6`HU?K`I^gsa-r^mmCfXr$qZ?+VAv6 zZ$z555)_2*iy)Xr_eP{VF105G!^}oFUH8zUflisiHM|JwR$?Sv5P|gKFP8A#&<8>} z^rz(#j`k3G4dFiQ3y68d0wRe>#??_@JW;^!h+wlpZ;t3~0KBn?VkL2bMcm@IM@2s9 zEdbt#LKbn8-v)iU%;KlRUlxA@dWUV!EBDC>O#I6ZKxZb zVc((^-XD^6Ogf4I?U>pmeHiSbSX5L_y)nVU!$@8AeN3h{BoDM2p4FY_ zLTBoi>f9DG7;piIi-p0>#^`?OVg6K20GJ`%TtK&;LGmC^Qr|bwb;uC+L4QNcSEAJfy`qIK z{0jOpd4|+A>v_K!+Xc?sPak)>{vk8$>8AKg0}kEj6gzL*&_yr$bxe7|Y4>g66^C;d zYEI5{C^TC!NK0yHzN4&Qifd_Ym!erU82#%|t-HZV9G!DjOApk1xP_lt%+ftVPI~<& zrB_}+ZSuhS*)h`lvl2pr(vq&U5B_1;%Q50g-$#3Iu3G%^@zy!DQFDxkjJndL-LP!) zk&Bmh?ehKdp-j;Qo6(XkJ+JrfI&M>r*@F2F^_KKK%imrtd~UJ-^W(<_@sG~z`Ih}= z#htH%a*x06H!SzOo3Kv^XP;M1_ejVim~9C?Q@m|qq)J*mV_=8-Zw4-%X{}n|ZWs0E zC)~3C50ALB$4uu&1`XTWe{e~LnP1JU=%lHxLr)nv#&~RB@y=#P@QyxvHy%Ghzte8P zoyE&^A8wj4ere2t&VvdDn|+wvahmj9m*?$wV-r7_YcWhgxm+kxT7gVVN# z&HUn+TD)i!eOh!EJy!C)!obe6mb1@~zp<&*bjrGO+srDY#;4bg?Ge{?w%hfh+&hOW zh4Vw_KYQ&p;zZi;g5-rxzGZt1vB;iV3#vW#bY5`ZHmuvYqP_A{CE-?+hfLmf{qUBV zGG6Z9jGUkkf3lX9)OyMo)$d(4mE_(KT+vUtC(Fq54f9(VcgO9LYr)>otKs&A{EMsZ zRhM=w*t>Yuq1fM_TzuSpZNd6yW6paX`rgezR8{+Zyl;=7n32(CXT%dTy48I(J#{@{ z@;lYLi0}J!tq*jv7?eR`HdzPfo+;mAy(RGdoB&&cPugR>AG3Vj11AgCd|WWzNULB} z&ci!y*&S%aQ^Ar!1D1Dp+R4iEHLGd9a`F=gg+(IqYO4054l_2)>mg8z39WT zN=J@KmTJSb4PAOM7GNi`zt+%r)#YPsA8DzkY#6-x*!*hiXxF{wH%>p#tely8d%th@ zYVW>U0q$=)UCnufd)4b~H%>Q5WYE25^|#1fQnd2zsXf#8tv56ZJrVd=cDd)};a&Xh zr*(Dzv_iQ}f4Za3w_X18ZPq^?DnI1>#AWVk?A-aziGDX844n7jOr**7<>u!W=eWja z1vt&DjeGnmow3B*iF}(+kBIZA{KC{-k9pqV%*YZ<==I1b>XzyDNY3n`>%#b%cf6j| zU(9?I6L?}(*QCVW9_iOI_Z&GB@+cxh@N|{J`_PIj=@)x>zACx5v+LNESAt{v4d>Y7 zSHB)wwe)aUX5y~omshu3e&^ABW-0C*OvZ~l4DRv1c&mN7-MmvdzV7jFv~sYQiFMz0 zx*m|MPv?Cd>A$%Rt=r+bTOPJQ(j_QIw*yBj&O76JnM3WPyVFAMJJdM`LN zFfoMTv%xViZm3yQhz>SmN!zZf{Ua)dznJmpdmjDaFx^7|mC+0LmB!vQ8GPrY;F-O@ zi>FTUIvt6{myYC`zQZQX4PEnI=sR>-e*P8HwS^x`-qf~NIgGK=+YmPQ`&32x*XPBq zRc=Eo>s%Qf56_!X`K-&XP_9+_*O&;)5oau}ru~p!nA3OQsH4-R=Tw}U&h@O_-5wme zwN1OjuKVjIY_6zpKP3cLhAAALO)lH!ku-;V$9S-1z$&dNs`r)$@cg+OeXF#cJkxHB 
z<~_k6cabrhnmnQt)t(6$?Cu5Xrmr*Lgnd((|a$s=ZO} zs1^2aOSW1q*m$7BvwTMP5O4Q)pJ&x2WS=i8)4LzK`DDo|uGNF0E7!DlYcKyp-sP5S zk$>K|Yroa;+u!$B<;~I`yxX)4AC-UD@ociW!8*?X`=0bg0}n2fcQaVHtNuY&Z1tX9 zyXTx(QNMlodL2)uU-h}L+O_=oGv_Yxix0BxTXsH2=YFF5f)TR1xB&0Z>Ej09ypu{B zd3)%Bq!;HN1Rd#L-1AHI4!(Yzv2}4pd@0T0bWKPtUg?%-TAEh&up*3OEpd&d8eCqz#zWaUphun}sor90A zH491IGJnRtkE)Z;$G(&9p_kphv5B@m)vtDIwmA9l!DW@gqUqmis`pf{b$YXF)ald9 z+f6>7#qM+Ywa>|$hK{?}_s%Wb^R?^SAK^thW0vN-7nUsPbLn%!tQ+(pR)&vQ`$aax zH;72#nQrATBTqUmx!N)6@T0WUO{+uNbt@h{OZ0YQJ#9qFy;Z7=GVM-JlKFAfmtH*( z{l4kb)y(~A&K|F~(8g82oYc|!VGpAM#jN*No`pR$>6KWzTy`^R;>hABj}|+`TOPae zBjIp$@6v^57Ykl1H~A`eA3v=4;B8hqVEWNMHQrtcQ@QVXTJs8Om@A&tRM@>+2X2NW_ejrvZ%HcK>@Gj{8{-Z{EQEZkEL$f$zAJuyWo#{g8^5NM z)*Rjwu)5eK)cLTTdu+k!s?VuMqpFs+t+6?}Y>uN|U+LCR?=5daD(jEEFFHB{&p23e zIoh>%S6$AKK|yJ^et+|(yMNTqrJTiHcOpjKWhf1QbF!@{Ev`eNX%ESUmLUD64N7s+hB#98g3Gp&ygxO~rEX1_g>Rej$XN>P-Xx z5`5%eMQ^3>#-u}-cmqBCI&MWx)B&~*1lOk#4gR11rprjG%jxjCo)ofJB``B@m!7|7u#mP$X@nLI^c6+;rRX8_G?dU8tVOF z#e%GP(K}8bmM6xCj(xs=&kT9r>4zS5Egc)YYxQZJm0rsPj>$Hr@l|W8^gkZYU-doH zq~7gf`Ix~|yz9nKaUL+9HQnI-7A;nSYCLXM|7t&jwX0(9iq7p8ZrnLVcl*V_yi3P+ z^mlkrPIkO@boKamv$CDqb;}72R;FNqr?fE}HnUETXX))K zpI2$uJ$`1c-PwZFqF(!SQazU~9YwPL)s*Ml9GY}@`bX0b^@;TFm$ib= zUVY@cAa8AqnKovZzo#rScSrD|_4&tlzT0N&&N$G=WLo*kD?X`buMB@zwD{V~ee=uM zJ7%3`3t)@kUh)J;bA7YAd8o%=8Ab9;J%)XOV>fvj_I)tTGFyAK*t zU%T!Jan$a+{Z+s2UuJZnkDX@OA@lx1!(i+DPDZ@lI}Z zBkvf;Cj9W|=5VaN3v1zv2PRuDCx=#_3Sdkh-y`RTVK`@aFJg}A?p~%lXCE1Q zprmB<7++Nanf~l@-3_IA@6Xd7dQIyiFg|*;Lb1>+a=>)2cZ1%@(ydp2UW#!-;@yh- z{W0NdZPbFuQKp**RxPP75pS;z_2?B(9(%W+6T}T4=RCu=*z&7n-3Q|r$Lt5^A0 z4prTAEXj(m&EsvWy}5*b{r$S}B}48?hK%I-=Z_kv@A9OBYhu~GwugpioZ0eZ?U_U4 zxn;{l`-HzAdAfybeD#=b`IV^3vbnFP)EDa7mKcm(>SR}bOv*_cbM(&d=5e%dqlEH9 zRvp^yGUJS0XuV&*G9{+i^|D3Ria32+y-7XC*6YR87EC!5zrffkN5Avj2;E5wMs`^> z#h+gOqwcphc&|R0oqEmt(_>v!+ruM$bk?^yELkWR8WB|zlWQF?(Wd|EgW~q#bkk3N z?$|OtcX4kkNy3GW-Mxl=jzM|PTCX7Rag)v5*U z*Z$D?JvGyEm`J{KM_T{;PqS{*74ruk8WoC}=l-F(A+CP-(sLXA!yvz?%YQm7%p0($ z>V9Z~>B-O`@_~k`8U#CnSF|lh_PWRypwd5;#b|w~M_Mgh~ 
zy|3h5x%G!nl4Iw&p<#iv1v1Z#i=7r3*qWa2TskI9xy8OHxaOXOh+n5y@_pB_^`{<| zEw!oBNhwf%TH+k_M{Js?$Bl(iQwz?;y!jxzy)HcV>*AduTdQ37wB4U09*lMjmp|=u z!sI|(htN%fZhm&l{p@<6KQr~ynTmw@#u2X4p2eB>g893PvyNsx`G6fBKgXhEOdWgs z0i*U$W@p@d`f>J^YqAALtj{qRYu-N^+|KP=Ilnq-`NMFBq(wfDOnvXI)=Et%ht>6pcDWTsk6!iNO)GK8gtlVF+(Fra*w(zb^5S`UiN_wi-RZ0ww|G|J zlMmneFAX*`zczE{q2)V-DA@zI^v)!sGT8-D`*kg`=iUT9mQSYd#P6J$K$u zuiM#?%xt~$IyW~BCfIw^wvYH?G}4XvjF=l8HY{Xq&S}#U+G2b-ndj?MU~C=Iw*HcB z;y~`eEy*93b+;KcX*IJzvOMkk_?`5-`jy9KjAouU=pOsfwb;{Qc;U41i{zWS2Mg>b zzZ|}%`0$Tw(~rM8`fT~tlt*2I9LC+@T`#;wn=Ui&P3@**7pV8 z)yv%Whve1E`RyXs9L_&&yTK*X?0Q*1WQPi;mtHs1p7+OQzQqOf2r3*d+Y2eQN;p>^V|HrX zja#k@wQkx~?Ob)*VuYcfWKD-;iw*R|38lj)FQ00YTX2nCRnya^=uY382lsz`ZuolE z1>et$Kh`eISZ)3Bv(K$5%LYB{=e_h+(%}fcQ=cP)R~o)_C>~_lHpc#hI4ZzvL%M6y zur5|sxjL2iP6T~;_^@{UQhdME|~b$7J@1yd=427OYH)IR3HU*t@e?T{5&8 zp8B`Q+%zb-?4jG z^^nY)_;H+xXAHX^%l=E7E04{S-Eywy?yYyjR(){=dOfe z5!|t%V{lkOGMewOq9ZPJWh)NRf=eCLzci!EGW-D$Z!$N+iARg0*p~NXnl8ss=db9- zqY!=Z1g4@+GN>V194e^qKEc1Y%bL#<{yJYcO^}5(009?kF}Ya%g%)s!icqi>q(1uU z26PETN$ROD^uU)Syiso=NCUW5!$G%Eq3q%+^jb|?$RfBH+*0PLFRH1p2hW46>+1hG zN8cBrK>s+2zDxpTwlUNm-C0&KyOBl$I0tOJILjhB!gQ~9+fWx8sQ>W;hD+V47ezs{VpOk&*-^JO(0@9kF9R`zsj9yW0lF@X Yd*r>#ayDrO|J;kd45ay%hf?A6Kk00n)|rod(J-l?6c2q=j1WP zDCXcw(n>B>h}bMSj^PrX44Pz0(g>kTlPCZ;jkJ_YSv-+8ikMnmQXh>LC=oGcY_TW< za;iMT?isP6-oQBEd9OZ1%LL`Q%L@WU&;E8aS6g6dVF&V*<0vIa`kuZxiQpn&< zCKp1tsgN}Zli?FF0WMNTwI|I{JI2NZ0u@7IXmqkG)rJO1q<98Jp;&}T@l2K!O=>EW zBL=}F5*!w2!j-awIJB|TVJ=+`PpFKFCympjtSlLuCBR{XaW*a$V?vxI0tg$nLM9gr z5pIc8%*JIhu@umlshd@tj3uqr?}$GzJBKHe1C+VCRV+cRGT^v^G)6B%*i9+ClKN>Z zp^&8v4|r8=;wM>KO&StnDK8rsk2cCoCg#d9P0MW392m$Ei*Sr32Z=$eGXcTDV9&U` zG-^LhxdL$}CSk!MWy;Z(ooTemCJyk!062z^gX)E9U~xD+L>26Y3cN)(1p;{GQn3J& zvP5Y(bdRG@p)Spjm%Dqv9~wRTySuk&Ol}FoAaE8Zla+@LfSk(`$nbv! 
z#CS+h0o1hi>oDLerPDetS zCMN)@e=E9?)UwzhrW66*Kk4P*s@?%iub=cH6=tR3BDrd%tsy|8L4iDP(~0 z6DlNNcX4W4Ku;-+RFjLW0VV=F2%{LtVla{aVG0!jIggS%CePEuof(7PMr^M|4sixh>S9t_g}{OF8Y4nf1v4z_bh$vS@9f28zO@f*gg+5MsgrW?LxL%VelE0&{N*BRFxuG{}2E>y6aR z%g|^kk0aD57^dP<+gQPl%0}SQ2(dy6*gXah*0Ky5#MFW^Ti9!>LrGlUEw*|+Oo8Gke&JfEW_+(LGhDae0G;1D1 zj>Gvtf_FerhW2!}x8KE-Pv1&LKTI5n-Nb?>u*kXZLilt_$ zObYb1a?A{fbXlozN&!FtPllr0p8(-Po(%Q1u@EqtYzhNRrI2ub?gr6HrBrAp6ALsR zpcO|TJVqW59XWuCmcapKz`#XoqAD#$iZj6Uv(j-)iX7cf=$Alr0?n;vlSp|YIR>6q z1orcz=F+F>lXv`3EG;Cdb12tmWqm|VgHeHM&})LE)h_iY#yR9 zQH;otnBZoRX5uvfEpcmF87v^cgV~m?OoiSb4iimdMPi-|ZJIl?f0Gt#I&)jJB)6o8 z!DdM@E-Xfz2~yP7Q4FC1UDPc5pTH>4HV+>fn$Wai)T}Y+E&wP;Tx-kNwRu`;LdO$= zjRM`GB-g$5p{5B84n`XH**Z#ShT)0ARl!-CXh3p>Xt&y)h>A!;aCGK%Z_`avKw)rX zr{wX6(7%ZyIZFX}pufdNqHZli-2_cx#n8n74D$49F{TwfLmeYGS=ZK)6N+dtHi6FH z|0?yPctRA!1XIxeEdT#3k=m2CaW#Mz`(Keqgi=LVlemqF_H_5~YD?k->_BvWj%Fdd z%K+B@1E5R>=9`y61u|{=YXLi0EQSytq*M+0PdKA+ToR{t2LC*Jv{;M<`?jIzlr34MwE_40on?+sG(Q3Rhio{4?;AbtEIL1|H~XfbTBxA z{%xDN|2=lQ$t3=}jcaWUTbL>&uUh6qBfivKl*V)apR#h=M^2T?Pd0&^gDxbE;B&-? 
z;gF8dg_}Pm+5U&CKGzHnC1g3!m8R;36lI50@2VUJO4V3tXGKtogbVp64){(`U5=8} z5lA7kVdDMZdADF!(oiUddpO9RK!ye7gakO;sX~4!DKIWpNgBhoy8zv)qb!LNX^C!& zRXHsxD!J6(WlKSLG@OI1J?3tf%ForZFek~E2C2~#<&c=S3$ z8K-n2{79o<2+NvVqUceyD4i(Xs8PZGIW!Kv3oVzPM$4h|X&fD2n)*q%=}E8YNx$jI zz?a6MjiM=?Vo5_a!LS%s2IRcZJrNMr^o+4GAU|h<-fYJYUN?55e zoTH=|oP$|FIH?EMJz^yhS^&I7cd;U-Buh#7#8}gV&1iHZx{3Oq(%D%K%cN3_7@QE; z>2Q^cVX$pR7i{$onA(Exd}@*jR*xbod8uV_X)bw@7P@3zxOjiA;ve zVVC5H@5H!VL<5mX_>g88Hc`P6pgSxyjSGOmr8L|g%P=XN5>uJJRJujx&-`(_EijBF z5{UsTx>SJSAi3rkmY&JMxh%MtMol1*7}nz51lA|!D5zx-Nu(iki`f)pCrnFrFqtA% z1&0!rq$!4_3B+)6Rn4O)P!`0wlNK$A!X-*#s1p(FLqE1HNHCErga& z&()!N47GZU)IGYQ0aTxnR$t3e##%j1qvXbX6OIY=(9|=T=IG?p(OfW_9`u^!=;hO8 zS3u(1aZG`FbM#K9QRB>7jMHrzr%#R3&!-pCW#-h#cGO5{p(EXC$1z9ma*J8C9374Y z$8sKwRrgp?J(ewd)cQk&5=@k`hD#i^hr_5_f2D@!I#GI2Ho@MYmb7GMF0hwRqM_sP)tcJ);Sg zgam|Jhv}{)4O9`MvOr0UAWeMHy#`w>l=v%S9O=OZ2DJ8cGrE#s&QMaCNBaGT@{LQH z1z!v4|KK_a86hVWLYF_!X@iBs ziA54fC3ur45Gk+?QvWO&OD>0J*get^q2O_l$3RbTHBG(fh$P?=sNw`dLF)5lA}CWJ zqDccZ6LPwiLNMy8ykHu*R8+!Y)7&J)(^z~l*hla)PDCsbN@;@%wSjsLqJ%VJWvKN6 ze#`vCEX76z>;oD2WEjXApb$cs~riELhRQ# z1QEd^o&RGUQ4;K2C4t8h+*l%wNFsn*Ub2!%N6w{;WFY^fav~7HFw+o6)j4xa#dhFh z8MveEq!pFnWQ(OZQxnP%+o7kDI6+jx?{Z=YX$%el?y%ru6NL`?kn92{i=qaF+8LN@ zIRnIJEnlEV1rWbz9Dy9H5|}|9JA7B%&{2c%gJ zG9)YxVlLI9j_Ue3utXdOo`@~Qg^)vV5KA54T2%c4_v{XkLCbPraUhoMkH+lKDSdr@@Wh?}NR=_f<~73qRo3ZVzn)Z8ay{*lYt4Ab^TDoj7NC}%>L1c@4)1;uFS%m75`;OH0F zhgUJdU>Wr^8&3}TZ%nO(_>(l$)W?zLnmJW1$Q8#DwUj-691{TkjHY7*7U@Tk;~PM5 zrjfeG|7v-(`SFA0A)abXk4oeeWJg|_2$-QR*MoqVCs2U&!P%xARqvK1!xS=zzu}Oi zkHc?t>S;$kU@#LZ7=Q*F2z69#U|cjShGum!2ugK8h6$HKZ3u+AowS;%WW1*2FUo|p z(&~fYNmUI7HWWq3(Ma9k5eZN+2_sY} zsH(Z<5wjFz2US_uPZq-O4q_5z#J8h<0VE3WU?*j$L`=ghw6C2|h7)JYp)ioLg;W9z zE*_Ope~`ACHknW?mZS0&&EXYT!6B`|N{FEXO0~sQttTMttZoHmQWt1uvUq@=D%`X{ zH7cmqO;zbdwRJ*8UsOd8WvNjwwUwyQQx7EoK}e`#QZ`h7QXp!bfnrjs9aV+4Xs3}~ z)fI}cVyG5?@<#1`U~Mx#&_j$Ph0Aj(Do!;J!jhy}XlfgADj)F$O29xNn%k{ZkJ<^RAox)@E|zS&4f>!w3a~Ki@HD)lm^D5J+o1l z?ZDjtmkYd9717d21K2k-1t^v32~Hg9U58y(2vudQq%Md6kRie|kubn4?bXnzzK=RP 
zQwWDa2&hMzG*?2Q^Dk*65es;1@B>IfbyykbhQK^6OUOh8L%58YDxL(?2f^nerSmju zh(3q9yCBYx>L&5_C`B}7)k8sUffh@#1rsSxc_<*iY=V3l=2 ziW~ynPX%L-W&=MpL&=W^ z&1g*qe>61yNXKlrk)Y)@YW_pGkV@*IiUTDdg8_UNQZk6C`D}R5o`VPPs=-rXF!i4r z+>R=If~vn%D8I}6H!LbjQ$1QTEaxW-n;onRo7Qp)Rw~p1{M!_hDMFhD8;jt24B`T@ z5bn@a99O+EG1Vm%x+PIVP!}W^N;aYp(=a9;73K`2f=nbY?NUMc;Yfg#8bJ7SVJ6sD z>Q)8y>!#yb|3F=gM5k%56fH!it-T65XiS}5^7N$lKkQ#^J-YO-fdv^kUVCdEm3v;_ zv*P=rz%Cs^_I-Zeso=NRo%L&u?{Z+4`sK)7ua(-#%Kk1Xx6ArPeBhI<_}zSShsdBu z?P?VEg$C*C%WhV8JicVD@-oMw($W5L#l!%s?ooaXtb$c3lM@r%#uiG~2^E1h>A(F| zIL+njs#sH_X(bct8b2i;dV0-$QTI|;sao;Io0=+^Lh`)#r4lePMZEOsL!22XY?A^E#Ainl<99bakjOv9_Ll`EqPkM z$~FF+_c+#AIpIKY7sJVE_B$?4uz0cZ*471sd0P~9PuyQe$XB-8-<~B2c)xee=ggN% z;e|loNVoHSbhCI^?cO)H8Yi~jemro}+^W!xTcWCq_AY*4>ilcZ;v$=nQ67%fpB5J_ zyHEq#01`4VeJUL7-eN8qE;>t5Uo{DkeHb9(98e{jp#`*?Feb>F#> zA?8W%r}93+W%yn#us(_B2hTdtf=9kL(B^k5=|k;=X~&30^AurRsIs(tEME4NzTn;;3$A^gQII}!)!*gP zdTFXJSy09}omNT@9q9M&)3)S&HMGCJ@80=2M(4wlfbwhk+`s2{?tZoKspI1W^83ZW z*LNnae(Yx%@%kox(j^-)<4-eIc|b8w2DwRiEOvwLd0-O&A0(r3k6*{Q9P_>XIUGfL02%g9+Z(AZ+;;}4HK@T;@C z?@us&nAHB-=0PjEo;P>29GtSg=iv=kuHE6`F3cY3fA`Wo-IuYhIO6J~&dF*0x9{q_ zhV9@Yj;tBx^P69Q%jPM)8V;CG-^M(->iE}tbC1`b`kZ!ka9@9KhtorRwv~v0eVDh+H`;Ed8!52?sCtQa(6pf6ZG|y)Ka)!tLj4wCVE$lJ=SG%zX zM-9l^H~C@N#Nr7N<@}QERlY)(&fLGpNBtHeeq1l~YEW*7ntA2hbBjH0n?7~AUYd4j z!;!xBJ0Do?u$V`fo}6^=y~F4l{<|-mm)yPiEviF@!bi`|Ht))1ys1CB!Pe1yZ)kOl z{?3(!JNI0$b*wQu>+oUosM2?T?s2INIiIwFwxPze)6&x!=RO{;PwC*-cqiL1B>k5e zVI6~BtcgGTw9ADhgnw|J%-8a2VgFeAwxPF`FR$V^%Gim|hwf@Gx6g7kqmAtT`Pbc3 zE^<1t1Ja$A)m({{*I)B|nLEiK(tosI-rUa>5tX~FI`*2n;k$bk_p3+$r5)`T+vmqL z{t?pPv;B*nL*(cUH7f_{rznRqJ*|_-1qmO!B|k0wbJA(fo$1BLZd}h>aATi-e3nbK z(N+5Hhue+#Z#o%W6pKv!Bfs7~BD5R0E8Tbh>hHMqbCap_54>M$;IMh{j0YWG>v$O* zCfpwDUV1sz$<6p^;^>O@lSUmn@Y0C2v)x|PWZ$63*fTYsvcKoDQ+qng?{KHa_`ZB; zdpbTPW=i#Nr)ux3R<-7Ktn1G@ZjsqJ1@*A8`Zi|Qw6eYHeEV)y-uW_y)$st$UjI&= zoPYWkFimc^@{%F~cyt?YuWbxsVyEnHnc;&c&Xk^}9ZcEm9r(PP!}BEL6?=OeU>hq@#b~5Q@F&xZ05+{+)S?Btk|b&EpJG#3dw_OC2nVG*C%)Q-F4Qd)MUHbEt~zm 
zJmPg)a&sts&Yg2IaSvJA;Sc>RUT@mqJz3uj56zjl!c=fKFRV-VbouNxuU9nm3A2Bn zA5na7gm0$#oIv}drn}~>@VZlZ<(&QUy1jl69~_z#N0)y-ak2lbkIRyC^e2oOeelp- zTjAG({@Xe@`(Avv@pQxMOS_+3>ih0a#+mNh_{RlfT!!`Su>I3to2(Yc8N9h)@;;q& z;n+aexwnQNJ$G$x7rQCF2mRTJJzFSz?E1ef4nKlZLnAHJ<$ ze)53Q>Is+y=f-vCsOYfN+Q-9PSPl1=#l+sJ@Gp{)bP z<$Zsc^w;6aFk12K9CMqYPWi6mdz+7)aOTdY>&oW?Mlw22O!(S+*2Pf!&QmrO+y1%p zh3=~4iy>F1#7&;(+Hi_BTDJB8(_DYoviZrodw=`5=Ger`^=1}t9kOO@d30CcrOf>j zz2!s9^WuZ6h?G(In?IGocQ$B` z*QWU0bN3JIakkf*7jJ6!uj;jrQzwkBJIZl&tuNr(Z~n3=KX)Co!$f}j7oiKN%g{e~v$M?}$D&2SN=#xHmgN~nimSIzV{CH+> zhi=^l)o;Pj*%A7Cz z@+$iExT9zP+MOL&erN32CqzPzf=dbG74KF^X6ny%ym$Y>##ry7yr8`6u`j(2_DQJx zc;A)^VRhl~ql5quzFY=Z$nez$g^W5d!k5d?Q@HAinnqKkDpL8wA+A)NH~4Yz$WKBa zW8ou;4gutN^wxH)jzCngx*m;Ico5Cb7yidXxZK`DsUh-zAEFAh>2wM{?>~g+ZwEj` z(@ZpA4t)F&XY&`JuD$4^5I!KX<}XAqZLV1rSQi7#?52o$n{3RtvTjqBF5=#>4#b34 zdrGILT?})2T6NTJtupyVzE8;QoWhC`quEntk2BqR?HKRm^Ze+u&x~)a7BOXy3!S!YewBzwf5|tdIWv2zo68sO z+vQ{4+f+1uo@i6F$K`{Kl~v5G!H*)UEpB9+uyyB*4fUGgXz4a+MmG`b#Mo+Pu*;wo zMSEunGS=^L=*Taeb9iOl!;2@9Z!Yzic;Nl$z`M-Yv5_I)%s#wac1c_{d#2cjm)Ed( z?ye>A_Vqp--(HE(*?ps`+ zw2O_sI%v%xb`w+!Je_*L@q3_N(ht zW`RjW{lMTAmcMOXJkRf=xiQAvDIaoeQMZb@QPz=x3ir}8t34W)-!qWu?5@ALI$&k( z#?R(?7KwS+j&^-MIdU1TxUg!`HRCRq$uns~cJqXMMsZAkv%dLtwjuvCTri*ZiA+A{Q@hdKcb@gg1uLR# zmSjgvcQER3`oy#TH(qu%NL#JH&uxd(vX9m4@2$VJbWvVDPw#u_%O%s=s#&I;m+y@7o-kwVge#7NULA9b*>rh8(e3kJ z4Ufb~yU;fB7raO|))#cx-yO{I$M81=&l5 zCZ#J3B87jhsrQ|h`z(N$7CUFmxIf<}&Mhgs>Ez_ddCdvG9q)K^mW@-yyR1VdByrjUAkd zddLq-JXaiB$s2q9t^CC>-5q7#Jq;z{;ZqL;n)f-rbVy`g;l+36DSpW&bu+g{FW&QX z-My_xSN5o0eC)`Pxn2pE#%)U~y<@qja_5;%vcM4mtLLuu^zPqLV$SgOch4&|W-jzM z8#$Ge*Z5W0{^ElQ{eymhC()=me zYxerQICe5FC9wX8+7Q$NnzXE><7W>e~!!;2~+lq>r^ zJ|e_VFFx6QOVp>CVR6S5I-wr-?hdVR74@hOa#@_c;{_f!mZZ&2TzBfx1OuHxa_h;v z#DnB|Z;U4`5)51Fo|4!jZeP)KQPuK?Rxv@tC1J{uca#Y^BcAz$-;Fr&YT|pZ)rUO) zG}g_YQyG}}c28*ZwDZRF--EwpmB#%cOY|)`w))-r+}R_JOF9iWv}EDBQXh|LRsEMO zV4GPfbvw^7S~YdRgxZq}miKe}t=2$q%!wP)S@&O@ul!bRd~k-IbZ}8OuM)j{Ztjk7 
z>}LML0hcVNFy}2A9)3I|i0R?hSH8BWFMXU-O5J{je)78Raw2HEc2`SCDL<9X8o0_?36cyPh#EoO|i!&4euAYdPS_o>*ZpnQiiV_r z4u7V&8}>c4?sMFu+6wZ{MZ0f-(;MsA8>6X@)y0&|!^l^PKQS>>O(fe5b zT*CSr>-yG;or@p7FEVR*|996^?!GgOy-uZ5J+B;IXF6r*#4BTnAuFCOoL=K3IRN$X=H$Ce~T4|2j5&$oAQOJ)W-dW9l7F^xwJX>(#GQPtF>^%gZ;v;Cka2 zLpS;Rv~&C(U3&!SH5l#~v*bp~e$Rvs-RqsVy?Nig;%NDwQ(P|ZG^-g|y47h?)vGCa zYqKmY>;|8#iN1jKcfBKl5{9I?oU-j}`xow7wbt!{o#N=noVYiMPp+qwI&JhQIIcf& zpX=8rOD24c8o#eU_xF7xO4uuP16;c2mHRP5{ruAdNzT&Y<=;xzb_UUTlS zVRie>hl7@zr~j&JXX!Y8qM1`aJ;Oks_K^uW%O`f8GR5(iGm{6E7(K5ybX+kqtYZ1& z!v*(m*`EI-T3meNh5c-+W3!h%d0kznoAHaUBJ|YMw1E~uUX>{qLi%j8y6M|}e%;s4 zjQir^QQvzmN?5z6y7sp5xf7R9H9mYC`giWN$D`v@KQVnhQxh#Vw0}L|_uD2TU3{YM zuUNbP`+bkQgk$C5cizbhgw?Jo&#!DKtCGD-=OmAR*ZASf+}nqrUr)-|_+{rIvmx?5^=kH?+tZk$}rXt;bU>8ifZlXV3yW(!OO~xQbNt4Pk)qd z+M>T8m;GQQdtR@zennX?WL;n69kHL9R^He1m15<#Y<+LD3VY$Rk_pZiUI){JZo`Um zsDN@ggmWlh=C6Qnny{Av*<=K`1v>KoA=eioF>NhBz#!-Z_FC;IZUD#s>+0tD?Z zO*dx%AQ__i)-)!CuT3lYCjdWO`l|3jP5_gOQ63N~Mq(ADA^O@>8VltL4OE$2^!FgB zKL=q#8l?+hHk`|awo}FEr-ZbYrExGw5b?DCG)Nxgh*e)wMt?9vfxe1^N;J(-su01W zbP>wjm*Z)M^n?%4)WYmu>`(#tRg^jjvza>=+Ht( znot#^)VD@};LH7~A8H2ej-fqOsHnfY0bl8dDpSbC7{gLk-?oRL%Iy2$ec)0b+3r zhj9#2aSbR-0ZTrGaSmf{b!~8Y3S@M7aAgW{4_E>K003q*aS%%YO96**5mIpyC`$ua z0RR91JaH3FPEJby|Ns9=|NsC0O8|6n6k>@(_{ZcBKeeY9zJ%;=KRa$1zbFTPCqKrB z1V7Sw48QI>K|d>19zNh*c)h))kv+dxwmq*L2fr~SguTqOc)#~2AiwU&c0X>hPra@X z7e72M{yxh`x4!Q%sy)VSs6DN2guO0tLp+Qbr@c9F;yVn2h`mK-!8><_Y(0BeH9Jqj za=qKSB)=CLMLt}5hrjrfNxyz=(g;6=W`}zll`-E)^<=-7Duu_6Iu*t`FV?i1HZ#`2Ze-C$^SldQ~1uUK4PI# zD+T^jqhYAD1D^wbVyKK;=u}k)!Ik{HQ6^M&>C&b4e|fKe-W>SCL#KHqTl0zY@oC1L zWTlseFj{}Jpm#6eNJ9rzoGS~L)kaD7`%F6FPZ{Us!*SrgZYEyqZqjpNndG9{0InKY z1$I9d5Ls6v65QQI7JRItcz-IWy%3_`gJbDZrGxO-GzvDjd4idL0yX`y7VJ+KQ$2QRG_4);`NltVx7Pl{Lg=yad8;GH9-{6Eay{`p$7OcsgjpZW{?G6dtEjBsbvK z4|yd2wGcT}H%99u^e`&qBS||lL^3-MkpPhdqZ%SJ}4T?)4v(=t&R69P&i>7Hp|viPpji>nvycU(N=`~`PXPM-zc*)GYBK{ zC&W4G}*RX=}?TOAuo7EWk~@|Jl-@Zk$oo699%4}YScj!&oOKkg#?Z6~7m zr`hOdTSa3`hN#e+LDp)^9qKD>4SE|i=&#;Mj8FYCY&tg!3N8tuZVqJ>d^%a(*Gtf~ 
z#+s=Y&Y%*ZUaWFP7*W31$p}OxV6u2JN!e6`pNrzb@xC*WZ{LTOb%L!gbk!j>B?m7r zj%S#r0&FTT2VHk-e75oioj$Gx+>drJ^0AZr*y+L5xn;`SQqN`jb}ppLm#}1oum)&( z1NF52P9nxF>E94PQYUo^I=5T{`REZUv!E0vPs=5T7TltC+Tu*~@FLu3e~-%i^rt=I z*`&3qm`-zVB>iT^n6pL|r{5Pqwbo3W(PvCHR+p1%yCFJrVH|n=%nKqXWsx6qrxG(0 zDI7EIB32m}$m{NQP%qg>TIRNsv-heXe(E#$T$@U_Ef%69t@|-(Ml-yfJ4QFS<)A^+ zKKNnynAv#Z9z7u~hn2$CpyPHNOa9)ZO_Br5E&o>HlBkb$G4AkW;zaD9;fTp|DruR| zZH`8vGWDpR1tC@1^!vW^pj5@d&WFSNTNVvAY@bmIR{g?2jWx6D*V@>&euBS}^yp4@Nia!}mH@NcY+Ooaz&u z)bg1p621zou5Ccoa6qqX>+qtgAiis*Fwn7&+*X&P7j`~`Cn7rFCi9u3NZz5jZ7ZN{ zStxd%8KNVaI#@0jj`pT&@#ow)+*`C3@}F*JkLa|M@~4MjV6PZl=}TlHyXHgt#!ULK z;~o`UoP?F8jZ|A~3#<&&Cb1V@g5zX0{4`q!TE+uFwXF>8MTbF28OXBCJ9KuXGo}SC z1=umy;^Vm`aA)dsy5@)w`iu#}%LzBY@MjwG`D`WBFOlAoM^e@x1#?Qk3{&z)1DsFHAe?p4_(i&z`DL&N z&IPIB$2Wm!r5cHLk0-XW9eK>2*HZM-*F)4Sv4OC3bhpzF?#!vgD zVAMJvMQt*$x#10SZGI28qw_i}dbo`1I#Cl{_J!fq=~+bg+&+3NdmFevi^BHnPSko; zH`;AXhDWl0sl3B$63kIVdAqMT^raZA0#D%g2nmk&&J4_}{ElxX#zIk|BK8ZI6Q_P5 zxVK9lUg%80yiOrT{Lxj`;rS*6K&-Bmp)2QSHbsfrl4%=11%jdNT5*x z2E>1YNv9*QN-Bf~Eb+q1tYW$#%7;C@*^@ZGDaO{GOgO1D55`(mP{Fd7p>6Jvc3A=s z>5t#in3kc8(e^E{MC2^2Q#gZ_npLVro~i5_Ic1tkQ*jWYYd# zdj9t$^!4mz|6mIHeSsEt!4pF|bXFN_=cG`_l7rY8Z-y5hXpkAL>p&oHK4@Cc$0(<7 zbhO|Eed9vNilSle=KHGV9PK3PvHmn|k$yv7Uz&ttipfCiR%6_&_po349QEAvhwSUp zgsj#kQh59r8ToU75|i1`u+t9C8>B=1QGS$?JVTD8rL$)bUZb0I+8MqrE)ZdSkvUN! 
zYO!4N4wxOSVc#i^v*vl7=oLIUn46sk==BnV8eeRH^H;L%U8gR;#w) zwCf&Nmhg$OjUDH7L|;dbqg;~LkiZ`2Q=xTXqr}+tDE77cK>uDn;-@?lcaRYHzPJK) zCv79%R}QiD&Fz@CP84OfM+0A(5Vbs#kKM~O(Q4u#owp|i3kHJWaaI;-_q$EqIWK8r zYc)M`P5`og>%z661{z;l1vk$u04B-@o_3`(GBSU$Vmut?4E_d_MP;OkD*)D)E3xT= z5H-JO2z)#8@FinMp3Hv7Ja9>*(gU`*GAj=Gb}hz}vyNhD>{Kf2G>=-3_`&OdnfO*e z2S1rt(#ENWITw9uh+JtZowp<%(v5WK?$goWl`lt^1E=hfVoMeY~lUdR^jMNP`@J{6*xg>^8*}Q%D-~-^Sh{30^xv*hS7mH2n z7+HnAke6M}jD1eS^n}Hz_gNi|{PKd8Y1T}Xj#)OfHQl6T0F8SF1kX8aU3 zU}zN>r+=h}73}E6oX7P1pA&)4f(s|6kOLm;c6Xp)>r%{QeOlGyQlf zWR_c~{C`x4LVW0Kzp2SKF=D3!OdV&4H0#vuH07$bm>j!#*UVyk#B_>cu9-u>p4rmt z*PF%HWtmkNUTPY**=@FFW29-5?o=~{POe$zOogU~X$8#>M7A`i#Q2(d?%6^4d)Cr(H0%k&f5#kcFc}5-tb+fsqkd7S?HYb(79eK z75;xq*uF6^!sq{$VG942GVed7%y$b_{2xK3#4F|>N%U>WYtH5cG1TRp6BP8T;+&sm zbkZ+BaG-oRcF_awALyb-{hUx+KaIY%u)*_f{jh6ib@7VC_mNXjpCuUY}Hee|9MqS+IZ(Ok_#3>?3+KWt2=C{->85? z-UHn3nlwDHMjJ*37Z7=q9(D_V5Uie1hfZzcRA`n4Duqaq$c=80GvixJb=)P|{dzHN zoYVjnZKA|iEgAQ0mV?5uGMvdDh~K&)?!~)!L zosy$WJy<_o%IdB!q8+XYv}DmCQX+JY)b8Ac8S=s~X|xMI2Aw7|?R9wPI>!W_HiQJh zL^`(MG--%jif&a$K=Yw11g9Ke_L^#uk+IX9ooz=XzKbn$j8JUcfX$FvG);q?8iu}M6;XvSsyMHzJY zzz01(SIOjsSJ}G}L1->J3+gJjLGgsF7PabC?AGKT#FiZeyXW7DM#yp^FfE=Kf^lJ?BPNyDprUa!K9iaV1+K<4Wp)Sk|4`3ST9Aa- z4&I^BJh^bSU55A+*@2O2A__RP;)MG%aN)WTBrh`PgzqwRvbR1eg_bbAKc3PbS`5xs zkcT|+N%W1zW+E>Xi=$hnw!W53qpKsW>4zh~n5>td$xpjR?A%-cXXn)6_ABF{I;09I zlWwqiQm>gQXQ~*Ls~lXYdJW=Uo1w^41xS(`p$V&gaGPT$GSU8kAh{F13*E!4V|Qs) zNj6^DeG07vPm*;VK#h7NuvfneRi7Fn zQER1CIyeX~iQlS`HYzd^Dti-m?^GJNG?Rj}X9K=U5nNQN%|Qz>|4lROrQ9TDeLa=D#|GKI3MDD4Wq1 zFAm5{@lz)iE2iV??3UkK^H3(t7w?@^M?F>wqP9rF&DPDhYuj>QVz%O?`_E{6$vq6n zkcSYNB2a2IgGZYWFx{cQ$fH99%sabL;m=Bj-i8+0+j=y5Mjtt4Xo-CTI_U5JhVDL? 
zPsDq!Q*)bM;u@p|*~xu)Q9PR2dAk|%)Jt%2!cF!ou?59J145sApeTjePaavuRB?)JQ?rl`=ES>HMJ77#q)}}cpyd!biPJoR>%vg7MO=g(H{7{Up`|FIU0j5Jh=_G8g;}G{@WAylQk-S)-4_jM) zg4tVZJi1sO&fbXzHH~XzPj(6Id;gSJGpgi)V?50e)`r{tCD=S=6I1B75k9Y#B!&AH z(~39^uvN6cNf)QVTN7XM)TNL5tMS&D=@+nDMUEK0Rs{NRguYzQ2dmPjL5}q|x;c5A zSz2a@A2;k{&eu*MXWG}J-uGPaS>%F(UN_*%)hd`Tn?wo**1^n*9%4Gz9T%Hruy&76 zfaZeZc=X!=$aCKZ6pBJ!0i%2`!Q$WoB4+<+!{rPPbgq?wkYbEZAaS&gv#m(6BGW8L`)}wnw1`- z4-Psqc8SN(QehU9WmV!r2M0V`A4NqZ*W#WXz96(s2+!O|f_aO0GOq0aQFl?G5;6kt zc8WdJ44hz!Tqd^`dj^uQje9|S*Br2MnuAHkA4!vWFzMR%lPG_T1OB~Zw05@$Jp0Ly z8<)otkNj^`-|GkaX}><4(sIR->6##Xzl@^i8FZNzfa`?^sg~aZ8s~a|c`(>O=NbiJ zfQ>wTwc!#DPK(7~&(6Tb&!<`a>=+c!O{Xd`^4M0L3$NWWV9(MR$Q8K8{+;-Vc9q-1 zqVpG^J}U&KXXoPKi!X`))Jih8<}6t!6^TvzVuAZq6=Y?Wf=*u?I1+7Ge=8Q=U(X<0 z($g`zEQeKUR0dVH8FuabffoK2e{}I7wl8jx=avHEn!j zi9@wAt)mU@7>L${W2FV+~6M-?TwJFo~^e|cePst)utO~9|dHKb&o0P}0L z7EIKijcJ3Ul-UqX`e*y3cGrd>Tgr%gKa@H1MW|FuI_Q zW-pb7fQjeu_mRce-B1a*dM0_`FN*4mh4I0s=Ww#EoaFJ4--%y?+)ke}ELuFk(KHES z<|IN4F3ZEwnabqLQ*}mCFB0r}VqwLNPLj6%E@xh6dm{U0vSR})gPc+)C&S)M3N507R9%BRuMXBWtG zBS$jZtdGuo;|mM6ouGyy{-ozu1zq=^Ql;*XxT)zaBh^eAjzs+M;~9`z>^RC@FDp%SS|D<_AaJOX0a)`px9&nUX?)8`8=2rtpX0k$}qNW zI_Bo7!H(2rXeKia;&$hNNsAAhO*%~rFG!IUIZJ7lPAOb&P=nUiVZt|jfGC_UV1HkT z#gzp=sPGqY$aITht+a1I-&H_gqnp$;HkpQqO(RbXC&Ba+0*t3aBF$9ShCm%x+$h^g z9}9@Wpr#}dbhN_!$_mcq^94}l)4_V|QpRbj3M6PwAtp?)$Q0+TbzC5;+FDy1UWW@f5kZX$7qQd!Hz8If?I? 
z58s}I1TT9!D=!fx3zX?O?W-uQ!v~kfMaXi_dUATlCe)(c5GC`RuJGA_D|4;LwH#Zb za!Zk%R9}QEzg)oR*ca^b^+sr36a!C{`tepo7k&|*iI)5uz_>{nO;u`O!;~^$Z#{$; zRwXd%X3Cxj}VD!!T#CUG4LO#$ zO8*e;B`c{*NG6#zG6(Iy$%4z-P3+#gV?^R;HP@N*o_YqXVGrMKWsILJMWyf}vYIFP zGEbyXXTwDDs@{-BVIGkm973lTtKsg-DYSL07-0QX2)$@c(sQ+-(!CM+0&mg9aXO&m z;EkH>7N}xol3TyGlNHh#aQIOo|Y1*jn25uOc>fT z6WP1Zk;=bnLiImtLd`8y{lA5g#y`U7{1v;FKdCw`Z&q8iK)qc{ z*SAIH-i}O5q^_;Ga=3=M_3p#X!=4r`QG*9+5F@OBl)|?!-Huu+C)Y6-2&@y;b zw`IC8r)9H-wRxy!c&L`w|Kd#P|LaU?|67F4e~Qp`3)TCli2ry@`u|8GNj|;_m*zs- zS0@@ZaXYlhSCYs_{7@$y%Vry_r`vK@z&##8&OIbgw2QXEW4#D8USfa`mF0M3)eEzl zGwF-NiqPNBYlTJkl4ngksvo)sWDexQlxH(QID9c;`g8JeJs*zm8^)zWYw!#|Li(y1 z?A1SUbXT+{^*>m_Q`_q(XSM`}{_RDstOG3Rd%!k5^rMdQFS+S_4~T5`TjHd$9cmVu z;MJ$b@KBSFjNaMIhW)4pkH1aK8liLagS7|5@|N)Xm##97Q;gv7r6k<@cmepWdPgQ4 zorOh`6Lg?NTG+2CqXcT$l!8hN@$`@H+o9jhQT*m01mb&@T zJqvD;$8r~2Jl^idSvMuY`|D~lk11z6n$$?_tF8E8k_>p-$3^ZbH$Pxp2j>eWd4&)TSF1=xI zKnC~n$0+`<_gK7(vu=`RI zUGX*%^86COv;P^LXykzNR~*2FvZ;`!SHzmU7QxU20MIkQ)V_IK%% zopG9AeETaY*}H=Ak}bim#WI`;*=wONN(4)7LvXdyHniO4j4@_<_&&%Erp`W31I3q8 zr#ornV$ubAHL-y>6iAYeQ>#(kBLObEtViEB`=Ks%AMtsjLpM4If{vUkv6;+=m3c*s zeZ>KE8<-194_1K8{oVA|(nJVL7AI$77eIfWGz1O(XxZJ6Oqt!ij7fnUoc&o23U9Na z`dTg6mA&Thg-48(l`9AsZ~!Lzz6T>dNB?5dq{DC&naAny#b z?YuU!*3(h=emZ#B=8&8E49e^16T|j6n3vr_eapS6@9iq)+mdax_vHyz$Ycm?^_yvf zojMiQ+dz@oZ zY57Fy{Z(+k=*jwp?}IB5_9VfXps9B>XMFoCQr(;mR!=Vy3HpkL-akzzC_JHG&E7J7 z_Qz=dzUSmqRTBH;)>W`VXB}V#sn4Nq-{B*X`u$X z2~Wod8q?7IyDBXC9YqG-SCP)rW=61Hh=_;WpifNls9AX*p@t=_>kA9gdeV@dbZf@; z@!2F|G!DG~u3)-{@}cK&F8U-^z)a;@Qv7Vl+_yO$1z)67L-$^GujM@|AUi7_*Nj!7On?UYc}kz|pfPx*i)B@@0XFQ8rBPWfIGJOP`aTUb?8i|$*?K9u zrYMqa(IT)y<1rbX(Zf2PN~iBF@~BFx4<7qfiz1su@zTxd(0O_jqfs{r&z@RF7D&$k zC*f!?C6_t7hc05w^8tD?z=D14UIVVjZxV%DF2H=b%BX40ZjDzy$Cf@mO%|wa#_i)G z)Y8KO4m!uvosLCJ#)v6Vs-6cQ3fwU7*A`H;IYLGqcQEo@Hn{ryeRApiO1fK8f!=wO zNhbL2K)c3pSTi8+FYjTXB_b1L19Rivu0`hQCP0dhn53- zn9&30$<3b!2(vB+(l$7gO!sWYx?Ke=BkhQ!i6bo@pb&NZFlDTcvRQX+8Ih^Q?8mK= zP`q4eyxF0=cwV z@WeL?X6&8Ew)l#-rrvI)WSTQ-COP&tkf!Ivkcwhh+-s_}YGmWGS`t 
z3^Ns^GUas7tt%uh(vSSnQ>B(Y!tld+2Qi-zM^>pg@f?4^_6vn{d*gFZa8#!Q>656f znmfE>B!EiEgPbbBXyQIhN;iR<+Y<0^NDLLN{l+=_(wUj_`6OvcPQroFOxQA+=akZ! z_e*2R zWp#e$t^&{1d*KU*SMicCo8GeGj4d7zO@LO(LTbdfn_Mup#Y)a}te<;~%KW_ty1adV zZKM;hr&Gvgoo{&mkr!0YkU}GWM|@>=iZqoN!!H|aQrIH_EyJIwkWn6Odmsn{(r?M7 zwUpHMXF}rbIEWGbOMXoZhOD?|Ht4SmlF$!Ra7zM557-Ax*eIzrhZtlriUDrp27Lh zBn^FAo^YftRI?{12jIswrg%(474D^cW|nlEgM}Fxs1JH@{oO;N))7R5bIUMDvjb0E z7ef_YZE7;}JU#e#5?n1DBv!6dut#_igw%gydpaiJq?@9+@lX}h71hTcK9`KolmJ$R zJz;ON*6b#~Bk0oKO{z?u;j%RwP+Mye+>^5aI|&Pro{&gFrYAu4E(hvSmc`2lwq1-|LFSPN=2Rg;WjJdPH2(=Rg&_lu*mz4*S zh_(B`@u49~ta(M9b84Z;ISq9#y3yaiF4N^P*|bi)fnMO+Vvzbo4DGne4GSuvK~CFX zuG?W~lq`iKjs9@(@-%2xo{Qf$?I#?UWGY)F2oHGw?OLUF)SS8+K1-EB-=`$HQtJkt zoIL}iizYzplqgIM`%H>8UZGym6DHheKOOxWLsK@Vky8zBBx_?9)-7JoOHFv-Hl`BY zbyz0RrJlU=iAS~ei{#}{2G+}{!SL@0Mlv83bgmvJ6Zyiad6g!|_n`)z+nq!etG_|) zcs|>_c#sW!a-WPok7k$IjnF+Zu~fa-i=_5?;?lO`#PM?{5k8Yme0tW?)Y0|Lo9j(c ziY~#KZL>-F?G$*ob_SXlf1(u&Lz(ej{YP#bhcrCvIVWGt+?Jj}MBNZ(B zSrL|64AS^R7R*=2^;Epj8Z>Xl&`#MQCd;RbdQXjHle#98U(ar$_Y_59XTJz%L)a*z&@F*8Hj_RQI)yY^yrOCN54$ieKAae#YInM?+ zQ3J1XHnlMx1pBv6MyiGIWPh&-88omDeBxFjef<;XhbefKy3@*3laZn*|px&CG!aVyCop@YP&(*QRG zm7&k`d)$vSk30QC>_b6$tx!)|`3E}y2LHM%D7q2mEAVOCIrS|X<8z}=)!SJK^^N;v8x0i~vS>oM851F>( zQ%T+nUC=($Nk7LKVLnVFjY{2^T2M^8R~uq$+9mR3mI#dGEJp2zf}Bu;@KD45%_Y{? 
z`3Hs={mT)I|HBbX+(J$N8-|$u14GW8*KB^gRI6EFy^q<ZTWW`zn&Y9lIM=&b9m<=&7aeIr zmf!U4)DDwkg_+IGg`Uklf3upGEVpSgUsGwCIX}8NC|R%BdiiIQ<38%m2453RL(RiO zIsdmy{69#9`L`7EpHjdr)Z%}Th~>XX58jR* zMka9U<3~m;G=~;@H*#*;hOr)Xn!MJ1I-ZiQVcr|1g7WHlH0zNjo$HN6PjoieHOxo- z@{h#wG6&sO>}LKP35I3v89-yqQ1zfP`{l7CeryWIv4_TV-`n+c$r>d(y|EMpSDScg2(zMLuFbn?)6}q8LL!5CuTXue|6+7p~+NNb%66Exq>r&#aXa;=t4C& z9wI0A?kC=XvRE(u3cQ>qQ8HZyI}gS%eCyX>nXNpTe8rA^(Eo;xH+13TUOAMkTfr{) z5KH+#Eulv|t%*(SMUZWojz7k-@O8xkxNh~FC^bi+kVIjNsFxt_+}j0CS2&>7@s2KY zJji`{=r?^aF$HEB0$IL24jImBk{@x9#%>9P4=xe(>Fp=<5_15KCqH6Bo@K$9fB;Ae z76QL=DeBDUIl{rCNj`cDkxa!ulDJJd&sMg%+D=vq_dB;I?`#vOAM(|P^lHAMI%+15P&Dt>TvlfOU zC!o;#!(DWJn)nJx8QOQ3YL zAH4OC;Lep9kYH6xcNMy$zg`hu$qXg0TI0}TR|5C@D-E_`rZm1vc*v$6n@CqLn~ZZl z3(z%j*Vs6TPn;;{Se)T2Mmn`9GnAh~2Q@~C^_!av-}S@Tn|PWUT@#1Eq^UforhyLK zyGars{Uo}<>)FR?Sr`+Tj+q~d@KkXzb8+E*PP218InvY0sw?<$W-95zjRFZgmuQE> zg$HnnTRk~rR8BY3EFu-7ib*p9aL|5$Q&DUVKhj=Pjk_sC)pZG3#`#5`_>__OV<}kI zw*s!n>cQMgftb9!1_WIXv!(Uh8OvxLoF6JfF4@h2vo`Xub0dfLstMw+Gd{5JOEKwj z)+fSeH!&emmzc7G_Yf6OO_rbAh>zCn;Lhcn3j>aiv9DQ9wSF5cAWw=Y-2Y2J3 zelp#_X<}mjju4ToZE&>l9jnoFhW<=ki;5Z#asMQ7nEvAsIb0V5f)6&rikd8RFBd@H z=YeoBy`4T1@5cML^wIaRIP-@S!Ad11`9ecpr*}8L8%mEdg$RMBTrO}Gl6+`oJr395VMm+ z7%zveEdXTCq1lM3gl@U zS=?d_Wrs@XzPe>3%%=hNtcZcLvnGMxaSKq2okugSv~tvHG~wqH1Nt+1GoG)#&z31p z!=o<+nUV|6B<-0fp4YTw2O~BZRvSn?dD`hqw>uNjDpeG&Zgn9i>N43c^^N4lUU|~Kg{mSOb!zdzoA7&xe%k0gtBBWyQljv>-+DpjU>{zCGt~wfLZ@}>Jd@5IW0DJfMkvn?f8bZ{T@nQs@2EgI`in&&11yt6&F(+&(KXtLuB>4!>BJ^O;hTpL-P|C z+VrxQY`z)>5_fMgN3`Oxy6qIbq2@p**@(eTRcU-AB8J~(Zqo~;y5LghKrct?;@Q4D zxaOWuFT0xHpYZ*tp{t0|T7C3@);D@D@DQQ#ZruGHzNGY1F~&s?(u29lU>bITT=V{YGx)Tj2Us9-@*?#E4x&uz-68?u`6q zx0=aAdI^QA-A8atH5>!(m4RFJE6OLi$T6wBsp9<3#=u#ukWY(Z(oX&1a zgSK2r^vOvkI+r%FzRY2oV>X%j*iZ*)hGvj+`W=1o$dtOSI!`B?%c1P6^*Dji<_dGl zNLjuhp6W9PT_Znos4*F>-}{jzA*y8DQIDo=aWfZm9OI13%i#9!sq9zXRI+o=RXQM5 zN+QLz;D}jmbGAb+QeA}@(}b5%9B12jNdx`1Ba%@6NljNyo6Dk~K& ziO;57r>f>2=p~j;Bae5}p0n$*V$L2?=HmngHTC3|q$;geUk|ssw297c738}vO15Nt 
zWpfMWp|ZIGTss@b5!IUkmN$z?zgQw{yrF^t%zmm=_?mbIABMkM8;N+?b6OCw7-S_5 zqeA{69`xs^r?eDddXy%4ua`j`+(huWR3YvXa>7SrcBtn$Y;Jav4{!O~!o*XSP_!q6 zSgW^@ijcq5{X;i)bUI?`?ihCDb2V76euz`AwUA%wK_qj%2##Ih^$Xu$QCAEHn+LV9 zaK%iJZ|;DtM^9q$3Lf(EQ=#=%p|r*=9(+UW@NAY6cqQi1X#-Z+a6Xa5+qpqyx)nV( zT?}j{WTJ|uChE_+K%$5IKy$nj4m~=Eq1pQAuT@OfPe>x}A(R{|I0&N?wnF~Ry=2w5 z#Fhb%MNCo33_LV^7BzZnn2Tl4=&Lb+b&@hDYZyqmrHzmm+zCHptfc1(>S}_$jiF{QR7N?-$pQt>V(~aEb6SVBeBT~joU98B^!&ioCbVcfA`ug&mmNw=a@SPTf388c8 zDX}Wrys85fy99xIe=bJunvBn7mBIdDEokg(rE9FpiD7yK+*Dfu(S2*d)3zL1!en5q zpSM?r3=sqKIyTg~mYj-dW90JM(dThITr8Hv)ciKC-%CCwYW-3spL3X17d=Y6-&}`z z4Zh5N#}Vd!dKsHA^CxlIACD`YD$qGHnaHfT#Oxa9B`?;Fk`VP+Tr%8GVr2VhrsQpE zG7yQS{;F_bOCr{b0;;C@J7 z23931(4PI3Op-r{vtPcZaz(N*EZo*ovn~r3eg?K7`9Af(;((#OT-xfnj?NbRLiKO% zr?sXl&?8!!xm6>GgH2(~y?Oh2R^0^VPqidQcvzBp`9ZRjJA?jS!%I8N-GUoxdvIP? zD)oQGL+nnx{;_B)>b_HiavrLTfAEu#z>TCb`2uX%=uAHzC?+?jG!kidE&h! zfZWKq!M!2+lME#A(eK;i$VlcFjLuyL?o%#7Sy49`PW*@sPLVL}mMq%ZbHGbaj?6zG zk9V@4lNkX6WM)q~_iMvAs~zk_e$7+GcY=pWUH~6aP0|HNuTX_AAmDFj}_WMn4M6RHt8$Z&ew)4?4+5ryg$ikbD2x|F22sHTa5z%el zyuQQ^={+qn?h=CmN7Fz$;0+1oE8sBBUzx*q?l99VH<9qM`Q%sCek@sALaJ|;(hV;B zgfD`F-uvA!<7frF@OU+bT7`#N|KBN*{{|yA|I!58|ImbmZlR0*8;scf14dR`TbS$p zS!CY%H@_)solT2t=FAqw4yzWuyH@7s0v5IWjtBFV3&H%FrK$Pz(-tjnTo<=|tFdmm zsczTu{O6(;-y_B?*1KfPmY?&D%?u4YJDz8fzz=k>bgt9tEP z4)W$MNtJblS94~@ScNmc}hFBJ*B-X^5D;32H%`4Al^debY7-5+|nth*`I9aBj*a* zJB0%@vxGit&;y;13NS(1jC}jP1%IqQO|!J^ak+UPMVLdmopr2a{t~$IeIxCiTuZi2 zoCe)dJBhyiGEj8VfgsL(bahOk+sw+T=ck8M=r2E+>iUv>C}Tv2lAP!XjYf7zN)&@{ zWZ}Bif~?N&H%vlvIIb^org|$EgZkV_c-!_O>odn2<=R!q+1m%%XQGa*?}{E;roDw8 z+B!(it?x&*mJWI-CmDw7!l7kNINdIsiHxr%tdw3x$J8#6k0*e1Jc@>aErrCxSqL?X zgyGgU0v-PPU>y*EclW4a&6|93;zk8MvO^#9@1I6aD1(cPWRQuG#SMSY;RP=NDtwKI1!Igd&#`zZMK6+v+&`a6@zg6d_`LHcyENYk_THFGEDR5U z5+ejV28+o{?oH~fx)pj$^$(MwE%cTRHfc~q2HnRA>b z9`3@2s?yAHKXDNFCJ2Lz!*KH{4lPTMqZ-SWqUZPZa3yXZREeC%&k8SS)!z~JmLG>~ z{gcRkdwGfevN(uU%pqJ^7YWtNY^dH?J>y*%23953?>-m-E_`X*JDY@??)+GAn5D*ZU?H=DMSMeeaSYW00PEz_wbr!^FCmdJjb 
z8KQ(DU;T-TOCMagGY1ZE8z8{qJGrFtjr0$?V_QxYF6y32_8x9wZ|zWprq9<%^q z$GOMJsfopOO;rHMuha#h75lgq%l*jjWmECtlrG|bJr<^_?IG)*l|hN16t2tD!BUTV z+;M|%Y!tr<*laLEXNQBJ!5yaWZ)AgGxima3XlG?v?##t(z%;c?I%+=Yk zQ0?PHGM-hDXN77Yx@;b1tueNj=r)8j%KSB=OJ%N}U$nYkh$fdLO4d`6l2? z`3&x~v?{iVPQ%G z5t>JmsUr1W_wzjWZ@p{1@9+8hTI*W-*n920zsL9Z9N*nW_J90|;#d2F^(6+cZL)^p z@u7I8VH{*`Ou&X0HWQ5-ExPCuzi9XrY14J_6SKH z-vB(XX)JDbyhrTxq#4nwe1X*;Ig*$=O4i%i!s5$v__0km4)}RmFu&s_!iH(&_LgDp zzTp{&o(N#IDV32w7lPdS5^?C8vvi*KdzA0rL>76L3Qh-|A~IoGT<<$ounQX|+2{Mo zOU)zb@T%2NrsOYB(f7e(!rjR8Gy5@PHxV19M!{Z@)3D}zDlC}HlD@Kf(l9U%6$oX7 zqNFGmE1HE((#r6)pm)TmPo2pdX}~% zrtUN1dEh*flDUCR*BRi3(dnc^uZ%dn6T>&#PBUs1aX9|OH%2B=1s)g&A`Qu>g!a(D z%AbA`-|csi7vD}$UwV+(-ZsWEC#>LU=_omAkRq_yX^o4%%)+~LRm?Z4w~@BXN3b}Z zfg8P_kusTiPiRTH0FNofcI}R^tkD~<&`~6Xt#J?- zd5(#>V@rkx!|+;NQCwoBgl~pFgni|e&~@6K8Fso4A4@|3s1(R+655ZQ@@UgHTiEEG zhFyHkg@lqezOrpSaamVN@@7nhH~B(Ru1Xa|ZfhZtsJYm-qk{X8H-tVO+KI;>e8E8Z zZ{lq>7St?$Y&=N7_-QX$mwSic}rhrmc?W|=0yX^_t-_&R^1g;9Hx+=Wjj$k z{F=BX24YjYeK4)%I!PLT4jT;{!tMjl1ZO|5M+YQcp`^#NF}vm+(MWi~%1x1l*bVWd zL3Smc)US-k-Pi;P6ONL;H?oi$ae>@B^$@T0yi9%w+xTOP=itIU4j^M62hCpd@%uGu zMEAx~s(TVEn196vo7YmrVKzcJri-`}aekoor$KNbKpai-vcgS2l^MsovA`ea1KU5Z zA&u#Qc>a;MD0OrSc43^cY`hiqaF|2Av!(Dl>kgvL6G4(kmytU{ns3II75J3Gc36@f zfKP9-06i%OaNBzfsa8gUR%9AlGqZ~BKInqvBOST(UqqM_rzP;Ut_ZfXeHC-z%~cX? 
z@D{IXjm7JPc^%G1^+dWTg&Zyx$I0y*iC=FQ<0>RX>r_+mo5s=p@V z)g|%wb;7KYJHQ1I z$VlfH8u7reE;R>-gqJY~n&&}l%Xe~r${V8EF3U~+k&n+ODPvttVCJm|!fR|xu=hG; z)N=F@iQkfm+YS2AyFePuTuhlBPvQ7v$^^W7M<9_6Loigc9N*cq0Jb%pXOst0(C({h zxb9K8px4+P=Qnqg)r=l!y_$~itPv{lcV9Sn%|uuqu@8E#?1bJ`RgfbV&Fsqb5M2Iw zg}FJ9h10jLz|MH9;5}8%H4BNvvp+h}LBDtiy5$5zM#WImKM!nm+l2UC07Ph;!>L4; zS^Yc{8x*k2i>iBM$f^KLlSD~tWf1gAPb8z3dysv$9Ia6fD7$1Kc{$k<4p?`SwH~v{ z^N-HZ8ZsM?JGKNX*QCL-VkgEz=CZ(8HH#dnsX$wUClRr?r&fSLoI`@kQK+)l){>rKedba^JmV-;#@biyP3Pf*i-Ei%w-gYT~J zBc<*q!TyW{n6*D4fpRRlVk(Oz7LJhSjDsK+d>aM>vLH}>AM_Quq0q3?sC$W9$Y!ZN`{>Ay8E#C+?R%6V`b-U8+z;gQz)HCBM|eHhqxkoF zYgpYMiJDy};8a-=*dn(Fr5-#e=s(kiH$B+`Q(s;pnOUh!;*3k^mEjrG^-dJf+zd3< zrW}8|qy=|9r@|7Ae)8p~BivNJ2OkJ&ZDi5L2{?*Phc**6s1YQAGJQyJcVYpU2r`KE3r`3xSRfSEe*`IyGw_O? zubBm@Ld*Ai0(S2SA@(^_7_n$6n0Jqdk|vZ3uTKZEa(yO9+M`TPobtk3pZnmaW8#Ty z)?plPItj1rJAj{xe&%Kx>?JqKl3}OYMZq0fmXur2Ohx-l;F)}6x@JgY*AqelSuc+p z6k1U8o#Np9f-EAFFoeW5X2PG{Nu+qII(m~gfxO(83|_+V=G5D+MA5sPv2R@uC@7!Q z?wU_jQ?B6hHNiO9$Qd%F=0nBBB!Pj|Inu)_kUKXdk*t#$=El#)W}(n73bHt?ikx?p#`aO^usW|vA`__+DOgvG(g5g@S0IgoVP9@YcH>6MjTk? 
z`1~fcFRemgXd#MM1zs>L@?v zgL^!VKmCz0t@cIkHiPRFdoW&P4^HSN_?o(+x*M0!9+m5Kc#Q+-pXk8- zUDDuT{vJJPI#29IqS4}<@h~IR433Elm!Os23fLnm(9CZj^CCYproD}V`(LkN!Sg{P zUD1H=eRGAMPqMJUVJw=M9Sbv(4A7Qv9!!>fOgAF;6}` zmzYERo|Z8>=RXkf_jYS$)>dD*#Pw~y7 zEzGxx9Oh-ZV!H$tygw}ob#$D7>wETsiQH^dC$Ud(uLz+j{u9YHn|o-#vH@&-T!ih0 zJxGc}FVniKN4OjsKy)^*KsmBg@lgLR@Qy14jdAzz=YeGK@Y_dbb}hh_!v5&l`=y|C z{S<^chljfSZcH`sST=uNo40Th6)HtUL!x=a+nr;5F_n<+c8i;&-gkK_m zgdeP1Wj6JC{DM%A@KDeHPKo@FC9?dVQoQ~y#oH~^=YK2_-+wHTp8Kt&T{i;{MLuH= z8|$JybKA&A+YJ0fdNm|{b0I;uCquUTX=3fMj66ErgJvC%#y!TnNr}4$nzwc;?j0Wj z*X~p?$2Bxy%ra|cLg`;PX|gubOGHS$e;qm@?hk`O*GQSLP3Ye=1uh(o2g#0$Wc;az zB*|(O-XdN{5+o{MmzNQ7{uo2DiY}3W1sZs~n+?n=`%LM#&PL}hCF91T{rIA2Da3yO zCO|kL9l7lZo;jv?&aAo6$nsEemm8euPlK0d<}!M6(d6=XS@4K8g7ox>xF}j2{GOhK zm!*L)Rk%CdcXl5ge4`0nGBM%8Qy!zY9@B+xS{+=VM9@*EK_a*`KxChmAme@R@P1`K zvj1R0-v2s;ax1qXq>}_^R;~l3FHUH^<1k5lAq|$Bza!boQW9-{4KLLQ1-Zs2q$b=P zs2$FDkMAi6U_4R$jUHmVSstu}vee+B10Eb$0g>gqkc%*j;j;T)P#l= z5d)-dd<+K$Ux8WmQACBYg@)UU@qYU#VB~2~Eu9Ij^=T+%O9dIWZ70=^RbXkF3mvOY zqn`@z$;0Am_!b;NR{qJxZdJWFw|aa(x)t%&=b3_7x|9@ecSrSuL4vC9G5B%*NfiHf z2btR(3k!c&1JRBW%(WJW#|u<(tzj+gXEzRlk9I&yRvzqET@JQY`tW5+BYCQ|j~p9J zhS^H`kZ;8>xNsUC49f)c@(;1vstO986nLzE05)oO87R)gzsCGTsWzmUrj+2bwR6;0<6Uw!)-k*X> z5kBniKY=hdh0`}v;bGbfGUw!LQf5hkk-9gGIiv=e39h(vh7Xw?H33^++=Ub0=Rw}u zuZ%O7Lv~AlC9qK(ZyjkT&WrsZPsakPhm`PQ&jj*H7~y5jp1)=57PLa(@id@c4qx=3y2-liof1!CEs^eBiGs({N=ML))Ka^yA|#5 zsqO@_oHc;GCvUUbz44^nio%CSov`W53^E{Eg6ns$#G_BA6LI0Ac+YilA$^fWG>iI( zn)O05DbO6<^UFd{>ca4M^VLw$KN}Y;+=$M7-3SHlE)c%>s^Du}3K-g+#u^W-n0lR5 zG|tfzSsa>1R1c4Vt&`(X!kYx}$?(B`t1;|VC?l#blku|XRAz6MA`aeKgxf7uVR^d) zURGa?-kIi*ORM5xqEHB;?KJs~K9-kaTfQz?yO<-(Y`O%2;(EJ zhXi1zkPZb>Z2=IsahqNPZ>RGP?N6k!Y{6>;td76n|@#sR-<83mtc9A6_##CRgOzv>(- z`7H!jOgu<39FmYX9znNLgt4HP_u+3n+GO*u1n3==2HA7&`0Ah`7UuKe_&wtAqcwyp zIaEPzmLFrd3Dr0Xj2 zlqwM%^71cO`$QK-CMUz3^zY2Knm0I;Ck_`H!Vr0|4(bWR}d+fosExq|a+3W1-qc8b2pMX{4dx z{IC}x#t8B|&w_UFLo#@{8fk_-CG%hJ2jy-lTvTxti5R;>(qOu9p~D?dUULZaKKV0B zO}9{_FbiVF<}?(& 
z3c`5DH0=A=Gm`TomsI?vhBC#P$)S=-VA1c3b-Op?6(*Wk;ZmePa&;~a4gP{gu3MvL zdmrGbj}}3?Y%wXj9f0jP5maP)0Mbn~@Wb-;cthk^(oxDGe`ztutet~z8p>m{YWm&je;o`XvorEui+`3S1p>3bE^gt>8A*lR*RBQsqOitNteif6Src9@U# z?w1R}@`=c-#~HsJaU%B`oXFh`StMjYoAkZvLosLlNm{=@>Ab6j^~~=GN?#mltSj6U@~6vLwy@~I^Ui#HYH zt3$&=!cG^e52$jU<7Js03)2L{VW~tVVF|8zx)oijY$87EhRAV6J*=BK8}c6VvNQ1+`wxmNW%dk zjVY$BiS>vV>~$(bo~~hJ%9mwAOHUHEoIlP;4NqknzrRJr!*OKM=R)W`t3+OPD`C?| z`dIFc4BAiaAdS~lNUdKiz0KhlT9f?_f7N(TUgySO^RIWwa;BTG_R_E+L{!+OpToXB zr63r4n@E&Ifo<(ch`Xr?`pQp`%AX{B@m)8{Z_q)~zn7p&;|W;VU^md z&7|q+Mx+;bSkO1*2x${G;a$^zQ%F`B*Q$DwtBK=~&5Tsy^dkqJ&y;}iJasTo-vFzw z?1E;!deqt{2RYR*kne`CWT^QU>ZI#IL2f+$Lw=LjdFPp~fWN@uWCrp0Ycft+ZVJU{ zAxz+>K~-BG7O7If$Jb{Fr|w6Q`=eq(ps72~>G6f@pKcJjj-y0vygn{7SPBJ~vygAs z8a$;oUoZ<1=6FaPin(BhZ&o#NLpBL0Pje!^KYcUKFVzs_)f|L*Kej{H#ZCBeb{9H5 zeFkJZ&BEpOWuWg-j(Ek(U}0@ObnVn2P7nz%-x=bF)tV4xsfEYfipJKGHeh$OT!2mG z@qDv&V0&vH3AD(=ExA^ZYunA2WbYyC_1)>$1Ko>L3CU!I`Isd2E&C6!4@X~Ck=JeV0(i%xt> zB^RVJi0V&&G};~koL(G`>lnf-O@r`QJwP?A8`gE%hF+c5CT-ugf&0uCNK|qj&aCrA zADx!Mz^Wo-vP=!)rx+9G!6Vpzbt5jGN~7xEu{g0<9W0HVgmIvbe@4RpZ+k}k4%a6PJMh}D3K&;p5UzVDj@37IK0~26d%n{#qOt5 z$o>7#Q0CSRc+Q5kMD$iJtkar?+5I`B)zuVYkL|=Whn@*Kdlo}<_HrPim(f-kPdIOx zhhzINr2?p)z?SI&(f$-v72uZH%EfeOV$@uT;!fmY`AK=`qQ)vxvdI}m8NMa!UmYZ^ zu?!wsc7@38uSHkw{~*z`*9GQ@k>rq;1gtz{2r2bNNIb|DyuMzC2_I9?^SU0Ax|oM% z-zp{zUo6mD$3nENEFW)|c|~I8jsacwX}FLvgJ1hRpgKufxUV^f)M=K$j#@p~#x|fy zpLZ~u6m)QxyaeVg%7EmUX*i|W2eZ!&@R~#^Y=31oD4Bjo39fZyUNr;3KDO|yTMwkP z$0BtlUv9&o0!Y<8MrTLUVC_-?mUEmB0d3o1Xwn~|ka`DADmsIX&Q*bef;r^ZB1b{G zwFtiHQ;R-HO~UUtMUn#kWnsMKKGMIY8!4}~#D04oA;;J1I3-vD!lLG3nY)=tHg7c! 
z^$QPO@!u(t{}?0w|F{V&|8^4s+(HBYV~hm-%NRM{xsYAyN>eM_=CjmuimHvmY|JcM z3ePZP!>Sl+=_Je^^0r_dV{ItiAJ(i?mnLPcVM84|BT2cPTf{m^o3oE5Qk2(sEB4S# zD{8>sf)Z5`21IYetdx%l<=kgOjWM@mOUG)nE;=@pMYa{IKi`g3e_=`48>+COtHMKr z1DC7+e?w6=M}+^wiLCyoj5U9kvDPhg-M>u{xunqb|1w3Sgu(lk?Np?b9J~9P3_n!g zo{Ri&kJTR1;fsZzrp|p0G}Ab>k>|hfCEMIC!L?8s?5#bRR_|HKPQ75yc?`O zvQ4(s)fb)N<$2WB_!=+dLR?K~tfk0JFOi^Uj;r9sZHk~Idt%wiY#pob@oYi9W3rj5 zG{cR4pTaro+tI4-t7v`ye(IywY+B6v7<)upfjhJMA?siDnm_n>8SST%#_rAQ;%%Fz z#Qo){Mjw)~qk8-0IaNJ#dcf14a`)+FZ*?5wU(7aO)5ngmPJ1afYgsNYdagOYEc$lc zygT>n%oWd2oh`X^_1o9%!a)P>(>4YAuC6mzv9X78UZT#;kD1T?KCqZ>`8=~OzIH6f z*AL{9D+Fwfrvx{rZV{J!t%Tz27SZG1lv7VT*6;?qr*Jc>zVYp8N$xk5LzgJNr~KIh ze)8Nf_R2g3dd9aBN`6H$wSI0Otq>wh53J>QLkgC3+OOSILdG@LGeevuNjGJDt%{mo6~fl;`bt@8n^9haiu97E zN=oAOCiY#QHrE{?LWMQiQ?+3Zw4$FJ9TtC(O%b2VEiIzCC)4kg`7Qq8m*Jm%Qnv%p)}e&&APXJ zo*bsnt+ljd>npGDJl6TL(z3d>47 zxWqrPAaR>Yl@r|nuJ<%kcJ7bq?v29yN(tYA!F#q188gn5Rwq1}@>B(wsuwShj#w zEPl$)2$QB)d1`R?+7#%%b;l|F)yAB3Ya{>2z3J4^`a{&wi3R-3CHvS5lY97;0}0e| zZwKmrv_GpRcAtH!eTKR)DnYF}_LJ=&m8WKJzee3iA4~7IxJ*5ok;?w;t6spQ>r-_F z5&Vyo2|G=A&s5Qp^a>LZ?#H9^tijZkY#H~By;u8){UE&Wo>`8t%iOi;PG?0vFCm%o zmmK7M5NoAc47PJKU*@q!UmsHK2ikbXa>}gXsc5QKR-XPIw8!k-$PTu?^(2)s?J>`H zlL=R7yq)qM;CL>o32bpxF*|&(jm^~E$5+%8P>1hspqf5b^2EJXQq$VJ>k1_|voQrW zl+9gWZC`9-_upGocXo3s|LZhEUTWfV>S5DfcCG1Rdd=lAT%w8%bx`RTb;_ieHoKI{ z{t_Igo%F4_kvF%giF+mK?1DUMiNXl49x(0lOOKT_YNkF2#?cjt>HMX)<+tMD-MD&}lJIv}s8>Rdmst?Nn6eHag1FHuo@?G*iD3cTp7TIcbuhe6{hgEP7>o5dYjP` zebwkV^_05KJNn$Ev+msSASJeRdna|NbPPA;T0gbmaxho9=q@!{)WVy2b2ZNa0;$!r z!&&d#W%Oj12);O9l-s1wvQK9yv%gf*D5Z~*tjcvBT~|Awp1RAJI|FL0D={I1>-3vI2}W_rtt__MKx4sa80mQd@)mhzNZzOo9&&-l-sfABIKwy-;e zVcek`!b3y;H{$efE%C2?;*I|(iA{eiiOp`ITmDuOa*3g#e+wdu6C*+V_j9lD)x<>k z7p!x5qFWK4E_hzIWl{5vxG|I#$!{~Srg-$$~|Ep+?8HH}D8=#GDhGhU!4_aQEgVl`Is z{W@k*O5XFhgERf8Q%w?lzbB$}{9Z$@;KLVo+jM7Y;=nKVSCKZ=yXzGdGs}Yi_+TYF zc4jbVrsGUqysgez??dzol?ckpIFX*FN3k6lg}kQS0xIE%2-obX%ln}6nKG(T=dz~7 zuw%XwO4VZ>FElbdbZ6l5ssF8(f1T4^|5Ot7cO|>sLihY#iAZ8-^xt)ey*p+>g~o)3 z#s)@gjnEfBw7Ff delta 147 
zcmdnOc!_a>yn;oVxp}H(a*Cm;p}9$-p;3~dxn*jqfw_fIVv>P{f#F1Z?TicT5nK!m z3?-SlsqrQGC5btOtnLnOO26I#1ta)?L~221aehi_F;te#n=yhHC|sOamYNcuSR9{{ onOsuH-WkCS6f8=tj4w+qD$dN$E98h^1&SExS?C!Qa+c}=0CRRKh5!Hn diff --git a/tests/data/rllib_data/single_agent/params.json b/tests/data/rllib_data/single_agent/params.json index 0e508b4e9..c5e605ef4 100644 --- a/tests/data/rllib_data/single_agent/params.json +++ b/tests/data/rllib_data/single_agent/params.json @@ -1,32 +1,32 @@ { "batch_mode": "truncate_episodes", "callbacks": { - "on_episode_end": ".on_episode_end at 0x147eb0400>", - "on_episode_start": ".on_episode_start at 0x147e97e18>", - "on_episode_step": ".on_episode_step at 0x10be8fea0>", - "on_train_result": ".on_train_result at 0x147eb0510>" + "on_episode_end": null, + "on_episode_start": null, + "on_episode_step": null, + "on_postprocess_traj": null, + "on_sample_end": null, + "on_train_result": null }, - "clip_actions": true, + "clip_actions": false, "clip_param": 0.3, "clip_rewards": null, "collect_metrics_timeout": 180, "compress_observations": false, "custom_resources_per_worker": {}, - "eager": false, - "eager_tracing": false, "entropy_coeff": 0.0, "entropy_coeff_schedule": null, - "env": "SingleStraightRoad-v1", + "env": "WaveAttenuationPOEnv-v0", "env_config": { - "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"lead_obs\": true,\n \"local_reward\": true,\n \"max_accel\": 2.6,\n \"max_decel\": 4.5,\n \"reward_after_exit\": true,\n \"sort_vehicles\": false,\n \"target_velocity\": 18.0,\n \"terminate_on_wave\": false,\n \"wave_termination_horizon\": 1000,\n \"wave_termination_speed\": 10.0\n },\n \"clip_actions\": true,\n \"done_at_exit\": false,\n \"evaluate\": false,\n \"horizon\": 2000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 0\n },\n \"env_name\": \"flow.envs.straightroad_env.SingleStraightRoad\",\n \"exp_tag\": \"singleagent_highway\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n 
\"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"boundary_cell_length\": 500,\n \"ghost_speed_limit\": 25,\n \"lanes\": 1,\n \"length\": 2000,\n \"num_edges\": 2,\n \"speed_limit\": 30,\n \"use_ghost_edge\": false\n },\n \"inflows\": {\n \"_InFlows__flows\": [\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": \"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"idm_highway_inflow_0\",\n \"vehsPerHour\": 1944,\n \"vtype\": \"human\"\n },\n {\n \"begin\": 1,\n \"departLane\": \"free\",\n \"departSpeed\": \"23.0\",\n \"edge\": \"highway_0\",\n \"end\": 86400,\n \"name\": \"rl_highway_inflow_1\",\n \"vehsPerHour\": 216,\n \"vtype\": \"rl\"\n }\n ]\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": \"flow.networks.highway.HighwayNetwork\",\n \"sim\": {\n \"color_by_speed\": false,\n \"disable_collisions\": false,\n \"emission_path\": null,\n \"force_color_update\": false,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": true,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.5,\n \"teleport_time\": -1,\n \"use_ballistic\": true\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"a\": 0.3,\n \"b\": 2.0,\n \"noise\": 0.5\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n 
\"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 1621\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 0,\n \"routing_controller\": null,\n \"veh_id\": \"rl\"\n }\n ]\n}", + "flow_params": "{\n \"env\": {\n \"additional_params\": {\n \"max_accel\": 1,\n \"max_decel\": 1,\n \"ring_length\": [\n 220,\n 270\n ]\n },\n \"clip_actions\": false,\n \"evaluate\": false,\n \"horizon\": 3000,\n \"sims_per_step\": 1,\n \"warmup_steps\": 750\n },\n \"env_name\": \"WaveAttenuationPOEnv\",\n \"exp_tag\": \"stabilizing_the_ring\",\n \"initial\": {\n \"additional_params\": {},\n \"bunching\": 0,\n \"edges_distribution\": \"all\",\n \"lanes_distribution\": Infinity,\n \"min_gap\": 0,\n \"perturbation\": 0.0,\n \"shuffle\": false,\n \"spacing\": \"uniform\",\n \"x0\": 0\n },\n \"net\": {\n \"additional_params\": {\n \"lanes\": 1,\n \"length\": 260,\n \"resolution\": 40,\n \"speed_limit\": 30\n },\n \"inflows\": {\n \"_InFlows__flows\": []\n },\n \"osm_path\": null,\n \"template\": null\n },\n \"network\": 
\"RingNetwork\",\n \"sim\": {\n \"color_vehicles\": true,\n \"emission_path\": null,\n \"lateral_resolution\": null,\n \"no_step_log\": true,\n \"num_clients\": 1,\n \"overtake_right\": false,\n \"port\": null,\n \"print_warnings\": true,\n \"pxpm\": 2,\n \"render\": false,\n \"restart_instance\": false,\n \"save_render\": false,\n \"seed\": null,\n \"show_radius\": false,\n \"sight_radius\": 25,\n \"sim_step\": 0.1,\n \"teleport_time\": -1\n },\n \"simulator\": \"traci\",\n \"veh\": [\n {\n \"acceleration_controller\": [\n \"IDMController\",\n {\n \"noise\": 0.2\n }\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 0,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n \"lane_change_mode\": 512\n },\n \"num_vehicles\": 21,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"human\"\n },\n {\n \"acceleration_controller\": [\n \"RLController\",\n {}\n ],\n \"car_following_params\": {\n \"controller_params\": {\n \"accel\": 2.6,\n \"carFollowModel\": \"IDM\",\n \"decel\": 4.5,\n \"impatience\": 0.5,\n \"maxSpeed\": 30,\n \"minGap\": 2.5,\n \"sigma\": 0.5,\n \"speedDev\": 0.1,\n \"speedFactor\": 1.0,\n \"tau\": 1.0\n },\n \"speed_mode\": 25\n },\n \"initial_speed\": 0,\n \"lane_change_controller\": [\n \"SimLaneChangeController\",\n {}\n ],\n \"lane_change_params\": {\n \"controller_params\": {\n \"laneChangeModel\": \"LC2013\",\n \"lcCooperative\": \"1.0\",\n \"lcKeepRight\": \"1.0\",\n \"lcSpeedGain\": \"1.0\",\n \"lcStrategic\": \"1.0\"\n },\n 
\"lane_change_mode\": 512\n },\n \"num_vehicles\": 1,\n \"routing_controller\": [\n \"ContinuousRouter\",\n {}\n ],\n \"veh_id\": \"rl\"\n }\n ]\n}", "run": "PPO" }, "evaluation_config": {}, "evaluation_interval": null, "evaluation_num_episodes": 10, - "gamma": 0.995, + "gamma": 0.999, "grad_clip": null, - "horizon": 2000, + "horizon": 3000, "ignore_worker_failures": false, "input": "sampler", "input_evaluation": [ @@ -40,27 +40,23 @@ "inter_op_parallelism_threads": 8, "intra_op_parallelism_threads": 8 }, - "log_level": "WARN", + "log_level": "INFO", "log_sys_usage": true, "lr": 5e-05, "lr_schedule": null, - "memory": 0, - "memory_per_worker": 0, "metrics_smoothing_episodes": 100, "min_iter_time_s": 0, "model": { "conv_activation": "relu", "conv_filters": null, - "custom_action_dist": null, "custom_model": null, "custom_options": {}, "custom_preprocessor": null, "dim": 84, "fcnet_activation": "tanh", "fcnet_hiddens": [ - 32, - 32, - 32 + 3, + 3 ], "framestack": true, "free_log_std": false, @@ -80,17 +76,13 @@ "policies_to_train": null, "policy_mapping_fn": null }, - "no_done_at_end": false, - "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 1, - "num_workers": 1, - "object_store_memory": 0, - "object_store_memory_per_worker": 0, + "num_sgd_iter": 10, + "num_workers": 2, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -125,7 +117,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 2000, + "train_batch_size": 60000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/single_agent/params.pkl b/tests/data/rllib_data/single_agent/params.pkl index 60cfcb075c7ed028b5ef23a05739e2fb931e1abd..511d3434300e6270d326503ee9bcfce6f9127b1c 100644 GIT binary patch delta 778 zcmZ`%OKTHR6z)vY2a`u*YfS=5+i9znz9z8`T5wTBA+$}455$U0Zzs2%sgrw$xpz_t 
z7Tk$4i0PSv8*$N{U>C-Pd-rYx(Z3)lxGIVZ?=)SsMepwWzVm(O+{1Z#>sdbeD7q8L zHXXdJU=XxSHd-iNTZDy7A1eCol<3&87%gF)aqI*|KY4`0q$HSxU>GVpHr5+N=;AK2 zi0)$M*<93g3d*63J1#XaW1+ke6yZ<*Xl;8X5}AwC_P@h#X}EX)l8v zVBvktk2AW)^+N{k$i4Dz3LeUr;g=jQ<%X4>BTp@(2YLw`*j8tHPHL`2)^!_`b#4mv z?lEJSkTySa&$tv1ka<>+SG)0VBcyOR7@&5L1ip*)!V3oXF12j zL)@ZdY4IwlK&tt4Cr7K6ZB@f5xQX>}ocd(*O$olVs6CS>daGi2)b_#z5jw6xwzV7X zwax-l<}=K|1i2Os+s#wz+&IMWL+MUduxS}KWo42i$dF5q$@v_hbU(aGbtbP+!Y$%ak6ou8hrO=x`rD4Z zsEGN01-wX)PpAL2hmndF_;JIw9B~|*$SpB(9Sw|(@UJX*0+GxpEDsdU#)wzeH>rCc zyDXTK{k9OVJDvbduyetxb&qzX8ke?;xMAwds^VZ_OD*Q&T${NBpU!`% bFAr`=9f}{bU7Z;wE_pT%ie?r*Uwro$+&}(6 literal 10890 zcmeHN&2J>fb?+Vy$>EU8kL60TR*JwheBe6PfaIR$fbFx-aZUmfz^5E+z(@ce>^~vDSJge!GbDEv z*zh3`7t(Z9y?XWPy;twOdiB2F{HMSDhN1o*7VC08%ma29CHp){i=HW0sm;TKl*K$L z2dIA~wS(B_Y}d{l!BX!5FM9tb+qxM46;tc5&m4-~OWg zTKq4+kh3C6ya!-B32GsY`WdTwK9!4Lpc{pp*%>s3cGrgDFvzU$4U|0R`+juDVjG)G zi@*N%GlubDdkudU;H+2IbVGI6uImw1kcI{y8&)7bw_O(PrWk>u@oy?jG(vxtAAw&b zpHGJ%;n@zPh2gEv)rt8u>T*4LrSrMi?$j*l&S}e!+;!=lC^4%H=Uo&K4w-!IJ{TzEZNxL;7b8C!DEA;|xdD4u>L|Kg( z9HAhOae8@7#Au33YDuG5h_i$Ni?psoHmsA9#Tke!iJ&{^>D=|gfzR)j-FMJ!=@l$~ zKW3Rdp!ul+vIjhbx?}pGeN;htA=K0+6?&TT*4elVyC{H^sIF~@y91u8)0ud?IjKLb zZu@?@npSK(JPC9YM+}r4)sR77*uW;9)t7(~&+-ITS|i`6Ynqjcyx;dvlTRAkj;7bj zL$4ntL7k-g8?=g2n2P3bn5B&{*xkHW!jhSzC?u?pb*Ap!!Rzws%$59V%{4?NPAiQNvzT@Evv-2zVlWRd5q z39y2qUrR&YEiHkyb-A-~7HGLst2`6zY`~DL%D1jwnG|KUvQ-^b&kd?0#x$SK#tOrl z-sZ{MQJ&DwzkTJ(xX6ac53-|J?OWu59a<-NS`$t+=l?mI6MvG+n`a4moXcCA6OOWC zm^EZ_;-A9dF`nNWwrZTWq7-qAt&V({`~FClR>l!(YR5q%YIe=gP_(MFn#?>554F=y zsg#|!wZY!e{pBvnuVBhZ%9a!PQNnhQv{}K}r1wJ5WbY#21)%SxFrRdQCxLk2r8IWN zMqoG{g#k3gJgS!pk0|@8Dg7N*BxLLE9R)PSK)3Kc7P(slyq9QNEZ|Kmm zoWPb+40GoER7WTi6s7`kWn#NtewtKL+U;n#?kY$T6e~f;Mm@}f()pa&!4MALwH#uFO zHaayGtpGF{^5)lZ`&0V3d8TkPI|m+&j|q}c)C_fnSuf4#j)_TIxRp`hAq@ zD>GLa2>tt&nH%xLs-K!MH9y6aTwDV)tb^8$?JoAU*M66p9~F0s zb@7^PbrE%pdU|Hh=rwv%y}RAlj}6y+-gsyZjAL`pa2wZ*@@s1N)f|3J55H!v8Ln~H zDAsr6Y)LRSV;|skfK&{G4ZoRvfR~ImwRy;s!BHV+sS(73;n!Y)ozCH!kT6{HY!EJ+ 
z$Y(@_cEmhz2ZCq62~tJ~+z?t)>q%y#*&xO6GQHq~PcL%`weBlE}@#MWXerHOC) z4Cw7df8vFNAZmE?ot+)RBG;b4ZPvH(5FpR9KJt<3!jbX}Qp$+$;{At2gZL})s$5{~ zgWUEhNkhwg17JwJB14+81ec)Q&NY>8Phanx6!#Kib{Iy$N_S>Vhw3gewtwjIzMcCS zbwMK;vhB5Y8~cmgTy0CdCucDnISTSQ+---t!_s_L!_m`@oM&v{N4S{jb@GpkkMC^A zr7@y-T!``cjgBzj7;y%?9Dzh-ZUPW4;yM^wl>l#N2bCnYOGZo8=o~-8yU{Z z=Jwid8zV-!()@Abkx6S|wI)Uvj+>9n^gp5T=iGTvzesJ;sk9bRT=y4A@iqgd~mDnyS zo;P~U-fH(Mj-_$0^APOaGxp5A#xXYI8vD};7_54xs0ZK8{n?~>Z2n{8gC-7W&-e&- z#oWhHD`QCC2?O)Yq6Fa|1*t3TS#%ePTJk> z*A#|j!l3nNh>3It$i!zQn$8%S8{+1TH-4v|z-4m)vkG%9-pQb5=SZ3I@G!CCn4CZx znZGh|RmfF#16P*#d-N`pzNbJEunmu-c4}uCz9Bt1ODH^-JO+Is^Qsz+FosJ|&$uF& z(_R|lL08<8Gqe&p-Q@sAUB7(|;8N+adv(+m@PWM-!9K#ztcxA-mO2d@^-Q^j_)NC! zgYsNJe{PF#`vtRWk+WHbvzftv7pGoqUNkQ@Uc&dX`4Y~-vU$-2vv3&5e~5QWM+b)MD-%X z81~K#duMCCtzqxnFbIJWy)JHux8?a!n4u~#V95$`Q!bSO$Efd}s&8-DN2>9;_*ZCE z_0%8mEb*MQQW{!ADaIGuubAECN-;<|p2fd15~=IJe^om01$1EjOX+~(@>wbX`hK3) z{Im*;5UHk!4C=Dc!QG3MY`|Ga$8ES%KLDRiIAjXlW;*clC*uxl+Eeng?=4uZfHu#9<>;8*b$l$~r14jUX(>zc13UAUz; z&8|^$Uv58<%XNm;y^+RrvZwe2VG`aUxQ((Mx-@{O2B#m`hu>~@VHum1;N^FIkEIL5 zpK`XUB$A8On!1Nv@s9W(WsiTo{RaG*(P+UUom9EK83bpQ*8TdamZ#2-6P8E(cV&9C z%H{G|ZxDhRWnmKnV$aW^eYo44QfB)) z3uT{!cmIrLPIYhpC2GqsL!(gjMWY#ybNP%e>p5j3dw&8o>9zJ=Qa^yC01uK(sYIw^ z7RmKFdcfmOh7u}e3)TA$GLJ>?r}ARemj+RkQH4fLI<7nqD-odvM~j$F4K)r1vc-i7LBuP$#EA$}@wqJJnD1 zb7TF6e4(u6!jADAjzX~dI9NNvhjYl>1j*)v%J^miY$-XSrB|cfJ>m-r%5We?{VV#j zju6lJb0u@_dZajuhX(VB#j9my z3UUMeK6t6i4GlR1xzQ3hRQd}yj#BE7avtb{KRE>z02v`aBxS%V7fNU<@=|wah43AO zfKA!P!yQU`D5#_RuWZGU?>X=Tq@gRSafG{f9FunRL#(Yk0l~6}tODZ4om|eUiV&MD z$Bee?R7a=E=-RTi?}Jp{@1yVM|4e_M$@A&JWl$HdUQ+pecnn>YLxqMpy*p9DW`PU` z!ISck9Or#-#SxsY%~uu`a9^dsnhwN4e`p2Npd^ZIwS# zPb47tKQ603wL*R1yI(J3XsTCq=?OXpcjmBFUE_wS#e-ko`250$avATVR6RPXHvBa2 F{4W_Wq{{#R diff --git a/tests/fast_tests/test_environment_base_class.py b/tests/fast_tests/test_environment_base_class.py index ee815393c..b5c6cbc17 100644 --- a/tests/fast_tests/test_environment_base_class.py +++ b/tests/fast_tests/test_environment_base_class.py @@ -13,9 
+13,8 @@ from tests.setup_scripts import ring_road_exp_setup, highway_exp_setup import os -import gym.spaces as spaces -from gym.spaces.box import Box import numpy as np +import gym.spaces as spaces os.environ["TEST_FLAG"] = "True" @@ -26,41 +25,6 @@ YELLOW = (255, 255, 0) -class TestFailRLActionsEnv(Env): - """Test environment designed to fail _apply_rl_actions not-implemented test.""" - - @property - def action_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - @property - def observation_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - def get_state(self, **kwargs): - """See class definition.""" - return np.array([]) # pragma: no cover - - -class TestFailGetStateEnv(Env): - """Test environment designed to fail get_state not-implemented test.""" - - @property - def action_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - @property - def observation_space(self): - """See parent class.""" - return Box(low=0, high=0, shape=(0,), dtype=np.float32) # pragma: no cover - - def _apply_rl_actions(self, rl_actions): - return # pragma: no cover - - class TestShuffle(unittest.TestCase): """ Tests that, at resets, the ordering of vehicles changes while the starting @@ -347,34 +311,28 @@ class TestAbstractMethods(unittest.TestCase): """ def setUp(self): - self.env, self.network, _ = ring_road_exp_setup() - self.sim_params = SumoParams() # FIXME: make ambiguous - self.env_params = EnvParams() + env, network, _ = ring_road_exp_setup() + sim_params = SumoParams() # FIXME: make ambiguous + env_params = EnvParams() + self.env = Env(sim_params=sim_params, + env_params=env_params, + network=network) - def test_abstract_base_class(self): - """Checks that instantiating abstract base class raises an error.""" - with self.assertRaises(TypeError): - Env(sim_params=self.sim_params, - 
env_params=self.env_params, - network=self.network) + def tearDown(self): + self.env.terminate() + self.env = None def test_get_state(self): - """Checks that instantiating without get_state implemented - raises an error. - """ - with self.assertRaises(TypeError): - TestFailGetStateEnv(sim_params=self.sim_params, - env_params=self.env_params, - network=self.network) + """Checks that get_state raises an error.""" + self.assertRaises(NotImplementedError, self.env.get_state) + + def test_compute_reward(self): + """Checks that compute_reward returns 0.""" + self.assertEqual(self.env.compute_reward([]), 0) def test__apply_rl_actions(self): - """Checks that instantiating without _apply_rl_actions - implemented raises an error. - """ - with self.assertRaises(TypeError): - TestFailRLActionsEnv(sim_params=self.sim_params, - env_params=self.env_params, - network=self.network) + self.assertRaises(NotImplementedError, self.env._apply_rl_actions, + rl_actions=None) class TestVehicleColoring(unittest.TestCase): diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 0b385f28a..336c17bf8 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -26,7 +26,6 @@ flow_params as multiagent_traffic_light_grid from examples.exp_configs.rl.multiagent.multiagent_highway import flow_params as multiagent_highway -from examples.simulate import parse_args as parse_simulate_args from examples.train import parse_args as parse_train_args from examples.train import run_model_stablebaseline as run_stable_baselines_model from examples.train import setup_exps_rllib as setup_rllib_exps @@ -60,36 +59,6 @@ class TestNonRLExamples(unittest.TestCase): done to the functions within the experiment class. 
""" - def test_parse_args(self): - """Validate the functionality of the parse_args method in simulate.py.""" - # test the default case - args = parse_simulate_args(["exp_config"]) - - self.assertDictEqual(vars(args), { - 'aimsun': False, - 'exp_config': 'exp_config', - 'gen_emission': False, - 'no_render': False, - 'num_runs': 1 - }) - - # test the case when optional args are specified - args = parse_simulate_args([ - "exp_config", - '--aimsun', - '--gen_emission', - '--no_render', - '--num_runs', '2' - ]) - - self.assertDictEqual(vars(args), { - 'aimsun': True, - 'exp_config': 'exp_config', - 'gen_emission': True, - 'no_render': True, - 'num_runs': 2 - }) - def test_bottleneck(self): """Verify that examples/exp_configs/non_rl/bottleneck.py is working.""" self.run_simulation(non_rl_bottleneck) diff --git a/tests/fast_tests/test_files/i210_emission.csv b/tests/fast_tests/test_files/i210_emission.csv index ec63cf9cf..d43c115a4 100644 --- a/tests/fast_tests/test_files/i210_emission.csv +++ b/tests/fast_tests/test_files/i210_emission.csv @@ -1,4 +1,4 @@ -x,time,edge_id,eclass,type,PMx,speed,angle,CO,CO2,electricity,noise,lane_number,NOx,distance,route,y,id,fuel,HC,waiting +x,time,edge_id,eclass,type,PMx,speed,angle,CO,CO2,electricity,noise,lane_number,NOx,relative_position,route,y,id,fuel,HC,waiting 485.04,0.8,119257914,HBEFA3/PC_G_EU4,human,0.05,23.0,119.74,3.32,3793.12,0.0,70.29,1,1.17,5.1,route119257914_0,1068.18,flow_00.0,1.63,0.11,0.0 500.91,1.6,119257914,HBEFA3/PC_G_EU4,human,0.0,22.84,119.74,0.0,0.0,0.0,69.9,1,0.0,23.37,route119257914_0,1059.12,flow_00.0,0.0,0.0,0.0 517.1,2.4,119257914,HBEFA3/PC_G_EU4,human,0.15,23.31,119.74,78.83,7435.5,0.0,71.61,1,2.88,42.02,route119257914_0,1049.87,flow_00.0,3.2,0.54,0.0 diff --git a/tests/fast_tests/test_scenarios.py b/tests/fast_tests/test_scenarios.py index 5fccdcb3b..2263f3474 100644 --- a/tests/fast_tests/test_scenarios.py +++ b/tests/fast_tests/test_scenarios.py @@ -5,11 +5,8 @@ from flow.networks import 
BottleneckNetwork, FigureEightNetwork, \ TrafficLightGridNetwork, HighwayNetwork, RingNetwork, MergeNetwork, \ MiniCityNetwork, MultiRingNetwork -from flow.networks import I210SubNetwork from tests.setup_scripts import highway_exp_setup -import flow.config as config - __all__ = [ "MultiRingNetwork", "MiniCityNetwork" ] @@ -136,7 +133,7 @@ def test_ghost_edge(self): self.assertEqual(env.k.network.speed_limit("highway_0"), 30) # =================================================================== # - # With a ghost edge (300m, 25m/s) # + # With a ghost edge # # =================================================================== # # create the network @@ -147,37 +144,7 @@ def test_ghost_edge(self): "speed_limit": 30, "num_edges": 1, "use_ghost_edge": True, - "ghost_speed_limit": 25, - "boundary_cell_length": 300, - }) - ) - env.reset() - - # check the network length - self.assertEqual(env.k.network.length(), 1300.1) - - # check the edge list - self.assertEqual(env.k.network.get_edge_list(), - ["highway_0", "highway_end"]) - - # check the speed limits of the edges - self.assertEqual(env.k.network.speed_limit("highway_0"), 30) - self.assertEqual(env.k.network.speed_limit("highway_end"), 25) - - # =================================================================== # - # With a ghost edge (500m, 10m/s) # - # =================================================================== # - - # create the network - env, _, _ = highway_exp_setup( - net_params=NetParams(additional_params={ - "length": 1000, - "lanes": 4, - "speed_limit": 30, - "num_edges": 1, - "use_ghost_edge": True, - "ghost_speed_limit": 10, - "boundary_cell_length": 500, + "ghost_speed_limit": 25 }) ) env.reset() @@ -191,7 +158,7 @@ def test_ghost_edge(self): # check the speed limits of the edges self.assertEqual(env.k.network.speed_limit("highway_0"), 30) - self.assertEqual(env.k.network.speed_limit("highway_end"), 10) + self.assertEqual(env.k.network.speed_limit("highway_end"), 25) class 
TestRingNetwork(unittest.TestCase): @@ -254,150 +221,6 @@ def test_additional_net_params(self): ) -class TestI210SubNetwork(unittest.TestCase): - - """Tests I210SubNetwork in flow/networks/i210_subnetwork.py.""" - - def test_additional_net_params(self): - """Ensures that not returning the correct params leads to an error.""" - self.assertTrue( - test_additional_params( - network_class=I210SubNetwork, - additional_params={ - "on_ramp": False, - "ghost_edge": False, - } - ) - ) - - def test_specify_routes(self): - """Validates that the routes are properly specified for the network. - - This is done simply by checking the initial edges routes are specified - from, which alternates based on choice of network configuration. - - This method tests the routes for the following cases: - - 1. on_ramp = False, ghost_edge = False - 2. on_ramp = True, ghost_edge = False - 3. on_ramp = False, ghost_edge = True - 4. on_ramp = True, ghost_edge = True - """ - # test case 1 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": False, - "ghost_edge": False, - }, - ), - ) - - self.assertEqual( - ['119257914'], - sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - # test case 2 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": True, - "ghost_edge": True, - }, - ), - ) - - self.assertEqual( - ['119257908#0', - '119257908#1', - '119257908#1-AddedOffRampEdge', - '119257908#1-AddedOnRampEdge', - '119257908#2', - '119257908#3', - '119257914', - '173381935', - '27414342#0', - '27414342#1-AddedOnRampEdge', - '27414345', - 'ghost0'], - 
sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - # test case 3 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": False, - "ghost_edge": True, - }, - ), - ) - - self.assertEqual( - ['119257914', 'ghost0'], - sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - # test case 4 - network = I210SubNetwork( - name='test-3', - vehicles=VehicleParams(), - net_params=NetParams( - template=os.path.join( - config.PROJECT_PATH, - "examples/exp_configs/templates/sumo/test2.net.xml" - ), - additional_params={ - "on_ramp": True, - "ghost_edge": True, - }, - ), - ) - - self.assertEqual( - ['119257908#0', - '119257908#1', - '119257908#1-AddedOffRampEdge', - '119257908#1-AddedOnRampEdge', - '119257908#2', - '119257908#3', - '119257914', - '173381935', - '27414342#0', - '27414342#1-AddedOnRampEdge', - '27414345', - 'ghost0'], - sorted(list(network.specify_routes(network.net_params).keys())) - ) - - del network - - ############################################################################### # Utility methods # ############################################################################### diff --git a/tests/fast_tests/test_vehicles.py b/tests/fast_tests/test_vehicles.py index 7e1405007..a37b235ff 100644 --- a/tests/fast_tests/test_vehicles.py +++ b/tests/fast_tests/test_vehicles.py @@ -33,7 +33,7 @@ def test_speed_lane_change_modes(self): speed_mode='obey_safe_speed', ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="no_lc_safe", + lane_change_mode="no_lat_collide", ) ) @@ -56,7 +56,7 @@ def test_speed_lane_change_modes(self): self.assertEqual(vehicles.type_parameters["typeB"][ "car_following_params"].speed_mode, 0) self.assertEqual(vehicles.type_parameters["typeB"][ - "lane_change_params"].lane_change_mode, 
512) + "lane_change_params"].lane_change_mode, 1621) vehicles.add( "typeC", @@ -89,7 +89,7 @@ def test_controlled_id_params(self): speed_mode="obey_safe_speed", ), lane_change_params=SumoLaneChangeParams( - lane_change_mode="no_lc_safe", + lane_change_mode="no_lat_collide", )) default_mingap = SumoCarFollowingParams().controller_params["minGap"] self.assertEqual(vehicles.types[0]["type_params"]["minGap"], @@ -336,7 +336,6 @@ def test_no_junctions_highway(self): "num_edges": 1, "use_ghost_edge": False, "ghost_speed_limit": 25, - "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -407,7 +406,6 @@ def test_no_junctions_highway(self): "num_edges": 3, "use_ghost_edge": False, "ghost_speed_limit": 25, - "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() @@ -477,7 +475,6 @@ def test_no_junctions_highway(self): "num_edges": 3, "use_ghost_edge": False, "ghost_speed_limit": 25, - "boundary_cell_length": 300, } net_params = NetParams(additional_params=additional_net_params) vehicles = VehicleParams() diff --git a/tests/fast_tests/test_visualizers.py b/tests/fast_tests/test_visualizers.py index d2f4a20a4..7af413909 100644 --- a/tests/fast_tests/test_visualizers.py +++ b/tests/fast_tests/test_visualizers.py @@ -91,226 +91,236 @@ def test_capacity_diagram_generator(self): np.testing.assert_array_almost_equal(std_outflows, expected_stds) def test_time_space_diagram_figure_eight(self): + # check that the exported data matches the expected emission file data + fig8_emission_data = { + 'idm_3': {'pos': [27.25, 28.25, 30.22, 33.17], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['upper_ring', 'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_4': {'pos': [56.02, 57.01, 58.99, 61.93], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['upper_ring', 
'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_5': {'pos': [84.79, 85.78, 87.76, 90.7], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['upper_ring', 'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_2': {'pos': [28.77, 29.76, 1.63, 4.58], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.95], + 'edge': ['top', 'top', 'upper_ring', 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_13': {'pos': [106.79, 107.79, 109.77, 112.74], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.96], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_9': {'pos': [22.01, 23.0, 24.97, 27.92], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.95], + 'edge': ['left', 'left', 'left', 'left'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_6': {'pos': [113.56, 114.55, 116.52, 119.47], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.95], + 'edge': ['upper_ring', 'upper_ring', 'upper_ring', + 'upper_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_8': {'pos': [29.44, 0.28, 2.03, 4.78], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.84, 1.76, 2.75], + 'edge': ['right', ':center_0', ':center_0', + ':center_0'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_12': {'pos': [78.03, 79.02, 80.99, 83.94], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_10': {'pos': [20.49, 21.48, 23.46, 26.41], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_11': {'pos': [49.26, 50.25, 52.23, 55.17], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['lower_ring', 'lower_ring', 'lower_ring', + 'lower_ring'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 
'idm_1': {'pos': [0.0, 0.99, 2.97, 5.91], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.98, 2.95], + 'edge': ['top', 'top', 'top', 'top'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_7': {'pos': [0.67, 1.66, 3.64, 6.58], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 0.99, 1.97, 2.94], + 'edge': ['right', 'right', 'right', 'right'], + 'lane': [0.0, 0.0, 0.0, 0.0]}, + 'idm_0': {'pos': [0.0, 1.0, 2.98, 5.95], + 'time': [1.0, 2.0, 3.0, 4.0], + 'vel': [0.0, 1.0, 1.99, 2.97], + 'edge': ['bottom', 'bottom', 'bottom', 'bottom'], + 'lane': [0.0, 0.0, 0.0, 0.0]} + } dir_path = os.path.dirname(os.path.realpath(__file__)) + actual_emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/fig8_emission.csv')) + self.assertDictEqual(fig8_emission_data, actual_emission_data) + + # test get_time_space_data for figure eight networks flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/fig8.json')) - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/fig8_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = np.array([ - [[1., 60.], [2., 59.]], - [[2., 59.], [3., 57.02]], - [[3., 57.02], [4., 54.05]], - [[1., 23.8], [2., 22.81]], - [[2., 22.81], [3., 20.83]], - [[3., 20.83], [4., 17.89]], - [[1., 182.84166941], [2., 181.85166941]], - [[2., 181.85166941], [3., 179.87166941]], - [[3., 179.87166941], [4., 176.92166941]], - [[1., 154.07166941], [2., 153.08166941]], - [[2., 153.08166941], [3., 151.10166941]], - [[3., 151.10166941], [4., 148.16166941]], - [[1., 125.30166941], [2., 124.31166941]], - [[2., 124.31166941], [3., 122.34166941]], - [[3., 122.34166941], [4., 119.39166941]], - [[1., 96.54166941], [2., 95.54166941]], - [[2., 95.54166941], [3., 93.56166941]], - [[3., 93.56166941], [4., 90.59166941]], - [[1., -203.16166941], [2., -202.17166941]], - [[2., -202.17166941], [3., -200.02166941]], - [[3., -200.02166941], [4., -197.07166941]], - 
[[1., -174.40166941], [2., -173.40166941]], - [[2., -173.40166941], [3., -171.43166941]], - [[3., -171.43166941], [4., -168.48166941]], - [[1., -145.63166941], [2., -144.64166941]], - [[2., -144.64166941], [3., -142.66166941]], - [[3., -142.66166941], [4., -139.72166941]], - [[1., -116.86166941], [2., -115.87166941]], - [[2., -115.87166941], [3., -113.89166941]], - [[3., -113.89166941], [4., -110.95166941]], - [[1., -88.09166941], [2., -87.10166941]], - [[2., -87.10166941], [3., -85.13166941]], - [[3., -85.13166941], [4., -82.18166941]], - [[1., -59.33], [2., -58.34]], - [[2., -58.34], [3., -56.36]], - [[3., -56.36], [4., -53.42]], - [[1., -30.56], [2., -29.72]], - [[2., -29.72], [3., -27.97]], - [[3., -27.97], [4., -25.22]], - [[1., -1.79], [2., -0.8]], - [[2., -0.8], [3., 208.64166941]], - [[3., 208.64166941], [4., 205.69166941]]] + pos, speed, _ = tsd.get_time_space_data( + actual_emission_data, flow_params) + + expected_pos = np.array( + [[60, 23.8, 182.84166941, 154.07166941, 125.30166941, 96.54166941, + -203.16166941, -174.40166941, -145.63166941, -116.86166941, + -88.09166941, -59.33, -30.56, -1.79], + [59, 22.81, 181.85166941, 153.08166941, 124.31166941, 95.54166941, + -202.17166941, -173.40166941, -144.64166941, -115.87166941, + -87.10166941, -58.34, -29.72, -0.8], + [57.02, 20.83, 179.87166941, 151.10166941, 122.34166941, + 93.56166941, -200.02166941, -171.43166941, -142.66166941, + -113.89166941, -85.13166941, -56.36, -27.97, 208.64166941]] ) + expected_speed = np.array([ + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [1, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, 0.99, + 0.99, 0.84, 0.99], + [1.99, 1.98, 1.98, 1.98, 1.98, 1.98, 1.97, 1.98, 1.98, 1.98, 1.97, + 1.97, 1.76, 1.97] + ]) - np.testing.assert_array_almost_equal(segs, expected_segs) + np.testing.assert_array_almost_equal(pos[:-1, :], expected_pos) + np.testing.assert_array_almost_equal(speed[:-1, :], expected_speed) def test_time_space_diagram_merge(self): dir_path = 
os.path.dirname(os.path.realpath(__file__)) + emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/merge_emission.csv')) + flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/merge.json')) - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/merge_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = np.array([ - [[2.0000e-01, 7.2949e+02], [4.0000e-01, 7.2953e+02]], - [[4.0000e-01, 7.2953e+02], [6.0000e-01, 7.2961e+02]], - [[6.0000e-01, 7.2961e+02], [8.0000e-01, 7.2973e+02]], - [[8.0000e-01, 7.2973e+02], [1.0000e+00, 7.2988e+02]]] + pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_pos = np.array( + [[4.86, 180.32, 361.32, 547.77, 0], + [4.88, 180.36, 361.36, 547.8, 0], + [4.95, 180.43, 361.44, 547.87, 0], + [5.06, 180.54, 361.56, 547.98, 0], + [5.21, 180.68, 361.72, 548.12, 0], + [5.4, 180.86, 0, 0, 0]] + ) + expected_speed = np.array( + [[0, 0, 0, 0, 0], + [0.15, 0.17, 0.19, 0.14, 0], + [0.35, 0.37, 0.39, 0.34, 0], + [0.54, 0.57, 0.59, 0.54, 0], + [0.74, 0.7, 0.79, 0.71, 0], + [0.94, 0.9, 0, 0, 0]] ) - np.testing.assert_array_almost_equal(segs, expected_segs) + np.testing.assert_array_almost_equal(pos, expected_pos) + np.testing.assert_array_almost_equal(speed, expected_speed) def test_time_space_diagram_I210(self): dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/i210_emission.csv')) + module = __import__("examples.exp_configs.non_rl", fromlist=["i210_subnetwork"]) flow_params = getattr(module, "i210_subnetwork").flow_params - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/i210_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = { - 1: np.array([ - [[0.8, 5.1], [1.6, 23.37]], - [[1.6, 23.37], [2.4, 
42.02]], - [[2.4, 42.02], [3.2, 61.21]], - [[3.2, 61.21], [4., 18.87]], - [[4., 18.87], [4.8, 39.93]], - [[2.4, 5.1], [3.2, 22.97]], - [[3.2, 22.97], [4., 40.73]]] - ), - 2: np.array([ - [[2.4, 5.1], [3.2, 23.98]], - [[3.2, 23.98], [4., 43.18]]] - ), - 3: np.array([ - [[0.8, 5.1], [1.6, 23.72]], - [[1.6, 23.72], [2.4, 43.06]], - [[2.4, 43.06], [3.2, 1.33]], - [[3.2, 1.33], [4., 21.65]], - [[4., 21.65], [4.8, 43.46]], - [[2.4, 5.1], [3.2, 23.74]], - [[3.2, 23.74], [4., 42.38]]] - ), - 4: np.array([ - [[2.4, 5.1], [3.2, 23.6]], - [[3.2, 23.6], [4., 42.46]]] - )} - - for lane, expected_seg in expected_segs.items(): - np.testing.assert_array_almost_equal(segs[lane], expected_seg) + pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_pos = np.array( + [[5.1, 0., 0.], + [23.37, 0., 0.], + [42.02, 5.1, 0.], + [61.21, 22.97, 0.], + [80.45, 40.73, 5.1], + [101.51, 0., 0.]] + ) + expected_speed = np.array( + [[23., 0., 0.], + [22.84, 0., 0.], + [23.31, 23., 0.], + [23.98, 22.33, 0.], + [24.25, 22.21, 23.], + [26.33, 0., 0.]] + ) + + np.testing.assert_array_almost_equal(pos, expected_pos) + np.testing.assert_array_almost_equal(speed, expected_speed) def test_time_space_diagram_ring_road(self): dir_path = os.path.dirname(os.path.realpath(__file__)) + emission_data = tsd.import_data_from_emission( + os.path.join(dir_path, 'test_files/ring_230_emission.csv')) + flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/ring_230.json')) - emission_data = tsd.import_data_from_trajectory( - os.path.join(dir_path, 'test_files/ring_230_emission.csv'), flow_params) - - segs, _ = tsd.get_time_space_data(emission_data, flow_params) - - expected_segs = np.array([ - [[1.0000e-01, 0.0000e+00], [2.0000e-01, 1.0000e-02]], - [[2.0000e-01, 1.0000e-02], [3.0000e-01, 2.0000e-02]], - [[3.0000e-01, 2.0000e-02], [4.0000e-01, 5.0000e-02]], - [[4.0000e-01, 5.0000e-02], [5.0000e-01, 8.0000e-02]], - [[5.0000e-01, 8.0000e-02], [6.0000e-01, 1.2000e-01]], - 
[[1.0000e-01, 9.5500e+00], [2.0000e-01, 9.5500e+00]], - [[2.0000e-01, 9.5500e+00], [3.0000e-01, 9.5700e+00]], - [[3.0000e-01, 9.5700e+00], [4.0000e-01, 9.5900e+00]], - [[4.0000e-01, 9.5900e+00], [5.0000e-01, 9.6200e+00]], - [[5.0000e-01, 9.6200e+00], [6.0000e-01, 9.6600e+00]], - [[1.0000e-01, 9.5550e+01], [2.0000e-01, 9.5560e+01]], - [[2.0000e-01, 9.5560e+01], [3.0000e-01, 9.5580e+01]], - [[3.0000e-01, 9.5580e+01], [4.0000e-01, 9.5600e+01]], - [[4.0000e-01, 9.5600e+01], [5.0000e-01, 9.5630e+01]], - [[5.0000e-01, 9.5630e+01], [6.0000e-01, 9.5670e+01]], - [[1.0000e-01, 1.0510e+02], [2.0000e-01, 1.0511e+02]], - [[2.0000e-01, 1.0511e+02], [3.0000e-01, 1.0512e+02]], - [[3.0000e-01, 1.0512e+02], [4.0000e-01, 1.0515e+02]], - [[4.0000e-01, 1.0515e+02], [5.0000e-01, 1.0518e+02]], - [[5.0000e-01, 1.0518e+02], [6.0000e-01, 1.0522e+02]], - [[1.0000e-01, 1.1465e+02], [2.0000e-01, 1.1465e+02]], - [[2.0000e-01, 1.1465e+02], [3.0000e-01, 1.1467e+02]], - [[3.0000e-01, 1.1467e+02], [4.0000e-01, 1.1469e+02]], - [[4.0000e-01, 1.1469e+02], [5.0000e-01, 1.1472e+02]], - [[5.0000e-01, 1.1472e+02], [6.0000e-01, 1.1476e+02]], - [[1.0000e-01, 1.2429e+02], [2.0000e-01, 1.2430e+02]], - [[2.0000e-01, 1.2430e+02], [3.0000e-01, 1.2431e+02]], - [[3.0000e-01, 1.2431e+02], [4.0000e-01, 1.2434e+02]], - [[4.0000e-01, 1.2434e+02], [5.0000e-01, 1.2437e+02]], - [[5.0000e-01, 1.2437e+02], [6.0000e-01, 1.2441e+02]], - [[1.0000e-01, 1.3384e+02], [2.0000e-01, 1.3384e+02]], - [[2.0000e-01, 1.3384e+02], [3.0000e-01, 1.3386e+02]], - [[3.0000e-01, 1.3386e+02], [4.0000e-01, 1.3388e+02]], - [[4.0000e-01, 1.3388e+02], [5.0000e-01, 1.3391e+02]], - [[1.0000e-01, 1.4338e+02], [2.0000e-01, 1.4339e+02]], - [[2.0000e-01, 1.4339e+02], [3.0000e-01, 1.4341e+02]], - [[3.0000e-01, 1.4341e+02], [4.0000e-01, 1.4343e+02]], - [[4.0000e-01, 1.4343e+02], [5.0000e-01, 1.4346e+02]], - [[1.0000e-01, 1.5293e+02], [2.0000e-01, 1.5294e+02]], - [[2.0000e-01, 1.5294e+02], [3.0000e-01, 1.5295e+02]], - [[3.0000e-01, 1.5295e+02], [4.0000e-01, 
1.5297e+02]], - [[4.0000e-01, 1.5297e+02], [5.0000e-01, 1.5301e+02]], - [[1.0000e-01, 1.6247e+02], [2.0000e-01, 1.6248e+02]], - [[2.0000e-01, 1.6248e+02], [3.0000e-01, 1.6250e+02]], - [[3.0000e-01, 1.6250e+02], [4.0000e-01, 1.6252e+02]], - [[4.0000e-01, 1.6252e+02], [5.0000e-01, 1.6255e+02]], - [[1.0000e-01, 1.7202e+02], [2.0000e-01, 1.7203e+02]], - [[2.0000e-01, 1.7203e+02], [3.0000e-01, 1.7204e+02]], - [[3.0000e-01, 1.7204e+02], [4.0000e-01, 1.7207e+02]], - [[4.0000e-01, 1.7207e+02], [5.0000e-01, 1.7210e+02]], - [[1.0000e-01, 1.8166e+02], [2.0000e-01, 1.8167e+02]], - [[2.0000e-01, 1.8167e+02], [3.0000e-01, 1.8169e+02]], - [[3.0000e-01, 1.8169e+02], [4.0000e-01, 1.8171e+02]], - [[4.0000e-01, 1.8171e+02], [5.0000e-01, 1.8174e+02]], - [[1.0000e-01, 1.9090e+01], [2.0000e-01, 1.9100e+01]], - [[2.0000e-01, 1.9100e+01], [3.0000e-01, 1.9110e+01]], - [[3.0000e-01, 1.9110e+01], [4.0000e-01, 1.9140e+01]], - [[4.0000e-01, 1.9140e+01], [5.0000e-01, 1.9170e+01]], - [[1.0000e-01, 1.9121e+02], [2.0000e-01, 1.9122e+02]], - [[2.0000e-01, 1.9122e+02], [3.0000e-01, 1.9123e+02]], - [[3.0000e-01, 1.9123e+02], [4.0000e-01, 1.9126e+02]], - [[4.0000e-01, 1.9126e+02], [5.0000e-01, 1.9129e+02]], - [[1.0000e-01, 2.0075e+02], [2.0000e-01, 2.0076e+02]], - [[2.0000e-01, 2.0076e+02], [3.0000e-01, 2.0078e+02]], - [[3.0000e-01, 2.0078e+02], [4.0000e-01, 2.0081e+02]], - [[4.0000e-01, 2.0081e+02], [5.0000e-01, 2.0085e+02]], - [[1.0000e-01, 2.8640e+01], [2.0000e-01, 2.8640e+01]], - [[2.0000e-01, 2.8640e+01], [3.0000e-01, 2.8660e+01]], - [[3.0000e-01, 2.8660e+01], [4.0000e-01, 2.8680e+01]], - [[4.0000e-01, 2.8680e+01], [5.0000e-01, 2.8710e+01]], - [[1.0000e-01, 3.8180e+01], [2.0000e-01, 3.8190e+01]], - [[2.0000e-01, 3.8190e+01], [3.0000e-01, 3.8210e+01]], - [[3.0000e-01, 3.8210e+01], [4.0000e-01, 3.8230e+01]], - [[4.0000e-01, 3.8230e+01], [5.0000e-01, 3.8260e+01]], - [[1.0000e-01, 4.7730e+01], [2.0000e-01, 4.7740e+01]], - [[2.0000e-01, 4.7740e+01], [3.0000e-01, 4.7750e+01]], - [[3.0000e-01, 
4.7750e+01], [4.0000e-01, 4.7770e+01]], - [[4.0000e-01, 4.7770e+01], [5.0000e-01, 4.7810e+01]], - [[1.0000e-01, 5.7270e+01], [2.0000e-01, 5.7280e+01]], - [[2.0000e-01, 5.7280e+01], [3.0000e-01, 5.7300e+01]], - [[3.0000e-01, 5.7300e+01], [4.0000e-01, 5.7320e+01]], - [[4.0000e-01, 5.7320e+01], [5.0000e-01, 5.7350e+01]], - [[1.0000e-01, 6.6920e+01], [2.0000e-01, 6.6930e+01]], - [[2.0000e-01, 6.6930e+01], [3.0000e-01, 6.6940e+01]], - [[3.0000e-01, 6.6940e+01], [4.0000e-01, 6.6970e+01]], - [[4.0000e-01, 6.6970e+01], [5.0000e-01, 6.7000e+01]], - [[1.0000e-01, 7.6460e+01], [2.0000e-01, 7.6470e+01]], - [[2.0000e-01, 7.6470e+01], [3.0000e-01, 7.6490e+01]], - [[3.0000e-01, 7.6490e+01], [4.0000e-01, 7.6510e+01]], - [[4.0000e-01, 7.6510e+01], [5.0000e-01, 7.6540e+01]], - [[1.0000e-01, 8.6010e+01], [2.0000e-01, 8.6020e+01]], - [[2.0000e-01, 8.6020e+01], [3.0000e-01, 8.6030e+01]], - [[3.0000e-01, 8.6030e+01], [4.0000e-01, 8.6060e+01]], - [[4.0000e-01, 8.6060e+01], [5.0000e-01, 8.6090e+01]]] + pos, speed, _ = tsd.get_time_space_data(emission_data, flow_params) + + expected_pos = np.array( + [[0.0000e+00, 9.5500e+00, 9.5550e+01, 1.0510e+02, 1.1465e+02, + 1.2429e+02, 1.3384e+02, 1.4338e+02, 1.5293e+02, 1.6247e+02, + 1.7202e+02, 1.8166e+02, 1.9090e+01, 1.9121e+02, 2.0075e+02, + 2.8640e+01, 3.8180e+01, 4.7730e+01, 5.7270e+01, 6.6920e+01, + 7.6460e+01, 8.6010e+01], + [1.0000e-02, 9.5500e+00, 9.5560e+01, 1.0511e+02, 1.1465e+02, + 1.2430e+02, 1.3384e+02, 1.4339e+02, 1.5294e+02, 1.6248e+02, + 1.7203e+02, 1.8167e+02, 1.9100e+01, 1.9122e+02, 2.0076e+02, + 2.8640e+01, 3.8190e+01, 4.7740e+01, 5.7280e+01, 6.6930e+01, + 7.6470e+01, 8.6020e+01], + [2.0000e-02, 9.5700e+00, 9.5580e+01, 1.0512e+02, 1.1467e+02, + 1.2431e+02, 1.3386e+02, 1.4341e+02, 1.5295e+02, 1.6250e+02, + 1.7204e+02, 1.8169e+02, 1.9110e+01, 1.9123e+02, 2.0078e+02, + 2.8660e+01, 3.8210e+01, 4.7750e+01, 5.7300e+01, 6.6940e+01, + 7.6490e+01, 8.6030e+01], + [5.0000e-02, 9.5900e+00, 9.5600e+01, 1.0515e+02, 1.1469e+02, + 1.2434e+02, 
1.3388e+02, 1.4343e+02, 1.5297e+02, 1.6252e+02, + 1.7207e+02, 1.8171e+02, 1.9140e+01, 1.9126e+02, 2.0081e+02, + 2.8680e+01, 3.8230e+01, 4.7770e+01, 5.7320e+01, 6.6970e+01, + 7.6510e+01, 8.6060e+01], + [8.0000e-02, 9.6200e+00, 9.5630e+01, 1.0518e+02, 1.1472e+02, + 1.2437e+02, 1.3391e+02, 1.4346e+02, 1.5301e+02, 1.6255e+02, + 1.7210e+02, 1.8174e+02, 1.9170e+01, 1.9129e+02, 2.0085e+02, + 2.8710e+01, 3.8260e+01, 4.7810e+01, 5.7350e+01, 6.7000e+01, + 7.6540e+01, 8.6090e+01], + [1.2000e-01, 9.6600e+00, 9.5670e+01, 1.0522e+02, 1.1476e+02, + 1.2441e+02, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, 0.0000e+00, + 0.0000e+00, 0.0000e+00]] ) - - np.testing.assert_array_almost_equal(segs, expected_segs) + expected_speed = np.array([ + [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], + [0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, + 0.08, 0.08, 0.08, 0.1, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08, 0.08], + [0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, + 0.16, 0.16, 0.16, 0.2, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16, 0.16], + [0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, + 0.23, 0.23, 0.23, 0.29, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23, 0.23], + [0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, + 0.31, 0.31, 0.31, 0.39, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31, 0.31], + [0.41, 0.41, 0.41, 0.41, 0.41, 0.41, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0] + ]) + + np.testing.assert_array_almost_equal(pos, expected_pos) + np.testing.assert_array_almost_equal(speed, expected_speed) def test_plot_ray_results(self): dir_path = os.path.dirname(os.path.realpath(__file__)) From ddce32ed60b5404e16a42d5f8ed8e17ac862c329 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 17:04:47 -0700 Subject: [PATCH 306/335] Cleanup code --- flow/visualize/visualizer_rllib.py | 3 --- 1 file changed, 3 
deletions(-) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 5afbb39a3..80b901ebb 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -85,8 +85,6 @@ def visualizer_rllib(args): sim_params.use_ballistic = False # Determine agent and checkpoint - # TODO(akashvelu): remove this - # print("NEW CONFIGGG: ", config['env_config']['run']) config_run = config['env_config']['run'] if 'run' in config['env_config'] \ else None if args.run and config_run: @@ -177,7 +175,6 @@ def visualizer_rllib(args): checkpoint = result_dir + '/checkpoint_' + args.checkpoint_num checkpoint = checkpoint + '/checkpoint-' + args.checkpoint_num agent.restore(checkpoint) - agent.import_model('/Users/akashvelu/Desktop/combined_test3/ppo_model.h5', 'av') if hasattr(agent, "local_evaluator") and \ From 4e6302e2d2d5223e567a25817b84e64be4dedd64 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 25 Jun 2020 17:17:53 -0700 Subject: [PATCH 307/335] Handle case with vehicle in no-control edge --- .../imitation_learning/imitating_controller.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 39fd2421e..64622ef73 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -38,7 +38,11 @@ def get_accel(self, env): instance of environment being used """ # observation is a dictionary for multiagent envs, list for singleagent envs + if self.multiagent: + # if vehicle is in non-control edge, it will not be in observation, so return None to default control to Sumo + if self.veh_id not in env.get_state().keys(): + return None observation = env.get_state()[self.veh_id] else: observation = env.get_state() @@ -56,7 +60,9 @@ def get_accel(self, env): else: rl_ids = env.get_rl_ids() - assert self.veh_id in 
rl_ids, "Vehicle corresponding to controller not in env!" + if not (self.veh_id in rl_ids): + # vehicle in non-control edge, so return None to default control to Sumo + return None # return the action taken by the vehicle ind = rl_ids.index(self.veh_id) From 97cfdee02f53ac84bf8069cb66b2cee2fea71ce5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 1 Jul 2020 17:39:20 -0700 Subject: [PATCH 308/335] grey out warmup period and ghost cells --- flow/visualize/time_space_diagram.py | 50 +++++++++++++++++++--------- 1 file changed, 35 insertions(+), 15 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index ad8443da3..c5703b11e 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -27,7 +27,8 @@ import matplotlib matplotlib.use('TkAgg') from matplotlib import pyplot as plt -from matplotlib.collections import LineCollection +from matplotlib.collections import LineCollection, PatchCollection +from matplotlib.patches import Rectangle import matplotlib.colors as colors import numpy as np import pandas as pd @@ -187,8 +188,6 @@ def _highway(data): pd.DataFrame modified trajectory dataframe """ - data.loc[:, :] = data[(data['distance'] > 500)] - data.loc[:, :] = data[(data['distance'] < 2300)] segs = data[['time_step', 'distance', 'next_time', 'next_pos']].values.reshape((len(data), 2, 2)) return segs, data @@ -241,10 +240,6 @@ def _i210_subnetwork(data): pd.DataFrame modified trajectory dataframe """ - # Omit ghost edges - omit_edges = {'ghost0', '119257908#3'} - data.loc[:, :] = data[~data['edge_id'].isin(omit_edges)] - # Reset lane numbers that are offset by ramp lanes offset_edges = set(data[data['lane_id'] == 5]['edge_id'].unique()) data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] -= 1 @@ -391,7 +386,7 @@ def _get_abs_pos(df, params): return ret -def plot_tsd(ax, df, segs, args, lane=None): +def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, 
ghost_bounds=None): """Plot the time-space diagram. Take the pre-processed segments and other meta-data, then plot all the line segments. @@ -408,6 +403,10 @@ def plot_tsd(ax, df, segs, args, lane=None): parsed arguments lane : int, optional lane number to be shown in plot title + ghost_edges : list or set of str + ghost edge names to be greyed out, default None + ghost_bounds : tuple + lower and upper bounds of domain, excluding ghost edges, default None Returns ------- @@ -415,8 +414,7 @@ def plot_tsd(ax, df, segs, args, lane=None): """ norm = plt.Normalize(args.min_speed, args.max_speed) - xmin = max(df['time_step'].min(), args.start) - xmax = min(df['time_step'].max(), args.stop) + xmin, xmax = df['time_step'].min(), df['time_step'].max() xbuffer = (xmax - xmin) * 0.025 # 2.5% of range ymin, ymax = df['distance'].min(), df['distance'].max() ybuffer = (ymax - ymin) * 0.025 # 2.5% of range @@ -430,6 +428,25 @@ def plot_tsd(ax, df, segs, args, lane=None): ax.add_collection(lc) ax.autoscale() + rects = [] + if ghost_edges: + y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() + y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() + rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min)) + rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) + rects.append(Rectangle((xmin, y_domain_min + y_domain_max), xmax - xmin, ymax - (y_domain_min + y_domain_max))) + elif ghost_bounds: + rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0])) + rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) + rects.append(Rectangle((xmin, ghost_bounds[0] + ghost_bounds[1]), xmax - xmin, ymax - (ghost_bounds[0] + ghost_bounds[1]))) + else: + rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin)) + + if rects: + pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None) + pc.set_zorder(20) + 
ax.add_collection(pc) + if lane: ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) else: @@ -469,8 +486,6 @@ def plot_tsd(ax, df, segs, args, lane=None): help='The minimum speed in the color range.') parser.add_argument('--start', type=float, default=0, help='initial time (in sec) in the plot.') - parser.add_argument('--stop', type=float, default=float('inf'), - help='final time (in sec) in the plot.') args = parser.parse_args() @@ -502,13 +517,17 @@ def plot_tsd(ax, df, segs, args, lane=None): for lane, df in traj_df.groupby('lane_id'): ax = plt.subplot(nlanes, 1, lane+1) - plot_tsd(ax, df, segs[lane], args, lane) + plot_tsd(ax, df, segs[lane], args, int(lane+1), ghost_edges={'ghost0', '119257908#3'}) + plt.tight_layout() else: # perform plotting operation fig = plt.figure(figsize=(16, 9)) ax = plt.axes() - plot_tsd(ax, traj_df, segs, args) + if flow_params['network'] == HighwayNetwork: + plot_tsd(ax, traj_df, segs, args, ghost_bounds=(500, 2300)) + else: + plot_tsd(ax, traj_df, segs, args) ########################################################################### # Note: For MergeNetwork only # @@ -519,4 +538,5 @@ def plot_tsd(ax, df, segs, args, lane=None): [-0.1, -0.1], linewidth=3, color="white") # ########################################################################### - plt.show() + outfile = args.trajectory_path.replace('csv', 'png') + plt.savefig(outfile) From e22189ebfc9c4f818fda2d26ec88e46cd2dbed88 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 1 Jul 2020 18:58:57 -0700 Subject: [PATCH 309/335] fix rectangle positioning for both networks --- flow/visualize/time_space_diagram.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index c5703b11e..ec5c3d7da 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -434,11 +434,11 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, 
ghost_bounds=None) y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min)) rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) - rects.append(Rectangle((xmin, y_domain_min + y_domain_max), xmax - xmin, ymax - (y_domain_min + y_domain_max))) + rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max)) elif ghost_bounds: rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0])) rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) - rects.append(Rectangle((xmin, ghost_bounds[0] + ghost_bounds[1]), xmax - xmin, ymax - (ghost_bounds[0] + ghost_bounds[1]))) + rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1])) else: rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin)) From c830f7815bc38abc9a8c7050a1b54085459ebb00 Mon Sep 17 00:00:00 2001 From: Eugene Vinitsky Date: Wed, 1 Jul 2020 23:00:29 -0700 Subject: [PATCH 310/335] Reward options in I210-dev Add accel penalty, stop penalty, mpg reward, and ability to compute reward for any vehicles upstream of you (i.e. 
make you less greedy and more social) --- .../exp_configs/non_rl/i210_subnetwork.py | 11 +- .../rl/multiagent/multiagent_i210.py | 77 +++++---- .../rl/multiagent/multiagent_straight_road.py | 5 +- examples/train.py | 60 +++---- flow/algorithms/centralized_PPO.py | 72 +++----- flow/algorithms/custom_ppo.py | 20 ++- flow/controllers/car_following_models.py | 1 + flow/controllers/velocity_controllers.py | 2 - flow/core/experiment.py | 4 +- flow/core/kernel/vehicle/aimsun.py | 17 ++ flow/core/kernel/vehicle/traci.py | 7 +- flow/core/rewards.py | 2 +- flow/envs/multiagent/base.py | 2 +- flow/envs/multiagent/i210.py | 62 +++---- flow/networks/i210_subnetwork_ghost_cell.py | 162 ------------------ flow/visualize/i210_replay.py | 11 +- flow/visualize/time_space_diagram.py | 35 +--- flow/visualize/transfer/util.py | 15 +- tests/fast_tests/test_examples.py | 10 +- 19 files changed, 203 insertions(+), 372 deletions(-) delete mode 100644 flow/networks/i210_subnetwork_ghost_cell.py diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 5c07aadf0..73e49caef 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -31,6 +31,8 @@ ON_RAMP = False # the inflow rate of vehicles (in veh/hr) INFLOW_RATE = 2050 +# on-ramp inflow_rate +ON_RAMP_INFLOW_RATE = 500 # the speed of inflowing vehicles from the main edge (in m/s) INFLOW_SPEED = 25.5 # fraction of vehicles that are follower-stoppers. 
0.10 corresponds to 10% @@ -123,7 +125,7 @@ inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), departSpeed=10, ) @@ -131,7 +133,7 @@ inflow.add( veh_type="av", edge="27414345", - vehs_per_hour=int(500 * PENETRATION_RATE), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * PENETRATION_RATE), departLane="random", departSpeed=10) @@ -209,11 +211,6 @@ def valid_ids(env, veh_ids): env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - # # we multiply by 5 to account for the vehicle length and by 1000 to - # # convert into veh/km - # "avg_density": lambda env: 5 * 1000 * len(env.k.vehicle.get_ids_by_edge( - # edge_id)) / (env.k.network.edge_length(edge_id) - # * env.k.network.num_lanes(edge_id)), "mpg": lambda env: miles_per_gallon( env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), "mpj": lambda env: miles_per_megajoule( diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index f468d37c6..a1c2e4f25 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -35,14 +35,16 @@ ON_RAMP = False # the inflow rate of vehicles (in veh/hr) INFLOW_RATE = 2050 +# the inflow rate on the on-ramp (in veh/hr) +ON_RAMP_INFLOW_RATE = 500 # the speed of inflowing vehicles from the main edge (in m/s) INFLOW_SPEED = 25.5 # fraction of vehicles that are RL vehicles. 
0.10 corresponds to 10% -PENETRATION_RATE = 0.10 +PENETRATION_RATE = 0.05 # desired speed of the vehicles in the network V_DES = 5.0 # horizon over which to run the env -HORIZON = 1500 +HORIZON = 1000 # steps to run before follower-stopper is allowed to take control WARMUP_STEPS = 600 # whether to turn off the fail safes for the human-driven vehicles @@ -83,11 +85,11 @@ # whether to use the MPJ reward. Otherwise, defaults to a target velocity # reward "mpj_reward": False, - # how many vehicles to look back for the MPG reward - "look_back_length": 1, + # how many vehicles to look back for any reward + "look_back_length": 3, # whether to reroute vehicles once they have exited - "reroute_on_exit": True, - 'target_velocity': 8.0, + "reroute_on_exit": False, + 'target_velocity': 5.0, # how many AVs there can be at once (this is only for centralized critics) "max_num_agents": 10, # which edges we shouldn't apply control on @@ -108,12 +110,14 @@ # how many timesteps to anneal the headway curriculum over "speed_curriculum_iters": 20, # weight of the headway reward - "speed_reward_gain": 0.5, + "speed_reward_gain": 5.0, # penalize stopped vehicles - "penalize_stops": True, + "penalize_stops": False, + "stop_penalty": 0.01, # penalize accels - "penalize_accel": True + "penalize_accel": False, + "accel_penalty": (1 / 400.0) }) # =========================================================================== # @@ -163,32 +167,47 @@ inflow = InFlows() for lane in [0, 1, 2, 3, 4]: - # Add the inflows from the main highway. - inflow.add( - veh_type="human", - edge="119257914", - vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), - departLane=lane, - departSpeed=INFLOW_SPEED) - inflow.add( - veh_type="av", - edge="119257914", - vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) + if WANT_BOUNDARY_CONDITIONS: + # Add the inflows from the main highway. 
+ inflow.add( + veh_type="human", + edge="ghost0", + vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=INFLOW_SPEED) + inflow.add( + veh_type="av", + edge="ghost0", + vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) + else: + # Add the inflows from the main highway. + inflow.add( + veh_type="human", + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * (1 - PENETRATION_RATE)), + departLane=lane, + departSpeed=INFLOW_SPEED) + inflow.add( + veh_type="av", + edge="119257914", + vehs_per_hour=int(INFLOW_RATE * PENETRATION_RATE), + departLane=lane, + departSpeed=INFLOW_SPEED) # Add the inflows from the on-ramps. if ON_RAMP: inflow.add( veh_type="human", edge="27414345", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) inflow.add( veh_type="human", edge="27414342#0", - vehs_per_hour=int(500 * (1 - PENETRATION_RATE)), + vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), departLane="random", departSpeed=10) @@ -225,7 +244,7 @@ sims_per_step=3, warmup_steps=WARMUP_STEPS, additional_params=additional_env_params, - done_at_exit=False + done_at_exit=not additional_env_params["reroute_on_exit"] ), # network-related parameters (see flow.core.params.NetParams and the @@ -278,8 +297,8 @@ def policy_mapping_fn(_): "avg_speed": lambda env: np.mean([speed for speed in env.k.vehicle.get_speed(env.k.vehicle.get_ids()) if speed >= 0]), "avg_outflow": lambda env: np.nan_to_num(env.k.vehicle.get_outflow_rate(120)), - "avg_energy": lambda env: -1*energy_consumption(env, 0.1), - "avg_per_step_energy": lambda env: -1*energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles - if env.k.vehicle.num_vehicles > 0 - else 0, + "avg_energy": lambda env: -1 * energy_consumption(env, 0.1), + "avg_per_step_energy": lambda env: -1 * energy_consumption(env, 0.1) / env.k.vehicle.num_vehicles + if 
env.k.vehicle.num_vehicles > 0 + else 0, } diff --git a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py index 5816d3fe7..73460d656 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_straight_road.py +++ b/examples/exp_configs/rl/multiagent/multiagent_straight_road.py @@ -86,9 +86,12 @@ # penalize stopped vehicles "penalize_stops": True, + "stop_penalty": 0.05, # penalize accels - "penalize_accel": True + "penalize_accel": True, + "accel_penalty": 0.05, + }) diff --git a/examples/train.py b/examples/train.py index 112b7fa0d..d062fd39a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -113,7 +113,6 @@ def run_model_stablebaseline(flow_params, """ from stable_baselines.common.vec_env import DummyVecEnv, SubprocVecEnv from stable_baselines import PPO2 - if num_cpus == 1: constructor = env_constructor(params=flow_params, version=0)() # The algorithms require a vectorized environment to run @@ -186,6 +185,7 @@ def setup_exps_rllib(flow_params, config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.995 # discount rate config["use_gae"] = True + config["no_done_at_end"] = False config["lambda"] = 0.97 config["kl_target"] = 0.02 config["num_sgd_iter"] = 10 @@ -218,8 +218,8 @@ def setup_exps_rllib(flow_params, config["lr"] = tune.grid_search([5e-4, 5e-5]) elif alg_run == "TD3": - agent_cls = get_agent_class(alg_run) - config = deepcopy(agent_cls._default_config) + alg_run = get_agent_class(alg_run) + config = deepcopy(alg_run._default_config) config["num_workers"] = n_cpus config["horizon"] = horizon @@ -242,6 +242,9 @@ def on_episode_start(info): episode.user_data["avg_energy"] = [] episode.user_data["avg_mpg"] = [] episode.user_data["avg_mpj"] = [] + episode.user_data["num_cars"] = [] + episode.user_data["avg_accel_human"] = [] + episode.user_data["avg_accel_avs"] = [] def on_episode_step(info): episode = info["episode"] @@ -271,6 +274,15 @@ def 
on_episode_step(info): episode.user_data["avg_speed_avs"].append(av_speed) episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0)) episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0)) + episode.user_data["num_cars"].append(len(env.k.vehicle.get_ids())) + episode.user_data["avg_accel_human"].append(np.nan_to_num(np.mean( + [np.abs((env.k.vehicle.get_speed(veh_id) - env.k.vehicle.get_previous_speed(veh_id))/env.sim_step) for + veh_id in veh_ids if veh_id in env.k.vehicle.previous_speeds.keys()] + ))) + episode.user_data["avg_accel_avs"].append(np.nan_to_num(np.mean( + [np.abs((env.k.vehicle.get_speed(veh_id) - env.k.vehicle.get_previous_speed(veh_id))/env.sim_step) for + veh_id in rl_ids if veh_id in env.k.vehicle.previous_speeds.keys()] + ))) def on_episode_end(info): episode = info["episode"] @@ -278,12 +290,14 @@ def on_episode_end(info): episode.custom_metrics["avg_speed"] = avg_speed avg_speed_avs = np.mean(episode.user_data["avg_speed_avs"]) episode.custom_metrics["avg_speed_avs"] = avg_speed_avs + episode.custom_metrics["avg_accel_avs"] = np.mean(episode.user_data["avg_accel_avs"]) episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"]) episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["avg_mpg"]) episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"]) + episode.custom_metrics["num_cars"] = np.mean(episode.user_data["num_cars"]) def on_train_result(info): - """Store the mean score of the episode, and adjust the number of adversaries.""" + """Store the mean score of the episode, and increment or decrement the iteration number for curriculum.""" trainer = info["trainer"] trainer.workers.foreach_worker( lambda ev: ev.foreach_env( @@ -361,13 +375,10 @@ def trial_str_creator(trial): tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) -def train_h_baselines(flow_params, args, multiagent): +def train_h_baselines(env_name, 
args, multiagent): """Train policies using SAC and TD3 with h-baselines.""" from hbaselines.algorithms import OffPolicyRLAlgorithm from hbaselines.utils.train import parse_options, get_hyperparameters - from hbaselines.envs.mixed_autonomy import FlowEnv - - flow_params = deepcopy(flow_params) # Get the command-line arguments that are relevant here args = parse_options(description="", example_usage="", args=args) @@ -375,31 +386,6 @@ def train_h_baselines(flow_params, args, multiagent): # the base directory that the logged data will be stored in base_dir = "training_data" - # Create the training environment. - env = FlowEnv( - flow_params, - multiagent=multiagent, - shared=args.shared, - maddpg=args.maddpg, - render=args.render, - version=0 - ) - - # Create the evaluation environment. - if args.evaluate: - eval_flow_params = deepcopy(flow_params) - eval_flow_params['env'].evaluate = True - eval_env = FlowEnv( - eval_flow_params, - multiagent=multiagent, - shared=args.shared, - maddpg=args.maddpg, - render=args.render_eval, - version=1 - ) - else: - eval_env = None - for i in range(args.n_training): # value of the next seed seed = args.seed + i @@ -447,8 +433,8 @@ def train_h_baselines(flow_params, args, multiagent): # Create the algorithm object. 
alg = OffPolicyRLAlgorithm( policy=policy, - env=env, - eval_env=eval_env, + env="flow:{}".format(env_name), + eval_env="flow:{}".format(env_name) if args.evaluate else None, **hp ) @@ -468,7 +454,6 @@ def train_stable_baselines(submodule, flags): """Train policies using the PPO algorithm in stable-baselines.""" from stable_baselines.common.vec_env import DummyVecEnv from stable_baselines import PPO2 - flow_params = submodule.flow_params # Path to the saved files exp_tag = flow_params['exp_tag'] @@ -541,8 +526,7 @@ def main(args): elif flags.rl_trainer.lower() == "stable-baselines": train_stable_baselines(submodule, flags) elif flags.rl_trainer.lower() == "h-baselines": - flow_params = submodule.flow_params - train_h_baselines(flow_params, args, multiagent) + train_h_baselines(flags.exp_config, args, multiagent) else: raise ValueError("rl_trainer should be either 'rllib', 'h-baselines', " "or 'stable-baselines'.") diff --git a/flow/algorithms/centralized_PPO.py b/flow/algorithms/centralized_PPO.py index d30442773..133d7c8bf 100644 --- a/flow/algorithms/centralized_PPO.py +++ b/flow/algorithms/centralized_PPO.py @@ -1,14 +1,14 @@ +"""An example of customizing PPO to leverage a centralized critic.""" + from __future__ import absolute_import from __future__ import division from __future__ import print_function -"""An example of customizing PPO to leverage a centralized critic.""" - import argparse import numpy as np from ray.rllib.agents.ppo.ppo import PPOTrainer -from flow.algorithms.custom_ppo import CustomPPOTFPolicy +from flow.algorithms.custom_ppo import CustomPPOTFPolicy, KLCoeffMixin from ray.rllib.evaluation.postprocessing import compute_advantages, \ Postprocessing from ray.rllib.policy.sample_batch import SampleBatch @@ -65,14 +65,17 @@ def __init__(self, obs_space, action_space, num_outputs, model_config, self.register_variables(self.central_vf.variables) def forward(self, input_dict, state, seq_lens): + """Run forward inference.""" return 
self.model.forward(input_dict, state, seq_lens) def central_value_function(self, central_obs): + """Compute the centralized value function.""" return tf.reshape( self.central_vf( [central_obs]), [-1]) def value_function(self): + """Compute the normal value function; this is only here to make the code run.""" return self.model.value_function() # not used @@ -145,23 +148,27 @@ def __init__(self, @override(RecurrentTFModelV2) def forward_rnn(self, inputs, state, seq_lens): + """Forward inference on the RNN.""" model_out, self._value_out, h, c = self.model( [inputs, seq_lens] + state) return model_out, [h, c] @override(ModelV2) def get_initial_state(self): + """Set up the initial RNN state.""" return [ np.zeros(self.cell_size, np.float32), np.zeros(self.cell_size, np.float32), ] def central_value_function(self, central_obs): + """Compute the central value function.""" return tf.reshape( self.central_vf( [central_obs]), [-1]) def value_function(self): + """Compute the normal value function; this is only here to make the code run.""" return tf.reshape(self._value_out, [-1]) # not used @@ -175,18 +182,18 @@ def __init__(self): ) def compute_central_vf(self, central_obs): + """Run forward inference on the model.""" feed_dict = { self.get_placeholder(CENTRAL_OBS): central_obs, } return self.get_session().run(self.central_value_function, feed_dict) -# Grabs the opponent obs/act and includes it in the experience train_batch, -# and computes GAE using the central vf predictions. 
def centralized_critic_postprocessing(policy, sample_batch, other_agent_batches=None, episode=None): + """Find all other agents that overlapped with you and stack their obs to be passed to the central VF.""" if policy.loss_initialized(): assert other_agent_batches is not None @@ -207,13 +214,6 @@ def centralized_critic_postprocessing(policy, agent_id: other_agent_batches[agent_id][1]["obs"].copy() for agent_id in other_agent_batches.keys() } - # padded_agent_obs = {agent_id: - # overlap_and_pad_agent( - # time_span, - # rel_agent_time, - # other_obs[agent_id]) - # for agent_id, - # rel_agent_time in rel_agents.items()} padded_agent_obs = { agent_id: fill_missing( agent_time, @@ -229,7 +229,7 @@ def centralized_critic_postprocessing(policy, central_obs_batch = np.hstack( (sample_batch["obs"], np.hstack(central_obs_list))) except Exception as e: - print(‘Error in centralized PPO: ’, e) + print("Error in centralized PPO: ", e) # TODO(@ev) this is a bug and needs to be fixed central_obs_batch = sample_batch["obs"] max_vf_agents = policy.model.max_num_agents @@ -287,6 +287,7 @@ def time_overlap(time_span, agent_time): def fill_missing(agent_time, other_agent_time, obs): + """Pad the obs to the appropriate length for agents that don't overlap perfectly in time.""" # shortcut, the two overlap perfectly if np.sum(agent_time == other_agent_time) == agent_time.shape[0]: return obs @@ -315,15 +316,9 @@ def overlap_and_pad_agent(time_span, agent_time, obs): assert time_overlap(time_span, agent_time) print(time_span) print(agent_time) - if time_span[0] == 7 or agent_time[0] == 7: - import ipdb - ipdb.set_trace() # FIXME(ev) some of these conditions can be combined # no padding needed if agent_time[0] == time_span[0] and agent_time[1] == time_span[1]: - if obs.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs # agent enters before time_span starts and exits before time_span end if agent_time[0] < time_span[0] and agent_time[1] < time_span[1]: @@ -332,9 +327,6 @@ def 
overlap_and_pad_agent(time_span, agent_time, obs): overlap_obs = obs[non_overlap_time:] padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((overlap_obs, padding)) - if obs_concat.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs_concat # agent enters after time_span starts and exits after time_span ends elif agent_time[0] > time_span[0] and agent_time[1] > time_span[1]: @@ -343,9 +335,6 @@ def overlap_and_pad_agent(time_span, agent_time, obs): missing_time = agent_time[0] - time_span[0] padding = np.zeros((missing_time, obs.shape[1])) obs_concat = np.concatenate((padding, overlap_obs)) - if obs_concat.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs_concat # agent time is entirely contained in time_span elif agent_time[0] >= time_span[0] and agent_time[1] <= time_span[1]: @@ -358,9 +347,6 @@ def overlap_and_pad_agent(time_span, agent_time, obs): if missing_right > 0: padding = np.zeros((missing_right, obs.shape[1])) obs_concat = np.concatenate((obs_concat, padding)) - if obs_concat.shape[0] < 200: - import ipdb - ipdb.set_trace() return obs_concat # agent time totally contains time_span elif agent_time[0] <= time_span[0] and agent_time[1] >= time_span[1]: @@ -371,14 +357,11 @@ def overlap_and_pad_agent(time_span, agent_time, obs): overlap_obs = overlap_obs[non_overlap_left:] if non_overlap_right > 0: overlap_obs = overlap_obs[:-non_overlap_right] - if overlap_obs.shape[0] < 200: - import ipdb - ipdb.set_trace() return overlap_obs -# Copied from PPO but optimizing the central value function def loss_with_central_critic(policy, model, dist_class, train_batch): + """Set up the PPO loss but replace the VF loss with the centralized VF loss.""" CentralizedValueMixin.__init__(policy) logits, state = model.from_batch(train_batch) @@ -409,6 +392,8 @@ def loss_with_central_critic(policy, model, dist_class, train_batch): class PPOLoss(object): + """Object containing the PPO loss function.""" + def __init__(self, action_space, 
dist_class, @@ -472,6 +457,7 @@ def __init__(self, model_config : dict, optional model config for use in specifying action distributions. """ + def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -508,28 +494,13 @@ def reduce_mean_valid(t): def new_ppo_surrogate_loss(policy, model, dist_class, train_batch): + """Return the PPO loss with the centralized value function.""" loss = loss_with_central_critic(policy, model, dist_class, train_batch) return loss -class KLCoeffMixin(object): - def __init__(self, config): - # KL Coefficient - self.kl_coeff_val = config["kl_coeff"] - self.kl_target = config["kl_target"] - self.kl_coeff = tf.get_variable( - initializer=tf.constant_initializer(self.kl_coeff_val), - name="kl_coeff", - shape=(), - trainable=False, - dtype=tf.float32) - - def update_kl(self, blah): - pass - - def setup_mixins(policy, obs_space, action_space, config): - # copied from PPO + """Construct additional classes that add on to PPO.""" KLCoeffMixin.__init__(policy, config) EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], @@ -541,7 +512,7 @@ def setup_mixins(policy, obs_space, action_space, config): def central_vf_stats(policy, train_batch, grads): - # Report the explained variance of the central value function. 
+ """Report the explained variance of the centralized value function.""" return { "vf_explained_var": explained_variance( train_batch[Postprocessing.VALUE_TARGETS], @@ -550,6 +521,7 @@ def central_vf_stats(policy, train_batch, grads): def kl_and_loss_stats(policy, train_batch): + """Trianing stats to pass to the tensorboard.""" return { "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), "cur_lr": tf.cast(policy.cur_lr, tf.float64), diff --git a/flow/algorithms/custom_ppo.py b/flow/algorithms/custom_ppo.py index 65291f1d4..47a4459aa 100644 --- a/flow/algorithms/custom_ppo.py +++ b/flow/algorithms/custom_ppo.py @@ -1,4 +1,4 @@ -"""PPO but we add in the outflow after the reward to the final reward.""" +"""PPO but without the adaptive KL term that RLlib added.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function @@ -15,11 +15,11 @@ from ray.rllib.utils.explained_variance import explained_variance from ray.rllib.utils.tf_ops import make_tf_callable from ray.rllib.utils import try_import_tf + from ray.rllib.agents.trainer_template import build_trainer from ray.rllib.agents.ppo.ppo import choose_policy_optimizer, DEFAULT_CONFIG from ray.rllib.agents.ppo.ppo import warn_about_bad_reward_scales - tf = try_import_tf() logger = logging.getLogger(__name__) @@ -29,6 +29,8 @@ class PPOLoss(object): + """PPO Loss object.""" + def __init__(self, action_space, dist_class, @@ -92,6 +94,7 @@ def __init__(self, model_config : dict, optional model config for use in specifying action distributions. 
""" + def reduce_mean_valid(t): return tf.reduce_mean(tf.boolean_mask(t, valid_mask)) @@ -127,6 +130,7 @@ def reduce_mean_valid(t): def ppo_surrogate_loss(policy, model, dist_class, train_batch): + """Construct and return the PPO loss.""" logits, state = model.from_batch(train_batch) action_dist = dist_class(logits, model) @@ -163,6 +167,7 @@ def ppo_surrogate_loss(policy, model, dist_class, train_batch): def kl_and_loss_stats(policy, train_batch): + """Return statistics for the tensorboard.""" return { "cur_kl_coeff": tf.cast(policy.kl_coeff, tf.float64), "cur_lr": tf.cast(policy.cur_lr, tf.float64), @@ -216,6 +221,7 @@ def postprocess_ppo_gae(policy, def clip_gradients(policy, optimizer, loss): + """If grad_clip is not None, clip the gradients.""" variables = policy.model.trainable_variables() if policy.config["grad_clip"] is not None: grads_and_vars = optimizer.compute_gradients(loss, variables) @@ -229,6 +235,8 @@ def clip_gradients(policy, optimizer, loss): class ValueNetworkMixin(object): + """Construct the value function.""" + def __init__(self, obs_space, action_space, config): if config["use_gae"]: @@ -242,7 +250,7 @@ def value(ob, prev_action, prev_reward, *state): [prev_reward]), "is_training": tf.convert_to_tensor(False), }, [tf.convert_to_tensor([s]) for s in state], - tf.convert_to_tensor([1])) + tf.convert_to_tensor([1])) return self.model.value_function()[0] else: @@ -255,11 +263,13 @@ def value(ob, prev_action, prev_reward, *state): def setup_config(policy, obs_space, action_space, config): + """Add additional custom options from the config.""" # auto set the model option for layer sharing config["model"]["vf_share_layers"] = config["vf_share_layers"] def setup_mixins(policy, obs_space, action_space, config): + """Construct additional classes that add on to PPO.""" KLCoeffMixin.__init__(policy, config) ValueNetworkMixin.__init__(policy, obs_space, action_space, config) EntropyCoeffSchedule.__init__(policy, config["entropy_coeff"], @@ -268,6 +278,8 
@@ def setup_mixins(policy, obs_space, action_space, config): class KLCoeffMixin(object): + """Update the KL Coefficient. This is intentionally disabled to match the PPO paper better.""" + def __init__(self, config): # KL Coefficient self.kl_coeff_val = config["kl_coeff"] @@ -280,6 +292,7 @@ def __init__(self, config): dtype=tf.float32) def update_kl(self, blah): + """Disabled to match the PPO paper better.""" pass @@ -300,6 +313,7 @@ def update_kl(self, blah): def validate_config(config): + """Check that the config is set up properly.""" if config["entropy_coeff"] < 0: raise DeprecationWarning("entropy_coeff must be >= 0") if isinstance(config["entropy_coeff"], int): diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index 42c9b2a9b..280c94d37 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -647,6 +647,7 @@ def __init__(self, def get_accel(self, env): """See parent class.""" + # without generating waves. 
lead_id = env.k.vehicle.get_leader(self.veh_id) if not lead_id: # no car ahead if self.want_max_accel: diff --git a/flow/controllers/velocity_controllers.py b/flow/controllers/velocity_controllers.py index 86868c5f7..4d8bfec79 100644 --- a/flow/controllers/velocity_controllers.py +++ b/flow/controllers/velocity_controllers.py @@ -122,8 +122,6 @@ def get_accel(self, env): or env.k.vehicle.get_x_by_id(self.veh_id) > self.control_length[1])) \ or (self.no_control_edges is not None and len(self.no_control_edges) > 0 and edge in self.no_control_edges): - # TODO(@evinitsky) put back - # or env.k.vehicle.get_edge(self.veh_id) in self.no_control_edges: return None else: # compute the acceleration from the desired velocity diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 8ede367a7..1274280ba 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -169,8 +169,6 @@ def rl_actions(*_): if convert_to_csv and self.env.simulator == "traci": dir_path = self.env.sim_params.emission_path - - if dir_path: trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) @@ -196,7 +194,7 @@ def rl_actions(*_): get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0 and dir_path: + if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: write_dict_to_csv(trajectory_table_path, extra_info, not j) extra_info.clear() diff --git a/flow/core/kernel/vehicle/aimsun.py b/flow/core/kernel/vehicle/aimsun.py index ce0d026e5..16c94558a 100644 --- a/flow/core/kernel/vehicle/aimsun.py +++ b/flow/core/kernel/vehicle/aimsun.py @@ -65,6 +65,7 @@ def __init__(self, # number of vehicles to exit the network for every time-step self._num_arrived = [] self._arrived_ids = [] + self._arrived_rl_ids = [] # contains conversion from Flow-ID to Aimsun-ID 
self._id_aimsun2flow = {} @@ -174,11 +175,17 @@ def update(self, reset): added_vehicles = self.kernel_api.get_entered_ids() exited_vehicles = self.kernel_api.get_exited_ids() + # keep track of arrived rl vehicles + arrived_rl_ids = [] + # add the new vehicles if they should be tracked for aimsun_id in added_vehicles: veh_type = self.kernel_api.get_vehicle_type_name(aimsun_id) if veh_type in self.tracked_vehicle_types: self._add_departed(aimsun_id) + if aimsun_id in self.get_rl_ids(): + arrived_rl_ids.append(aimsun_id) + self._arrived_rl_ids.append(arrived_rl_ids) # remove the exited vehicles if they were tracked if not reset: @@ -639,6 +646,16 @@ def get_arrived_ids(self): """See parent class.""" raise NotImplementedError + def get_arrived_rl_ids(self, k=1): + """See parent class.""" + if len(self._arrived_rl_ids) > 0: + arrived = [] + for arr in self._arrived_rl_ids[-k:]: + arrived.extend(arr) + return arrived + else: + return 0 + def get_departed_ids(self): """See parent class.""" raise NotImplementedError diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 2a6a4da12..0fa1e6e17 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -524,10 +524,13 @@ def get_arrived_ids(self): """See parent class.""" return self._arrived_ids - def get_arrived_rl_ids(self): + def get_arrived_rl_ids(self, k=1): """See parent class.""" if len(self._arrived_rl_ids) > 0: - return self._arrived_rl_ids[-1] + arrived = [] + for arr in self._arrived_rl_ids[-k:]: + arrived.extend(arr) + return arrived else: return 0 diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 0f234da7e..6462b0a8c 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -322,7 +322,7 @@ def energy_consumption(env, gain=.001): rho = 1.225 # air density (kg/m^3) A = 2.6 # vehicle cross sectional area (m^2) for veh_id in env.k.vehicle.get_ids(): - if veh_id not in env.k.vehicle.previous_speeds: + if veh_id not in 
env.k.vehicle.previous_speeds.keys(): continue speed = env.k.vehicle.get_speed(veh_id) prev_speed = env.k.vehicle.get_previous_speed(veh_id) diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 881461d63..77a3d2c12 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -128,7 +128,7 @@ def step(self, rl_actions): reward = self.compute_reward(rl_actions, fail=crash) if self.env_params.done_at_exit: - for rl_id in self.k.vehicle.get_arrived_rl_ids(): + for rl_id in self.k.vehicle.get_arrived_rl_ids(self.env_params.sims_per_step): done[rl_id] = True reward[rl_id] = 0 states[rl_id] = -1 * np.ones(self.observation_space.shape[0]) diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index ad301a3f5..450a0269c 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -1,4 +1,5 @@ """Environment for training vehicles to reduce congestion in the I210.""" + from gym.spaces import Box import numpy as np @@ -94,9 +95,11 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): # penalize stops self.penalize_stops = env_params.additional_params["penalize_stops"] + self.stop_penalty = env_params.additional_params["stop_penalty"] # penalize accel self.penalize_accel = env_params.additional_params.get("penalize_accel", False) + self.accel_penalty = env_params.additional_params["accel_penalty"] @property def observation_space(self): @@ -142,16 +145,9 @@ def _apply_rl_actions(self, rl_actions): if rl_actions: for rl_id, actions in rl_actions.items(): accel = actions[0] - - # lane_change_softmax = np.exp(actions[1:4]) - # lane_change_softmax /= np.sum(lane_change_softmax) - # lane_change_action = np.random.choice([-1, 0, 1], - # p=lane_change_softmax) id_list.append(rl_id) accel_list.append(accel) self.k.vehicle.apply_acceleration(id_list, accel_list) - # self.k.vehicle.apply_lane_change(rl_id, lane_change_action) - # print('time to apply actions is ', time() - t) def 
in_control_range(self, veh_id): """Return if a veh_id is on an edge that is allowed to be controlled. @@ -194,6 +190,7 @@ def compute_reward(self, rl_actions, **kwargs): rewards = {} valid_ids = [rl_id for rl_id in self.k.vehicle.get_rl_ids() if self.in_control_range(rl_id)] + valid_human_ids = [veh_id for veh_id in self.k.vehicle.get_ids() if self.in_control_range(veh_id)] if self.env_params.additional_params["local_reward"]: des_speed = self.env_params.additional_params["target_velocity"] @@ -220,26 +217,27 @@ def compute_reward(self, rl_actions, **kwargs): else: break else: - speeds = [] - follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(rl_id)) - if follow_speed >= 0: - speeds.append(follow_speed) - if self.k.vehicle.get_speed(rl_id) >= 0: - speeds.append(self.k.vehicle.get_speed(rl_id)) - if len(speeds) > 0: - # rescale so the critic can estimate it quickly - rewards[rl_id] = np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 - for speed in speeds]) / (des_speed ** 2) + follow_id = rl_id + for i in range(self.look_back_length + 1): + if follow_id not in ["", None]: + follow_speed = self.k.vehicle.get_speed(self.k.vehicle.get_follower(follow_id)) + reward = (des_speed - min(np.abs(follow_speed - des_speed), des_speed)) ** 2 + reward /= ((des_speed ** 2) * self.look_back_length) + rewards[rl_id] += reward + else: + break + follow_id = self.k.vehicle.get_follower(follow_id) + else: if self.mpg_reward: - reward = np.nan_to_num(miles_per_gallon(self, self.k.vehicle.get_ids(), gain=1.0)) / 100.0 + reward = np.nan_to_num(miles_per_gallon(self, valid_human_ids, gain=1.0)) / 100.0 else: - speeds = self.k.vehicle.get_speed(self.k.vehicle.get_ids()) + speeds = self.k.vehicle.get_speed(valid_human_ids) des_speed = self.env_params.additional_params["target_velocity"] # rescale so the critic can estimate it quickly if self.reroute_on_exit: reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) - for speed in speeds]) / (des_speed)) + 
for speed in speeds]) / des_speed) else: reward = np.nan_to_num(np.mean([(des_speed - np.abs(speed - des_speed)) ** 2 for speed in speeds]) / (des_speed ** 2)) @@ -256,10 +254,8 @@ def compute_reward(self, rl_actions, **kwargs): t_headway = max( self.k.vehicle.get_headway(veh_id) / self.k.vehicle.get_speed(veh_id), 0) - # print('time headway is {}, headway is {}'.format(t_headway, self.k.vehicle.get_headway(veh_id))) scaling_factor = max(0, 1 - self.num_training_iters / self.headway_curriculum_iters) penalty += scaling_factor * self.headway_reward_gain * min((t_headway - t_min) / t_min, 0) - # print('penalty is ', penalty) rewards[veh_id] += penalty @@ -274,7 +270,7 @@ def compute_reward(self, rl_actions, **kwargs): follow_id = self.k.vehicle.get_follower(follow_id) if follow_id not in ["", None]: if self.reroute_on_exit: - speed_reward += ((des_speed - np.abs(speed - des_speed))) / (des_speed) + speed_reward += (des_speed - np.abs(speed - des_speed)) / des_speed else: speed_reward += ((des_speed - np.abs(speed - des_speed)) ** 2) / (des_speed ** 2) else: @@ -287,11 +283,11 @@ def compute_reward(self, rl_actions, **kwargs): speed = self.k.vehicle.get_speed(veh_id) if self.penalize_stops: if speed < 1.0: - rewards[veh_id] -= .01 + rewards[veh_id] -= self.stop_penalty if self.penalize_accel and veh_id in self.k.vehicle.previous_speeds: prev_speed = self.k.vehicle.get_previous_speed(veh_id) abs_accel = abs(speed - prev_speed) / self.sim_step - rewards[veh_id] -= abs_accel / 400.0 + rewards[veh_id] -= abs_accel * self.accel_penalty # print('time to get reward is ', time() - t) return rewards @@ -324,8 +320,6 @@ def additional_command(self): if edge == self.exit_edge and \ (self.k.vehicle.get_position(veh_id) > self.k.network.edge_length(self.exit_edge) - 100) \ and self.k.vehicle.get_leader(veh_id) is None: - # if self.step_counter > 6000: - # import ipdb; ipdb.set_trace() type_id = self.k.vehicle.get_type(veh_id) # remove the vehicle self.k.vehicle.remove(veh_id) @@ 
-333,8 +327,7 @@ def additional_command(self): lane = valid_lanes[index] del valid_lanes[index] # reintroduce it at the start of the network - # TODO(@evinitsky) select the lane and speed a bit more cleanly - # Note, the position is 10 so you are not overlapping with the inflow car that is being removed. + # Note, the position is 20 so you are not overlapping with the inflow car that is being removed. + # this allows the vehicle to be immediately inserted. try: self.k.vehicle.add( @@ -405,6 +398,17 @@ def step(self, rl_actions): done[rl_id] = True reward[rl_id] = 0 state[rl_id] = -1 * np.ones(self.observation_space.shape[0]) + else: + # you have to catch the vehicles on the exit edge, they have not yet + # received a done when the env terminates + if done['__all__']: + on_exit_edge = [rl_id for rl_id in self.k.vehicle.get_rl_ids() + if self.k.vehicle.get_edge(rl_id) == self.exit_edge] + for rl_id in on_exit_edge: + done[rl_id] = True + reward[rl_id] = 0 + state[rl_id] = -1 * np.ones(self.observation_space.shape[0]) + return state, reward, done, info diff --git a/flow/networks/i210_subnetwork_ghost_cell.py b/flow/networks/i210_subnetwork_ghost_cell.py deleted file mode 100644 index 8a45b4d91..000000000 --- a/flow/networks/i210_subnetwork_ghost_cell.py +++ /dev/null @@ -1,162 +0,0 @@ -"""Contains the I-210 sub-network class.""" - -from flow.networks.base import Network - -EDGES_DISTRIBUTION = [ - # Main highway - "ghost0", - "119257914", - "119257908#0", - "119257908#1-AddedOnRampEdge", - "119257908#1", - "119257908#1-AddedOffRampEdge", - "119257908#2", - "119257908#3", - - # On-ramp - "27414345", - "27414342#0", - "27414342#1-AddedOnRampEdge", - - # Off-ramp - "173381935", -] - - -class I210SubNetworkGhostCell(Network): - """A network used to simulate the I-210 sub-network. 
- - Usage - ----- - >>> from flow.core.params import NetParams - >>> from flow.core.params import VehicleParams - >>> from flow.core.params import InitialConfig - >>> from flow.networks import I210SubNetwork - >>> - >>> network = I210SubNetwork( - >>> name='I-210_subnetwork', - >>> vehicles=VehicleParams(), - >>> net_params=NetParams() - >>> ) - """ - - def specify_routes(self, net_params): - """See parent class. - - Routes for vehicles moving through the I210. - """ - if net_params.additional_params["on_ramp"]: - rts = { - # Main highway - "ghost0": [ - (["ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 17 / 8378), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - 17 / 8378) - ], - "119257914": [ - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 17 / 8378), # HOV: 1509 (on ramp: 57), Non HOV: 6869 (onramp: 16) - (["119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - 17 / 8378) - ], - "119257908#0": [ - (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#0", "119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#1-AddedOnRampEdge": [ - (["119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#1-AddedOnRampEdge", "119257908#1", - # "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#1": [ - (["119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # 
(["119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#1-AddedOffRampEdge": [ - (["119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1.0), - # (["119257908#1-AddedOffRampEdge", "173381935"], - # 0.5), - ], - "119257908#2": [ - (["119257908#2", "119257908#3"], 1), - ], - "119257908#3": [ - (["119257908#3"], 1), - ], - - # On-ramp - "27414345": [ - (["27414345", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 9 / 321), - (["27414345", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "173381935"], - 9 / 321), - ], - "27414342#0": [ - (["27414342#0", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1 - 20 / 421), - (["27414342#0", "27414342#1-AddedOnRampEdge", - "27414342#1", - "119257908#1-AddedOnRampEdge", "119257908#1", - "119257908#1-AddedOffRampEdge", "173381935"], - 20 / 421), - ], - "27414342#1-AddedOnRampEdge": [ - (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 0.5), - (["27414342#1-AddedOnRampEdge", "27414342#1", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "173381935"], - 0.5), - ], - - # Off-ramp - "173381935": [ - (["173381935"], 1), - ], - } - - else: - rts = { - # Main highway - "ghost0": [ - (["ghost0", "119257914", "119257908#0", "119257908#1-AddedOnRampEdge", - "119257908#1", "119257908#1-AddedOffRampEdge", "119257908#2", - "119257908#3"], - 1), - ], - } - - return rts diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 162a16121..8a4684a61 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ 
-19,7 +19,7 @@ from ray.tune.registry import register_env from flow.core.util import emission_to_csv, ensure_dir -from flow.core.rewards import vehicle_energy_consumption +from flow.core.rewards import veh_energy_consumption from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params from flow.utils.rllib import get_rllib_config @@ -142,6 +142,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= if not sim_params.restart_instance: env.restart_simulation(sim_params=sim_params, render=sim_params.render) + # reroute on exit is a training hack, it should be turned off at test time. + if hasattr(env, "reroute_on_exit"): + env.reroute_on_exit = False + if rllib_config: # check if we have a multiagent environment but in a # backwards compatible way @@ -167,7 +171,6 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= rllib_flow_params = get_flow_params(rllib_config) agent_create_env, agent_env_name = make_create_env(params=rllib_flow_params, version=0) register_env(agent_env_name, agent_create_env) - # agent_cls = get_agent_class(config_run) if rllib_config['env_config']['run'] == "": from flow.algorithms.centralized_PPO import CCTrainer, CentralizedCriticModel @@ -179,6 +182,8 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= agent_cls = CustomPPOTrainer elif config_run: agent_cls = get_agent_class(config_run) + else: + raise Exception('You forgot to store the algorithm type') # create the agent that will be used to compute the actions agent = agent_cls(env=agent_env_name, config=rllib_config) @@ -303,7 +308,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= per_vehicle_energy_trace[veh_id].append(0) completed_veh_types[veh_id] = env.k.vehicle.get_type(veh_id) else: - per_vehicle_energy_trace[veh_id].append(-1 * vehicle_energy_consumption(env, veh_id)) + per_vehicle_energy_trace[veh_id].append(-1 * 
veh_energy_consumption(env, veh_id)) if type(done) is dict and done['__all__']: break diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index ec5c3d7da..7182e8acf 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -1,16 +1,12 @@ """Generate a time space diagram for some networks. - This method accepts as input a csv file containing the sumo-formatted emission file, and then uses this data to generate a time-space diagram, with the x-axis being the time (in seconds), the y-axis being the position of a vehicle, and color representing the speed of te vehicles. - If the number of simulation steps is too dense, you can plot every nth step in the plot by setting the input `--steps=n`. - Note: This script assumes that the provided network has only one lane on the each edge, or one lane on the main highway in the case of MergeNetwork. - Usage ----- :: @@ -46,21 +42,18 @@ def import_data_from_trajectory(fp, params=dict()): r"""Import and preprocess data from the Flow trajectory (.csv) file. - Parameters ---------- fp : str file path (for the .csv formatted file) params : dict flow-specific parameters, including: - * "network" (str): name of the network that was used when generating the emission file. Must be one of the network names mentioned in ACCEPTABLE_NETWORKS, * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. - Returns ------- pd.DataFrame @@ -88,32 +81,27 @@ def import_data_from_trajectory(fp, params=dict()): def get_time_space_data(data, params): r"""Compute the unique inflows and subsequent outflow statistics. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data params : dict flow-specific parameters, including: - * "network" (str): name of the network that was used when generating the emission file. 
Must be one of the network names mentioned in ACCEPTABLE_NETWORKS, * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. - Returns ------- - ndarray (or dict of ndarray) + ndarray (or dict < str, np.ndarray >) 3d array (n_segments x 2 x 2) containing segments to be plotted. every inner 2d array is comprised of two 1d arrays representing [start time, start distance] and [end time, end distance] pairs. - in the case of I210, the nested arrays are wrapped into a dict, keyed on the lane number, so that each lane can be plotted separately. - Raises ------ AssertionError @@ -123,7 +111,6 @@ def get_time_space_data(data, params): assert params['network'] in ACCEPTABLE_NETWORKS, \ 'Network must be one of: ' + ', '.join([network.__name__ for network in ACCEPTABLE_NETWORKS]) - # switcher used to compute the positions based on the type of network # switcher used to compute the positions based on the type of network switcher = { RingNetwork: _ring_road, @@ -147,12 +134,10 @@ def _merge(data): This only include vehicles on the main highway, and not on the adjacent on-ramp. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- ndarray @@ -174,6 +159,9 @@ def _merge(data): def _highway(data): r"""Generate time and position data for the highway. + We generate plots for all lanes, so the segments are wrapped in + a dictionary. + Parameters ---------- data : pd.DataFrame @@ -198,12 +186,10 @@ def _ring_road(data): Vehicles that reach the top of the plot simply return to the bottom and continue. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- ndarray @@ -220,15 +206,12 @@ def _ring_road(data): def _i210_subnetwork(data): r"""Generate time and position data for the i210 subnetwork. - We generate plots for all lanes, so the segments are wrapped in a dictionary. 
- Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- dict < str, np.ndarray > @@ -257,12 +240,10 @@ def _figure_eight(data): The vehicles traveling towards the intersection from one side will be plotted from the top downward, while the vehicles from the other side will be plotted from the bottom upward. - Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - Returns ------- ndarray @@ -279,16 +260,13 @@ def _figure_eight(data): def _get_abs_pos(df, params): """Compute the absolute positions from edges and relative positions. - This is the variable we will ultimately use to plot individual vehicles. - Parameters ---------- df : pd.DataFrame dataframe of trajectory data params : dict flow-specific parameters - Returns ------- pd.Series @@ -388,9 +366,7 @@ def _get_abs_pos(df, params): def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None): """Plot the time-space diagram. - Take the pre-processed segments and other meta-data, then plot all the line segments. 
- Parameters ---------- ax : matplotlib.axes.Axes @@ -403,11 +379,14 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) parsed arguments lane : int, optional lane number to be shown in plot title +<<<<<<< HEAD +======= ghost_edges : list or set of str ghost edge names to be greyed out, default None ghost_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None +>>>>>>> 06ff2d970176c51dee5a5be092b85d44e84e6d82 Returns ------- None diff --git a/flow/visualize/transfer/util.py b/flow/visualize/transfer/util.py index 107e6d026..8c933c5a3 100644 --- a/flow/visualize/transfer/util.py +++ b/flow/visualize/transfer/util.py @@ -2,8 +2,7 @@ from copy import deepcopy from flow.core.params import InFlows -from examples.exp_configs.rl.multiagent.multiagent_i210 import VEH_PER_HOUR_BASE_119257914, \ - VEH_PER_HOUR_BASE_27414345, VEH_PER_HOUR_BASE_27414342 +from examples.exp_configs.rl.multiagent.multiagent_i210 import INFLOW_RATE, ON_RAMP_INFLOW_RATE def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): @@ -27,8 +26,8 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): all_inflows = [] inflow_119257914 = dict(veh_type="human", - edge="119257914", - vehs_per_hour=VEH_PER_HOUR_BASE_119257914 * (1 - (pr)) * fr_coef, + edge="ghost0", + vehs_per_hour=INFLOW_RATE * (1 - (pr)) * fr_coef, # probability=1.0, departLane="random", departSpeed=departSpeed) @@ -36,8 +35,8 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): if pr > 0.0: inflow_119257914_av = dict(veh_type="av", - edge="119257914", - vehs_per_hour=int(VEH_PER_HOUR_BASE_119257914 * pr * fr_coef), + edge="ghost0", + vehs_per_hour=int(INFLOW_RATE * pr * fr_coef), # probability=1.0, departLane="random", departSpeed=departSpeed) @@ -46,14 +45,14 @@ def make_inflows(pr=0.1, fr_coef=1.0, departSpeed=20, on_ramp=False): if on_ramp: inflow_27414345 = dict(veh_type="human", edge="27414345", - 
vehs_per_hour=VEH_PER_HOUR_BASE_27414345 * (1 - (pr)) * fr_coef, + vehs_per_hour=ON_RAMP_INFLOW_RATE * (1 - (pr)) * fr_coef, departLane="random", departSpeed=departSpeed) all_inflows.append(inflow_27414345) if pr > 0.0: inflow_27414342 = dict(veh_type="human", edge="27414342#0", - vehs_per_hour=VEH_PER_HOUR_BASE_27414342 * pr * fr_coef, + vehs_per_hour=ON_RAMP_INFLOW_RATE * pr * fr_coef, departLane="random", departSpeed=departSpeed) all_inflows.append(inflow_27414342) diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 0b385f28a..b5faf6517 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -229,11 +229,11 @@ class TestHBaselineExamples(unittest.TestCase): confirming that it runs. """ @staticmethod - def run_exp(flow_params, multiagent): + def run_exp(env_name, multiagent): train_h_baselines( - flow_params=flow_params, + env_name=env_name, args=[ - flow_params["env_name"].__name__, + env_name, "--initial_exploration_steps", "1", "--total_steps", "10" ], @@ -241,10 +241,10 @@ def run_exp(flow_params, multiagent): ) def test_singleagent_ring(self): - self.run_exp(singleagent_ring.copy(), multiagent=False) + self.run_exp("singleagent_ring", multiagent=False) def test_multiagent_ring(self): - self.run_exp(multiagent_ring.copy(), multiagent=True) + self.run_exp("multiagent_ring", multiagent=True) class TestRllibExamples(unittest.TestCase): From cb74b8cbcadc33d2fe4be56fcc662943ff4542c5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 16:53:03 -0700 Subject: [PATCH 311/335] fix pydocstyle --- flow/visualize/time_space_diagram.py | 23 ++++++++++++++++++++--- 1 file changed, 20 insertions(+), 3 deletions(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index 7182e8acf..b3c0e8091 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -1,4 +1,5 @@ """Generate a time space diagram for some networks. 
+ This method accepts as input a csv file containing the sumo-formatted emission file, and then uses this data to generate a time-space diagram, with the x-axis being the time (in seconds), the y-axis being the position of a vehicle, and @@ -42,6 +43,7 @@ def import_data_from_trajectory(fp, params=dict()): r"""Import and preprocess data from the Flow trajectory (.csv) file. + Parameters ---------- fp : str @@ -54,6 +56,7 @@ def import_data_from_trajectory(fp, params=dict()): * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. + Returns ------- pd.DataFrame @@ -81,6 +84,7 @@ def import_data_from_trajectory(fp, params=dict()): def get_time_space_data(data, params): r"""Compute the unique inflows and subsequent outflow statistics. + Parameters ---------- data : pd.DataFrame @@ -93,6 +97,7 @@ def get_time_space_data(data, params): * "net_params" (flow.core.params.NetParams): network-specific parameters. This is used to collect the lengths of various network links. + Returns ------- ndarray (or dict < str, np.ndarray >) @@ -102,6 +107,7 @@ def get_time_space_data(data, params): in the case of I210, the nested arrays are wrapped into a dict, keyed on the lane number, so that each lane can be plotted separately. + Raises ------ AssertionError @@ -134,10 +140,12 @@ def _merge(data): This only include vehicles on the main highway, and not on the adjacent on-ramp. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- ndarray @@ -186,10 +194,12 @@ def _ring_road(data): Vehicles that reach the top of the plot simply return to the bottom and continue. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- ndarray @@ -206,12 +216,15 @@ def _ring_road(data): def _i210_subnetwork(data): r"""Generate time and position data for the i210 subnetwork. 
+ We generate plots for all lanes, so the segments are wrapped in a dictionary. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- dict < str, np.ndarray > @@ -240,10 +253,12 @@ def _figure_eight(data): The vehicles traveling towards the intersection from one side will be plotted from the top downward, while the vehicles from the other side will be plotted from the bottom upward. + Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data + Returns ------- ndarray @@ -260,13 +275,16 @@ def _figure_eight(data): def _get_abs_pos(df, params): """Compute the absolute positions from edges and relative positions. + This is the variable we will ultimately use to plot individual vehicles. + Parameters ---------- df : pd.DataFrame dataframe of trajectory data params : dict flow-specific parameters + Returns ------- pd.Series @@ -366,7 +384,9 @@ def _get_abs_pos(df, params): def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None): """Plot the time-space diagram. + Take the pre-processed segments and other meta-data, then plot all the line segments. 
+ Parameters ---------- ax : matplotlib.axes.Axes @@ -379,14 +399,11 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) parsed arguments lane : int, optional lane number to be shown in plot title -<<<<<<< HEAD -======= ghost_edges : list or set of str ghost edge names to be greyed out, default None ghost_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None ->>>>>>> 06ff2d970176c51dee5a5be092b85d44e84e6d82 Returns ------- None From eb2416b941bc62110ed4b1a99c80063eb726bdbe Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 16:54:24 -0700 Subject: [PATCH 312/335] add docstring --- flow/data_pipeline/data_pipeline.py | 1 + 1 file changed, 1 insertion(+) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 74070cc7a..858640914 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -143,6 +143,7 @@ def delete_obsolete_data(s3, latest_key, table, bucket="circles.data.pipeline"): def update_baseline(s3, baseline_network, baseline_source_id): + """Update the baseline table on S3 if new baseline run is added.""" obj = s3.get_object(Bucket='circles.data.pipeline', Key='baseline_table/baselines.csv')['Body'] original_str = obj.read().decode() reader = csv.DictReader(StringIO(original_str)) From 6ed00e374b6495f5ece0bac1e6bcd2e10be1f4f9 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 16:55:18 -0700 Subject: [PATCH 313/335] remove excess whitespace --- flow/visualize/time_space_diagram.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index b3c0e8091..b1500b48d 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -386,7 +386,7 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) """Plot the time-space diagram. 
Take the pre-processed segments and other meta-data, then plot all the line segments. - + Parameters ---------- ax : matplotlib.axes.Axes From b80e5634be495f85e3e245e61ad2a9681694f8ea Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Mon, 6 Jul 2020 18:01:11 -0700 Subject: [PATCH 314/335] only call get_configuration() if to_aws --- flow/core/experiment.py | 7 ++++--- flow/visualize/i210_replay.py | 7 ++++--- flow/visualize/visualizer_rllib.py | 7 ++++--- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index 1274280ba..464b0a405 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -163,9 +163,10 @@ def rl_actions(*_): metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) + if to_aws: + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) if convert_to_csv and self.env.simulator == "traci": dir_path = self.env.sim_params.emission_path diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index 8a4684a61..c9e820b15 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -241,9 +241,10 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) + if args.to_aws: + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) i = 0 while i < 
args.num_rollouts: diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 059cabbbd..261dcbbc3 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -225,9 +225,10 @@ def visualizer_rllib(args): metadata['submission_time'].append(cur_time) metadata['network'].append(network_name_translate(env.network.name.split('_20')[0])) metadata['is_baseline'].append(str(args.is_baseline)) - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) + if args.to_aws: + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) # Simulate and collect metrics final_outflows = [] From 7c9a48ad9a671a5b980934c3e06c20cf5f07401e Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 11:10:45 -0700 Subject: [PATCH 315/335] Energy class for inventorying multiple energy models (#944) * New energy class to inventory multiple energy models Co-authored-by: Joy Carpio --- flow/core/kernel/vehicle/base.py | 16 ++++ flow/core/kernel/vehicle/traci.py | 16 ++++ flow/core/params.py | 21 ++++- flow/core/rewards.py | 139 ++++------------------------ flow/energy_models/base_energy.py | 59 ++++++++++++ flow/energy_models/power_demand.py | 127 +++++++++++++++++++++++++ flow/energy_models/toyota_energy.py | 58 ++++++++++++ tests/fast_tests/test_examples.py | 4 +- 8 files changed, 318 insertions(+), 122 deletions(-) create mode 100644 flow/energy_models/base_energy.py create mode 100644 flow/energy_models/power_demand.py create mode 100644 flow/energy_models/toyota_energy.py diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index fc1818e58..843ec7eb6 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -363,6 +363,22 @@ def get_fuel_consumption(self, veh_id, error=-1001): """ pass + @abstractmethod + def get_energy_model(self, veh_id, 
error=""): + """Return the energy model class object of the specified vehicle. + + Parameters + ---------- + veh_id : str or list of str + vehicle id, or list of vehicle ids + error : str + value that is returned if the vehicle is not found + Returns + ------- + subclass of BaseEnergyModel + """ + pass + @abstractmethod def get_speed(self, veh_id, error=-1001): """Return the speed of the specified vehicle. diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index 0fa1e6e17..ef401d180 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -292,6 +292,12 @@ def _add_departed(self, veh_id, veh_type): # specify the type self.__vehicles[veh_id]["type"] = veh_type + # specify energy model + energy_model = \ + self.type_parameters[veh_type]["energy_model"] + self.__vehicles[veh_id]["energy_model"] = \ + energy_model[0](veh_id, **energy_model[1]) + car_following_params = \ self.type_parameters[veh_type]["car_following_params"] @@ -549,6 +555,16 @@ def get_fuel_consumption(self, veh_id, error=-1001): return [self.get_fuel_consumption(vehID, error) for vehID in veh_id] return self.__sumo_obs.get(veh_id, {}).get(tc.VAR_FUELCONSUMPTION, error) * ml_to_gallons + def get_energy_model(self, veh_id, error=""): + """See parent class.""" + if isinstance(veh_id, (list, np.ndarray)): + return [self.get_energy_model(vehID) for vehID in veh_id] + try: + return self.__vehicles.get(veh_id, {'energy_model': error})['energy_model'] + except KeyError: + print("Energy model not specified for vehicle {}".format(veh_id)) + raise + def get_previous_speed(self, veh_id, error=-1001): """See parent class.""" if isinstance(veh_id, (list, np.ndarray)): diff --git a/flow/core/params.py b/flow/core/params.py index 94970d614..c6feb5086 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -7,6 +7,10 @@ from flow.controllers.car_following_models import SimCarFollowingController from flow.controllers.rlcontroller import RLController 
from flow.controllers.lane_change_controllers import SimLaneChangeController +from flow.energy_models.toyota_energy import PriusEnergy +from flow.energy_models.toyota_energy import TacomaEnergy +from flow.energy_models.power_demand import PDMCombustionEngine +from flow.energy_models.power_demand import PDMElectric SPEED_MODES = { @@ -39,6 +43,9 @@ "only_right_drive_safe": 576 } +ENERGY_MODELS = set([PriusEnergy, TacomaEnergy, PDMCombustionEngine, PDMElectric]) +DEFAULT_ENERGY_MODEL = PDMCombustionEngine + # Traffic light defaults PROGRAM_ID = 1 MAX_GAP = 3.0 @@ -262,6 +269,7 @@ def add(self, num_vehicles=0, car_following_params=None, lane_change_params=None, + energy_model=DEFAULT_ENERGY_MODEL, color=None): """Add a sequence of vehicles to the list of vehicles in the network. @@ -298,6 +306,12 @@ def add(self, # FIXME: depends on simulator lane_change_params = SumoLaneChangeParams() + if energy_model not in ENERGY_MODELS: + print('{} for vehicle {} is not a valid energy model. Defaulting to {}\n'.format(energy_model, + veh_id, + DEFAULT_ENERGY_MODEL)) + energy_model = DEFAULT_ENERGY_MODEL + type_params = {} type_params.update(car_following_params.controller_params) type_params.update(lane_change_params.controller_params) @@ -311,7 +325,8 @@ def add(self, "routing_controller": routing_controller, "initial_speed": initial_speed, "car_following_params": car_following_params, - "lane_change_params": lane_change_params} + "lane_change_params": lane_change_params, + "energy_model": energy_model} if color: type_params['color'] = color @@ -334,7 +349,9 @@ def add(self, "car_following_params": car_following_params, "lane_change_params": - lane_change_params + lane_change_params, + "energy_model": + energy_model }) # This is used to return the actual headways from the vehicles class. 
diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 6462b0a8c..b4af4c5bc 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -306,139 +306,40 @@ def punish_rl_lane_changes(env, penalty=1): return total_lane_change_penalty -def energy_consumption(env, gain=.001): - """Calculate power consumption of a vehicle. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. - """ - power = 0 - - M = 1200 # mass of average sized vehicle (kg) - g = 9.81 # gravitational acceleration (m/s^2) - Cr = 0.005 # rolling resistance coefficient - Ca = 0.3 # aerodynamic drag coefficient - rho = 1.225 # air density (kg/m^3) - A = 2.6 # vehicle cross sectional area (m^2) - for veh_id in env.k.vehicle.get_ids(): - if veh_id not in env.k.vehicle.previous_speeds.keys(): - continue - speed = env.k.vehicle.get_speed(veh_id) - prev_speed = env.k.vehicle.get_previous_speed(veh_id) - - accel = abs(speed - prev_speed) / env.sim_step - - power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 - - return -gain * power - - -def veh_energy_consumption(env, veh_id, gain=.001): - """Calculate power consumption of a vehicle. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. 
- """ - power = 0 - - M = 1200 # mass of average sized vehicle (kg) - g = 9.81 # gravitational acceleration (m/s^2) - Cr = 0.005 # rolling resistance coefficient - Ca = 0.3 # aerodynamic drag coefficient - rho = 1.225 # air density (kg/m^3) - A = 2.6 # vehicle cross sectional area (m^2) - - if veh_id not in env.k.vehicle.previous_speeds: - return 0 - - speed = env.k.vehicle.get_speed(veh_id) - prev_speed = env.k.vehicle.get_previous_speed(veh_id) - - accel = abs(speed - prev_speed) / env.sim_step - - power += M * speed * accel + M * g * Cr * speed + 0.5 * rho * A * Ca * speed ** 3 - - return -gain * power - - -def miles_per_megajoule(env, veh_ids=None, gain=.001): - """Calculate miles per mega-joule of either a particular vehicle or the total average of all the vehicles. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. +def instantaneous_mpg(env, veh_ids=None, gain=.001): + """Calculate the instantaneous mpg for every simulation step specific to the vehicle type. Parameters ---------- env : flow.envs.Env the environment variable, which contains information on the current state of the system. 
- veh_ids : [list] - list of veh_ids to compute the reward over + veh_ids : [list] or str + list of veh_ids or single veh_id to compute the reward over gain : float scaling factor for the reward """ - mpj = 0 - counter = 0 if veh_ids is None: veh_ids = env.k.vehicle.get_ids() elif not isinstance(veh_ids, list): veh_ids = [veh_ids] - for veh_id in veh_ids: - speed = env.k.vehicle.get_speed(veh_id) - # convert to be positive since the function called is a penalty - power = -veh_energy_consumption(env, veh_id, gain=1.0) - if power > 0 and speed >= 0.1: - counter += 1 - # meters / joule is (v * \delta t) / (power * \delta t) - mpj += speed / power - if counter > 0: - mpj /= counter - - # convert from meters per joule to miles per joule - mpj /= 1609.0 - # convert from miles per joule to miles per megajoule - mpj *= 10 ** 6 - - return mpj * gain - -def miles_per_gallon(env, veh_ids=None, gain=.001): - """Calculate mpg of either a particular vehicle or the total average of all the vehicles. - - Assumes vehicle is an average sized vehicle. - The power calculated here is the lower bound of the actual power consumed - by a vehicle. - - Parameters - ---------- - env : flow.envs.Env - the environment variable, which contains information on the current - state of the system. 
- veh_ids : [list] - list of veh_ids to compute the reward over - gain : float - scaling factor for the reward - """ - mpg = 0 - counter = 0 - if veh_ids is None: - veh_ids = env.k.vehicle.get_ids() - elif not isinstance(veh_ids, list): - veh_ids = [veh_ids] + cumulative_gallons = 0 + cumulative_distance = 0 for veh_id in veh_ids: - speed = env.k.vehicle.get_speed(veh_id) - gallons_per_s = env.k.vehicle.get_fuel_consumption(veh_id) - if gallons_per_s > 0 and speed >= 0.0: - counter += 1 - # meters / gallon is (v * \delta t) / (gallons_per_s * \delta t) - mpg += speed / gallons_per_s - if counter > 0: - mpg /= counter - - # convert from meters per gallon to miles per gallon - mpg /= 1609.0 + energy_model = env.k.vehicle.get_energy_model(veh_id) + if energy_model != "": + speed = env.k.vehicle.get_speed(veh_id) + accel = env.k.vehicle.get_accel_no_noise_with_failsafe(veh_id) + grade = env.k.vehicle.get_road_grade(veh_id) + gallons_per_hr = energy_model.get_instantaneous_fuel_consumption(accel, speed, grade) + if gallons_per_hr > 0 and speed >= 0.0: + cumulative_gallons += gallons_per_hr + cumulative_distance += speed + + cumulative_gallons /= 3600.0 + cumulative_distance /= 1609.0 + # miles / gallon is (distance_dot * \delta t) / (gallons_dot * \delta t) + mpg = cumulative_distance / cumulative_gallons return mpg * gain diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py new file mode 100644 index 000000000..bf1e16e09 --- /dev/null +++ b/flow/energy_models/base_energy.py @@ -0,0 +1,59 @@ +"""Script containing the base vehicle energy class.""" +from abc import ABCMeta, abstractmethod + + +class BaseEnergyModel(metaclass=ABCMeta): + """Base energy model class. + + Calculate the instantaneous power consumption of a vehicle in + the network. It returns the power in Watts regardless of the + vehicle type: whether EV or Combustion Engine, Toyota Prius or Tacoma + or non-Toyota vehicles. 
Non-Toyota vehicles are set by default + to be an averaged-size vehicle. + """ + + def __init__(self, kernel): + self.k = kernel + + # 15 kilowatts = 1 gallon/hour conversion factor + self.conversion = 15e3 + + @abstractmethod + def get_instantaneous_power(self, accel, speed, grade): + """Calculate the instantaneous power consumption of a vehicle. + + Must be implemented by child classes. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + pass + + def get_instantaneous_fuel_consumption(self, accel, speed, grade): + """Calculate the instantaneous fuel consumption of a vehicle. + + Fuel consumption is reported in gallons per hour, with the conversion + rate of 15kW = 1 gallon/hour. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + return self.get_instantaneous_power(accel, speed, grade) * self.conversion diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py new file mode 100644 index 000000000..ddf09b2fc --- /dev/null +++ b/flow/energy_models/power_demand.py @@ -0,0 +1,127 @@ +"""Script containing the vehicle power demand model energy classes.""" +import math +import numpy as np +from flow.energy_models.base_energy import BaseEnergyModel +from abc import ABCMeta, abstractmethod + + +class PowerDemandModel(BaseEnergyModel, metaclass=ABCMeta): + """Vehicle Power Demand base energy model class. + + Calculate power consumption of a vehicle based on physics + derivation. Assumes some vehicle characteristics. The + power calculated here is the lower bound of the actual + power consumed by the vehicle plus a bilinear polynomial + function used as a correction factor. 
+ """ + + def __init__(self, + kernel, + mass=2041, + area=3.2, + rolling_res_coeff=0.0027, + aerodynamic_drag_coeff=0.4, + p1_correction=4598.7155, + p3_correction=975.12719): + self.k = kernel + self.g = 9.807 + self.rho_air = 1.225 + self.gamma = 1 + self.mass = mass + self.cross_area = area + self.rolling_res_coeff = rolling_res_coeff + self.aerodynamic_drag_coeff = aerodynamic_drag_coeff + self.p1_correction = p1_correction + self.p3_correction = p3_correction + + def calculate_power_at_the_wheels(self, accel, speed, grade): + """Calculate the instantaneous power required. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + accel_slope_forces = self.mass * speed * ((np.heaviside(accel, 0.5) * (1 - self.gamma) + self.gamma)) * accel + accel_slope_forces += self.g * math.sin(grade) + rolling_friction = self.mass * self.g * self.rolling_res_coeff * speed + air_drag = 0.5 * self.rho_air * self.cross_area * self.aerodynamic_drag_coeff * speed**3 + power = accel_slope_forces + rolling_friction + air_drag + return power + + @abstractmethod + def get_regen_cap(self, accel, speed, grade): + """Set the maximum power retainable from regenerative braking. + + A negative regen cap is interpretted as a positive regenerative power. + + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + pass + + def get_power_correction_factor(self, accel, speed, grade): + """Calculate the instantaneous power correction of a vehicle. 
+ + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + return self.p1_correction * accel + self.p3_correction * accel * speed + + def get_instantaneous_power(self, accel, speed, grade): + """See parent class. + + Apply the regenerative braking cap to the modelled power demand. + """ + regen_cap = self.get_regen_cap(accel, speed, grade) + power_at_the_wheels = max(regen_cap, self.calculate_power_at_the_wheels(accel, speed, grade)) + correction_factor = max(regen_cap, self.get_power_correction_factor(accel, speed, grade)) + return power_at_the_wheels + correction_factor + + +class PDMCombustionEngine(PowerDemandModel): + """Power Demand Model for a combustion engine vehicle.""" + + def get_regen_cap(self, accel, speed, grade): + """See parent class.""" + return 0 + + +class PDMElectric(PowerDemandModel): + """Power Demand Model for an electric vehicle.""" + + def __init__(self, kernel): + super(PDMElectric, self).__init__(kernel, + mass=1663, + area=2.4, + rolling_res_coeff=0.007, + aerodynamic_drag_coeff=0.24) + + def get_regen_cap(self, accel, speed, grade): + """See parent class.""" + return -2.8 * speed diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py new file mode 100644 index 000000000..d24b41662 --- /dev/null +++ b/flow/energy_models/toyota_energy.py @@ -0,0 +1,58 @@ +"""Script containing the Toyota energy classes.""" +import dill as pickle +import boto3 +from flow.energy_models.base_energy import BaseEnergyModel +import os +from abc import ABCMeta, abstractmethod + + +class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta): + """Base Toyota Energy model class.""" + + def __init__(self, kernel, filename=None): + self.k = kernel + + # download file from s3 bucket + s3 = boto3.client('s3') + s3.download_file('toyota.restricted', filename, 'temp.pkl') 
+        with open('temp.pkl', 'rb') as file:
+            self.toyota_energy = pickle.load(file)
+
+        # delete pickle file
+        os.remove('temp.pkl')
+
+    @abstractmethod
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        pass
+
+
+class PriusEnergy(ToyotaModel):
+    """Toyota Prius (EV) energy model class."""
+
+    def __init__(self, kernel, soc=0.9):
+        super(PriusEnergy, self).__init__(kernel, filename='prius_ev.pkl')
+        self.soc = soc
+
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        socdot = self.toyota_energy(self.soc, accel, speed, grade)
+        self.soc -= socdot * self.k.env.sim_step
+        # FIXME (Joy): convert socdot to power
+        return socdot
+
+
+class TacomaEnergy(ToyotaModel):
+    """Toyota Tacoma energy model class."""
+
+    def __init__(self, kernel):
+        super(TacomaEnergy, self).__init__(kernel, filename='tacoma.pkl')
+
+    def get_instantaneous_power(self, accel, speed, grade):
+        """See parent class."""
+        return self.get_instantaneous_fuel_consumption(accel, speed, grade) / self.conversion
+
+    def get_instantaneous_fuel_consumption(self, accel, speed, grade):
+        """See parent class."""
+        fc = self.toyota_energy(accel, speed, grade)
+        return fc
diff --git a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py
index b5faf6517..8e871afb4 100644
--- a/tests/fast_tests/test_examples.py
+++ b/tests/fast_tests/test_examples.py
@@ -168,6 +168,7 @@ def test_parse_args(self):
         self.assertDictEqual(vars(args), {
             'exp_config': 'exp_config',
+            'local_mode': False,
             'rl_trainer': 'rllib',
             'num_cpus': 1,
             'num_steps': 5000,
@@ -188,6 +189,7 @@ def test_parse_args(self):
         self.assertDictEqual(vars(args), {
             'checkpoint_path': '5',
             'exp_config': 'exp_config',
+            'local_mode': False,
             'num_cpus': 1,
             'num_steps': 3,
             'rl_trainer': 'h-baselines',
@@ -409,7 +411,7 @@ def run_exp(flow_params, **kwargs):
         alg_run, env_name, config = setup_rllib_exps(flow_params, 1, 1, **kwargs)
 
         try:
-            ray.init(num_cpus=1)
+            ray.init(num_cpus=1,
                     local_mode=True)
         except Exception as e:
             print("ERROR", e)
         config['train_batch_size'] = 50
From 5b7e8b27a781d738395b81572c0f386cf3ef955a Mon Sep 17 00:00:00 2001
From: liljonnystyle
Date: Wed, 8 Jul 2020 11:45:45 -0700
Subject: [PATCH 316/335] Time-Space Diagrams automatically to S3 (#993)

* Add time-space diagram plotting to experiment.py
---
 flow/core/experiment.py              |  15 ++-
 flow/visualize/time_space_diagram.py | 148 ++++++++++++++++---------
 2 files changed, 111 insertions(+), 52 deletions(-)

diff --git a/flow/core/experiment.py b/flow/core/experiment.py
index 464b0a405..a7ac07738 100755
--- a/flow/core/experiment.py
+++ b/flow/core/experiment.py
@@ -2,6 +2,7 @@
 from flow.utils.registry import make_create_env
 from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration
 from flow.data_pipeline.leaderboard_utils import network_name_translate
+from flow.visualize.time_space_diagram import tsd_main
 from collections import defaultdict
 from datetime import datetime, timezone
 import logging
@@ -20,8 +21,8 @@ class Experiment:
     the actions of RL agents in the network, type the following:
 
         >>> from flow.envs import Env
-        >>> flow_params = dict(...)  # see the examples in exp_config
-        >>> exp = Experiment(flow_params)  # for some experiment configuration
+        >>> flow_params = dict(...)  # see the examples in exp_config
+        >>> exp = Experiment(flow_params)  # for some experiment configuration
         >>> exp.run(num_runs=1)
 
     If you wish to specify the actions of RL agents in the network, this may be
@@ -39,7 +40,7 @@ class can generate csv files from emission files produced by sumo. These
     ``emission_path`` attribute in ``SimParams`` to some path.
         >>> from flow.core.params import SimParams
-        >>> flow_params['sim'] = SimParams(emission_path="./data")
+        >>> flow_params['sim'] = SimParams(emission_path="./data")
 
     Once you have included this in your environment, run your Experiment
     object as follows:
@@ -233,6 +234,11 @@ def rl_actions(*_):
             write_dict_to_csv(metadata_table_path, metadata, True)
 
             if to_aws:
+                tsd_main(trajectory_table_path,
+                     {'network': self.env.network.__class__},
+                     min_speed=0,
+                     max_speed=10,
+                     start=self.env.env_params.warmup_steps)
                 upload_to_s3('circles.data.pipeline',
                              'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date,
                                                                                                            source_id),
@@ -241,5 +247,8 @@ def rl_actions(*_):
                              'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id),
                              trajectory_table_path,
                              {'network': metadata['network'][0], 'is_baseline': metadata['is_baseline'][0]})
+            upload_to_s3('circles.data.pipeline',
+                         'time_space_diagram/date={0}/partition_name={1}/{1}.png'.format(cur_date, source_id),
+                         trajectory_table_path.replace('csv', 'png'))
 
         return info_dict
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py
index b1500b48d..a9392e21d 100644
--- a/flow/visualize/time_space_diagram.py
+++ b/flow/visualize/time_space_diagram.py
@@ -382,7 +382,7 @@ def _get_abs_pos(df, params):
     return ret
 
 
-def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None):
+def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None, ghost_edges=None, ghost_bounds=None):
     """Plot the time-space diagram.
 
     Take the pre-processed segments and other meta-data, then plot all the line segments.
@@ -395,8 +395,12 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) data used for axes bounds and speed coloring segs : list of list of lists line segments to be plotted, where each segment is a list of two [x,y] pairs - args : dict - parsed arguments + min_speed : int or float + minimum speed in colorbar + max_speed : int or float + maximum speed in colorbar + start : int or float + starting time_step not greyed out lane : int, optional lane number to be shown in plot title ghost_edges : list or set of str @@ -408,7 +412,7 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) ------- None """ - norm = plt.Normalize(args.min_speed, args.max_speed) + norm = plt.Normalize(min_speed, max_speed) xmin, xmax = df['time_step'].min(), df['time_step'].max() xbuffer = (xmax - xmin) * 0.025 # 2.5% of range @@ -418,7 +422,7 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) ax.set_xlim(xmin - xbuffer, xmax + xbuffer) ax.set_ylim(ymin - ybuffer, ymax + ybuffer) - lc = LineCollection(segs, cmap=my_cmap, norm=norm) + lc = LineCollection(segs, cmap=cmap, norm=norm) lc.set_array(df['speed'].values) lc.set_linewidth(1) ax.add_collection(lc) @@ -428,15 +432,15 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) if ghost_edges: y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() - rects.append(Rectangle((xmin, y_domain_min), args.start - xmin, y_domain_max - y_domain_min)) + rects.append(Rectangle((xmin, y_domain_min), start - xmin, y_domain_max - y_domain_min)) rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - ymin)) rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max)) elif ghost_bounds: - rects.append(Rectangle((xmin, ghost_bounds[0]), args.start - xmin, ghost_bounds[1] - ghost_bounds[0])) + rects.append(Rectangle((xmin, 
ghost_bounds[0]), start - xmin, ghost_bounds[1] - ghost_bounds[0])) rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1])) else: - rects.append(Rectangle((xmin, ymin), args.start - xmin, ymax - ymin)) + rects.append(Rectangle((xmin, ymin), start - xmin, ymax - ymin)) if rects: pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None) @@ -457,41 +461,28 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) cbar.ax.tick_params(labelsize=18) -if __name__ == '__main__': - # create the parser - parser = argparse.ArgumentParser( - formatter_class=argparse.RawDescriptionHelpFormatter, - description='[Flow] Generates time space diagrams for flow networks.', - epilog='python time_space_diagram.py .csv ' - '.json') - - # required arguments - parser.add_argument('trajectory_path', type=str, - help='path to the Flow trajectory csv file.') - parser.add_argument('flow_params', type=str, - help='path to the flow_params json file.') - - # optional arguments - parser.add_argument('--steps', type=int, default=1, - help='rate at which steps are plotted.') - parser.add_argument('--title', type=str, default='Time Space Diagram', - help='rate at which steps are plotted.') - parser.add_argument('--max_speed', type=int, default=8, - help='The maximum speed in the color range.') - parser.add_argument('--min_speed', type=int, default=0, - help='The minimum speed in the color range.') - parser.add_argument('--start', type=float, default=0, - help='initial time (in sec) in the plot.') - - args = parser.parse_args() - - # flow_params is imported as a dictionary - if '.json' in args.flow_params: - flow_params = get_flow_params(args.flow_params) - else: - module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params]) - flow_params = getattr(module, args.flow_params).flow_params +def tsd_main(trajectory_path, flow_params, 
min_speed=0, max_speed=10, start=0): + """Prepare and plot the time-space diagram. + Parameters + ---------- + trajectory_path : str + file path (for the .csv formatted file) + flow_params : dict + flow-specific parameters, including: + * "network" (str): name of the network that was used when generating + the emission file. Must be one of the network names mentioned in + ACCEPTABLE_NETWORKS, + * "net_params" (flow.core.params.NetParams): network-specific + parameters. This is used to collect the lengths of various network + links. + min_speed : int or float + minimum speed in colorbar + max_speed : int or float + maximum speed in colorbar + start : int or float + starting time_step not greyed out + """ # some plotting parameters cdict = { 'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)), @@ -501,29 +492,50 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024) # Read trajectory csv into pandas dataframe - traj_df = import_data_from_trajectory(args.trajectory_path, flow_params) + traj_df = import_data_from_trajectory(trajectory_path, flow_params) # Convert df data into segments for plotting segs, traj_df = get_time_space_data(traj_df, flow_params) if flow_params['network'] == I210SubNetwork: nlanes = traj_df['lane_id'].nunique() - fig = plt.figure(figsize=(16, 9*nlanes)) + plt.figure(figsize=(16, 9*nlanes)) for lane, df in traj_df.groupby('lane_id'): ax = plt.subplot(nlanes, 1, lane+1) - plot_tsd(ax, df, segs[lane], args, int(lane+1), ghost_edges={'ghost0', '119257908#3'}) + plot_tsd(ax=ax, + df=df, + segs=segs[lane], + cmap=my_cmap, + min_speed=min_speed, + max_speed=max_speed, + start=start, + lane=int(lane+1), + ghost_edges={'ghost0', '119257908#3'}) plt.tight_layout() else: # perform plotting operation - fig = plt.figure(figsize=(16, 9)) + plt.figure(figsize=(16, 9)) ax = plt.axes() if flow_params['network'] == HighwayNetwork: - plot_tsd(ax, traj_df, segs, 
args, ghost_bounds=(500, 2300)) + plot_tsd(ax=ax, + df=traj_df, + segs=segs, + cmap=my_cmap, + min_speed=min_speed, + max_speed=max_speed, + start=start, + ghost_bounds=(500, 2300)) else: - plot_tsd(ax, traj_df, segs, args) + plot_tsd(ax=ax, + df=traj_df, + segs=segs, + cmap=my_cmap, + min_speed=min_speed, + max_speed=max_speed, + start=start) ########################################################################### # Note: For MergeNetwork only # @@ -534,5 +546,43 @@ def plot_tsd(ax, df, segs, args, lane=None, ghost_edges=None, ghost_bounds=None) [-0.1, -0.1], linewidth=3, color="white") # ########################################################################### - outfile = args.trajectory_path.replace('csv', 'png') + outfile = trajectory_path.replace('csv', 'png') plt.savefig(outfile) + + +if __name__ == '__main__': + # create the parser + parser = argparse.ArgumentParser( + formatter_class=argparse.RawDescriptionHelpFormatter, + description='[Flow] Generates time space diagrams for flow networks.', + epilog='python time_space_diagram.py .csv ' + '.json') + + # required arguments + parser.add_argument('trajectory_path', type=str, + help='path to the Flow trajectory csv file.') + parser.add_argument('flow_params', type=str, + help='path to the flow_params json file.') + + # optional arguments + parser.add_argument('--steps', type=int, default=1, + help='rate at which steps are plotted.') + parser.add_argument('--title', type=str, default='Time Space Diagram', + help='rate at which steps are plotted.') + parser.add_argument('--max_speed', type=int, default=8, + help='The maximum speed in the color range.') + parser.add_argument('--min_speed', type=int, default=0, + help='The minimum speed in the color range.') + parser.add_argument('--start', type=float, default=0, + help='initial time (in sec) in the plot.') + + args = parser.parse_args() + + # flow_params is imported as a dictionary + if '.json' in args.flow_params: + flow_params = 
get_flow_params(args.flow_params) + else: + module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params]) + flow_params = getattr(module, args.flow_params).flow_params + + tsd_main(args.trajectory_path, flow_params, min_speed=args.min_speed, max_speed=args.max_speed, start=args.start) From c4ba7adbfe113fe0ce477f937e6e38dd181316e8 Mon Sep 17 00:00:00 2001 From: Brent Zhao Date: Wed, 8 Jul 2020 12:08:06 -0700 Subject: [PATCH 317/335] Query Prereq Check (#987) * prereq dict added to query * prereq checking mechanism implemented, not tested yet * prereq checking tested * change to more flexible filter handling * make safety_rate and safety_max_value floats * ignore nulls in fact_top_scores * fix typo * remove unneeded import * replace uneccessary use of list to set * add queries to pre-bin histogram data * fix the serialization issue with set, convert to list before write as json * fix query * fix query * fixed query bug Co-authored-by: liljonnystyle --- flow/data_pipeline/data_pipeline.py | 40 ++++- flow/data_pipeline/lambda_function.py | 79 +++++----- flow/data_pipeline/query.py | 215 +++++++++++++++++--------- 3 files changed, 221 insertions(+), 113 deletions(-) diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 858640914..626c59e39 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -1,11 +1,13 @@ """contains class and helper functions for the data pipeline.""" import pandas as pd import boto3 -from flow.data_pipeline.query import QueryStrings +from botocore.exceptions import ClientError +from flow.data_pipeline.query import QueryStrings, prerequisites from time import time from datetime import date import csv from io import StringIO +import json def generate_trajectory_table(data_path, extra_info, partition_name): @@ -158,6 +160,42 @@ def update_baseline(s3, baseline_network, baseline_source_id): Body=new_str.getvalue().replace('\r', '').encode()) +def 
get_completed_queries(s3, source_id): + """Return the deserialized list of completed queries from S3.""" + try: + completed_queries_obj = \ + s3.get_object(Bucket='circles.data.pipeline', Key='lambda_temp/{}'.format(source_id))['Body'] + completed_queries = json.loads(completed_queries_obj.read().decode('utf-8')) + except ClientError as e: + if e.response['Error']['Code'] == 'NoSuchKey': + completed_queries = set() + else: + raise + return set(completed_queries) + + +def put_completed_queries(s3, completed_queries): + """Put all the completed queries lists into S3 as in a serialized json format.""" + for source_id, completed_queries_set in completed_queries.items(): + completed_queries_list = list(completed_queries_set) + completed_queries_json = json.dumps(completed_queries_list) + s3.put_object(Bucket='circles.data.pipeline', Key='lambda_temp/{}'.format(source_id), + Body=completed_queries_json.encode('utf-8')) + + +def get_ready_queries(completed_queries, new_query): + """Return queries whose prerequisite queries are completed.""" + readied_queries = [] + unfinished_queries = prerequisites.keys() - completed_queries + upadted_completed_queries = completed_queries.copy() + upadted_completed_queries.add(new_query) + for query_name in unfinished_queries: + if not prerequisites[query_name][1].issubset(completed_queries): + if prerequisites[query_name][1].issubset(upadted_completed_queries): + readied_queries.append((query_name, prerequisites[query_name][0])) + return readied_queries + + class AthenaQuery: """Class used to run queries. 
diff --git a/flow/data_pipeline/lambda_function.py b/flow/data_pipeline/lambda_function.py index 97f625eab..1d813f98b 100644 --- a/flow/data_pipeline/lambda_function.py +++ b/flow/data_pipeline/lambda_function.py @@ -1,9 +1,9 @@ """lambda function on AWS Lambda.""" import boto3 from urllib.parse import unquote_plus -from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline -from flow.data_pipeline.query import tags, tables, network_using_edge, summary_tables -from flow.data_pipeline.query import X_FILTER, EDGE_FILTER, WARMUP_STEPS, HORIZON_STEPS +from flow.data_pipeline.data_pipeline import AthenaQuery, delete_obsolete_data, update_baseline, \ + get_ready_queries, get_completed_queries, put_completed_queries +from flow.data_pipeline.query import tables, network_filters, summary_tables, triggers s3 = boto3.client('s3') queryEngine = AthenaQuery() @@ -11,6 +11,8 @@ def lambda_handler(event, context): """Handle S3 put event on AWS Lambda.""" + # stores all lists of completed query for each source_id + completed = {} records = [] # do a pre-sweep to handle tasks other than initalizing a query for record in event['Records']: @@ -19,58 +21,55 @@ def lambda_handler(event, context): table = key.split('/')[0] if table not in tables: continue - # delete unwanted metadata files - if (key[-9:] == '.metadata'): - s3.delete_object(Bucket=bucket, Key=key) - continue - + s3.delete_object(Bucket=bucket, Key=(key + '.metadata')) # load the partition for newly added table query_date = key.split('/')[-3].split('=')[-1] partition = key.split('/')[-2].split('=')[-1] + source_id = "flow_{}".format(partition.split('_')[1]) + if table == "fact_vehicle_trace": + query_name = "FACT_VEHICLE_TRACE" + else: + query_name = partition.replace(source_id, "")[1:] queryEngine.repair_partition(table, query_date, partition) - # delete obsolete data if table in summary_tables: delete_obsolete_data(s3, key, table) - # add table that need to start a query to list - if 
table in tags.keys(): - records.append((bucket, key, table, query_date, partition)) + if query_name in triggers: + records.append((bucket, key, table, query_name, query_date, partition, source_id)) # initialize the queries - start_filter = WARMUP_STEPS - stop_filter = WARMUP_STEPS + HORIZON_STEPS - for bucket, key, table, query_date, partition in records: - source_id = "flow_{}".format(partition.split('_')[1]) + for bucket, key, table, query_name, query_date, partition, source_id in records: + # retrieve the set of completed query for this source_id if not already available + if source_id not in completed.keys(): + completed[source_id] = get_completed_queries(s3, source_id) + # if query already recorded before, skip it. This is to tolerate repetitive execution by Lambda + if query_name in completed[source_id]: + continue + # retrieve metadata and use it to determine the right loc_filter metadata_key = "fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv".format(query_date, source_id) response = s3.head_object(Bucket=bucket, Key=metadata_key) - loc_filter = X_FILTER if 'network' in response["Metadata"]: - if response["Metadata"]['network'] in network_using_edge: - loc_filter = EDGE_FILTER + network = response["Metadata"]['network'] + loc_filter = network_filters[network]['loc_filter'] + start_filter = network_filters[network]['warmup_steps'] + stop_filter = network_filters[network]['horizon_steps'] + + # update baseline if needed if table == 'fact_vehicle_trace' \ and 'is_baseline' in response['Metadata'] and response['Metadata']['is_baseline'] == 'True': - update_baseline(s3, response["Metadata"]['network'], source_id) - - query_dict = tags[table] - - # handle different energy models - if table == "fact_energy_trace": - energy_model_id = partition.replace(source_id, "")[1:] - query_dict = tags[energy_model_id] + update_baseline(s3, network, source_id) + readied_queries = get_ready_queries(completed[source_id], query_name) + completed[source_id].add(query_name) # 
initialize queries and store them at appropriate locations - for table_name, query_list in query_dict.items(): - for query_name in query_list: - result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name, - query_date, - source_id, - query_name) - queryEngine.run_query(query_name, - result_location, - query_date, - partition, - loc_filter=loc_filter, - start_filter=start_filter, - stop_filter=stop_filter) + for readied_query_name, table_name in readied_queries: + result_location = 's3://circles.data.pipeline/{}/date={}/partition_name={}_{}'.format(table_name, + query_date, + source_id, + readied_query_name) + queryEngine.run_query(readied_query_name, result_location, query_date, partition, loc_filter=loc_filter, + start_filter=start_filter, stop_filter=stop_filter) + # stores all the updated lists of completed queries back to S3 + put_completed_queries(s3, completed) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 302048632..adc472176 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -1,78 +1,90 @@ """stores all the pre-defined query strings.""" +from collections import defaultdict from enum import Enum # tags for different queries -tags = { - "fact_vehicle_trace": { - "fact_energy_trace": [ - "POWER_DEMAND_MODEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL", - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL" - ], - "fact_safety_metrics": [ - "FACT_SAFETY_METRICS" - ], - "fact_network_throughput_agg": [ - "FACT_NETWORK_THROUGHPUT_AGG" - ], - "fact_network_inflows_outflows": [ - "FACT_NETWORK_INFLOWS_OUTFLOWS" - ], - "fact_vehicle_counts_by_time": [ - "FACT_VEHICLE_COUNTS_BY_TIME" - ] - }, - "fact_energy_trace": {}, - "fact_vehicle_counts_by_time": {}, - "fact_safety_metrics": { - "fact_safety_metrics_agg": [ - "FACT_SAFETY_METRICS_AGG" - ] - }, - "POWER_DEMAND_MODEL_DENOISED_ACCEL": { - "fact_vehicle_fuel_efficiency_agg": [ - "FACT_VEHICLE_FUEL_EFFICIENCY_AGG" - ], - 
"fact_network_metrics_by_distance_agg": [ - "FACT_NETWORK_METRICS_BY_DISTANCE_AGG" - ], - "fact_network_metrics_by_time_agg": [ - "FACT_NETWORK_METRICS_BY_TIME_AGG" - ] - }, - "POWER_DEMAND_MODEL": {}, - "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": {}, - "fact_vehicle_fuel_efficiency_agg": { - "fact_network_fuel_efficiency_agg": [ - "FACT_NETWORK_FUEL_EFFICIENCY_AGG" - ] - }, - "fact_network_fuel_efficiency_agg": { - "leaderboard_chart": [ - "LEADERBOARD_CHART" - ] - }, - "leaderboard_chart": { - "leaderboard_chart_agg": [ - "LEADERBOARD_CHART_AGG" - ] - }, - "leaderboard_chart_agg": { - "fact_top_scores": [ - "FACT_TOP_SCORES" - ] - } +prerequisites = { + "POWER_DEMAND_MODEL": ( + "fact_energy_trace", {"FACT_VEHICLE_TRACE"} + ), + "POWER_DEMAND_MODEL_DENOISED_ACCEL": ( + "fact_energy_trace", {"FACT_VEHICLE_TRACE"} + ), + "POWER_DEMAND_MODEL_DENOISED_ACCEL_VEL": ( + "fact_energy_trace", {"FACT_VEHICLE_TRACE"} + ), + "FACT_SAFETY_METRICS": ( + "fact_safety_metrics", {"FACT_VEHICLE_TRACE"} + ), + "FACT_NETWORK_THROUGHPUT_AGG": ( + "fact_network_throughput_agg", {"FACT_VEHICLE_TRACE"} + ), + "FACT_NETWORK_INFLOWS_OUTFLOWS": ( + "fact_network_inflows_outflows", {"FACT_VEHICLE_TRACE"} + ), + "FACT_VEHICLE_COUNTS_BY_TIME": ( + "fact_vehicle_counts_by_time", {"FACT_VEHICLE_TRACE"} + ), + "FACT_VEHICLE_FUEL_EFFICIENCY_AGG": ( + "fact_vehicle_fuel_efficiency_agg", {"FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL"} + ), + "FACT_NETWORK_METRICS_BY_DISTANCE_AGG": ( + "fact_network_metrics_by_distance_agg", {"FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL"} + ), + "FACT_NETWORK_METRICS_BY_TIME_AGG": ( + "fact_network_metrics_by_time_agg", {"FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL"} + ), + "FACT_VEHICLE_FUEL_EFFICIENCY_BINNED": ( + "fact_vehicle_fuel_efficiency_binned", {"FACT_VEHICLE_FUEL_EFFICIENCY_AGG"} + ), + "FACT_NETWORK_FUEL_EFFICIENCY_AGG": ( + "fact_network_fuel_efficiency_agg", {"FACT_VEHICLE_FUEL_EFFICIENCY_AGG"} + ), + 
"FACT_SAFETY_METRICS_AGG": ( + "fact_safety_metrics_agg", {"FACT_SAFETY_METRICS"} + ), + "FACT_SAFETY_METRICS_BINNED": ( + "fact_safety_metrics_binned", {"FACT_SAFETY_METRICS"} + ), + "LEADERBOARD_CHART": ( + "leaderboard_chart", {"FACT_NETWORK_THROUGHPUT_AGG", + "FACT_NETWORK_FUEL_EFFICIENCY_AGG", + "FACT_SAFETY_METRICS_AGG"} + ), + "LEADERBOARD_CHART_AGG": ( + "leaderboard_chart_agg", {"LEADERBOARD_CHART"} + ), + "FACT_TOP_SCORES": ( + "fact_top_scores", {"LEADERBOARD_CHART_AGG"} + ), } +triggers = [ + "FACT_VEHICLE_TRACE", + "POWER_DEMAND_MODEL_DENOISED_ACCEL", + "FACT_VEHICLE_FUEL_EFFICIENCY_AGG", + "FACT_SAFETY_METRICS", + "FACT_NETWORK_THROUGHPUT_AGG", + "FACT_NETWORK_FUEL_EFFICIENCY_AGG", + "FACT_SAFETY_METRICS_AGG", + "LEADERBOARD_CHART", + "LEADERBOARD_CHART_AGG" +] + tables = [ "fact_vehicle_trace", "fact_energy_trace", "fact_vehicle_counts_by_time", "fact_safety_metrics", "fact_safety_metrics_agg", + "fact_safety_metrics_binned", "fact_network_throughput_agg", "fact_network_inflows_outflows", "fact_vehicle_fuel_efficiency_agg", + "fact_vehicle_fuel_efficiency_binned", "fact_network_metrics_by_distance_agg", "fact_network_metrics_by_time_agg", "fact_network_fuel_efficiency_agg", @@ -84,15 +96,16 @@ summary_tables = ["leaderboard_chart_agg", "fact_top_scores"] -network_using_edge = ["I-210 without Ramps"] - -X_FILTER = "x BETWEEN 500 AND 2300" - -EDGE_FILTER = "edge_id <> ALL (VALUES 'ghost0', '119257908#3')" - -WARMUP_STEPS = 600 * 3 * 0.4 - -HORIZON_STEPS = 1000 * 3 * 0.4 +network_filters = defaultdict(lambda: { + 'loc_filter': "x BETWEEN 500 AND 2300", + 'warmup_steps': 500 * 3 * 0.4, + 'horizon_steps': 1000 * 3 * 0.4 + }) +network_filters['I-210 without Ramps'] = { + 'loc_filter': "edge_id <> ALL (VALUES 'ghost0', '119257908#3')", + 'warmup_steps': 600 * 3 * 0.4, + 'horizon_steps': 1000 * 3 * 0.4 + } VEHICLE_POWER_DEMAND_TACOMA_FINAL_SELECT = """ SELECT @@ -231,7 +244,7 @@ class QueryStrings(Enum): 
value_lower_right*(headway-headway_lower)*(rel_speed_upper-leader_rel_speed) + value_upper_left*(headway_upper-headway)*(leader_rel_speed-rel_speed_lower) + value_upper_right*(headway-headway_lower)*(leader_rel_speed-rel_speed_lower) - ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200) AS safety_value, + ) / ((headway_upper-headway_lower)*(rel_speed_upper-rel_speed_lower)), 200.0) AS safety_value, vt.source_id FROM fact_vehicle_trace vt LEFT OUTER JOIN fact_safety_matrix sm ON 1 = 1 @@ -248,13 +261,42 @@ class QueryStrings(Enum): FACT_SAFETY_METRICS_AGG = """ SELECT source_id, - SUM(CASE WHEN safety_value < 0 THEN 1 ELSE 0 END) * 100 / COUNT() safety_rate, + SUM(CASE WHEN safety_value < 0 THEN 1.0 ELSE 0.0 END) * 100.0 / COUNT() safety_rate, MAX(safety_value) AS safety_value_max FROM fact_safety_metrics WHERE 1 = 1 AND date = \'{date}\' AND partition_name = \'{partition}_FACT_SAFETY_METRICS\' GROUP BY 1 + ; + """ + + FACT_SAFETY_METRICS_BINNED = """ + WITH unfilter_bins AS ( + SELECT + ROW_NUMBER() OVER() - 51 AS lb, + ROW_NUMBER() OVER() - 50 AS ub + FROM fact_safety_metrics + ), bins AS ( + SELECT + lb, + ub + FROM unfilter_bins + WHERE 1=1 + AND lb >= -10 + AND ub <= 10 + ) + SELECT + CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS safety_value_bin, + COUNT() AS count + FROM bins, fact_safety_metrics fsm + WHERE 1 = 1 + AND fsm.date = \'{date}\' + AND fsm.partition_name = \'{partition}_FACT_SAFETY_METRICS\' + AND fsm.safety_value >= bins.lb + AND fsm.safety_value < bins.ub + GROUP BY 1 + ; """ FACT_NETWORK_THROUGHPUT_AGG = """ @@ -326,6 +368,35 @@ class QueryStrings(Enum): ; """ + FACT_VEHICLE_FUEL_EFFICIENCY_BINNED = """ + WITH unfilter_bins AS ( + SELECT + ROW_NUMBER() OVER() - 1 AS lb, + ROW_NUMBER() OVER() AS ub + FROM fact_safety_metrics + ) bins AS ( + SELECT + lb, + ub + FROM unfilter_bins + WHERE 1=1 + AND lb >= 0 + AND ub <= 20 + ) + SELECT + CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub 
AS VARCHAR), ')') AS fuel_efficiency_bin, + COUNT() AS count + FROM bins, fact_vehicle_fuel_efficiency_agg agg + WHERE 1 = 1 + AND agg.date = \'{date}\' + AND agg.partition_name = \'{partition}_FACT_VEHICLE_FUEL_EFFICIENCY_AGG\' + AND agg.energy_model_id = 'POWER_DEMAND_MODEL_DENOISED_ACCEL' + AND 1000 * agg.efficiency_meters_per_joules >= bins.lb + AND 1000 * agg.efficiency_meters_per_joules < bins.ub + GROUP BY 1 + ; + """ + FACT_NETWORK_FUEL_EFFICIENCY_AGG = """ SELECT source_id, @@ -701,7 +772,7 @@ class QueryStrings(Enum): SELECT network, submission_date, - LAG(max_score, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score + LAG(max_score IGNORE NULLS, 1) OVER (PARTITION BY network ORDER BY submission_date ASC) AS max_score FROM curr_max ), unioned AS ( SELECT * FROM curr_max From bb1f4f5fb06382b609e3ced212b2ac62790e0e96 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 16:39:19 -0700 Subject: [PATCH 318/335] remove extra whitespace --- flow/core/params.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/core/params.py b/flow/core/params.py index c6feb5086..6f3ec2fbc 100755 --- a/flow/core/params.py +++ b/flow/core/params.py @@ -308,8 +308,8 @@ def add(self, if energy_model not in ENERGY_MODELS: print('{} for vehicle {} is not a valid energy model. 
Defaulting to {}\n'.format(energy_model, - veh_id, - DEFAULT_ENERGY_MODEL)) + veh_id, + DEFAULT_ENERGY_MODEL)) energy_model = DEFAULT_ENERGY_MODEL type_params = {} From 9f1a8344c00b863915311d4cd3ae74cee960cf97 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 16:42:33 -0700 Subject: [PATCH 319/335] whitespace linting --- flow/core/experiment.py | 8 ++++---- flow/data_pipeline/query.py | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index a7ac07738..f29c547ab 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -235,10 +235,10 @@ def rl_actions(*_): if to_aws: tsd_main(trajectory_table_path, - {'network': self.env.network.__class__}, - min_speed=0, - max_speed=10, - start=self.env.env_params.warmup_steps) + {'network': self.env.network.__class__}, + min_speed=0, + max_speed=10, + start=self.env.env_params.warmup_steps) upload_to_s3('circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, source_id), diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index adc472176..a57c6e19e 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -278,13 +278,13 @@ class QueryStrings(Enum): ROW_NUMBER() OVER() - 50 AS ub FROM fact_safety_metrics ), bins AS ( - SELECT + SELECT lb, ub FROM unfilter_bins WHERE 1=1 AND lb >= -10 - AND ub <= 10 + AND ub <= 10 ) SELECT CONCAT('[', CAST(bins.lb AS VARCHAR), ', ', CAST(bins.ub AS VARCHAR), ')') AS safety_value_bin, From 220994e38df38aa0ba1b0ae3123b0af4f0332c0d Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 17:30:42 -0700 Subject: [PATCH 320/335] Update energy query with new power demand model (#996) * update tacoma power demand query, meters/Joules -> mpg conversion --- flow/data_pipeline/query.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/flow/data_pipeline/query.py 
b/flow/data_pipeline/query.py index a57c6e19e..558488d8e 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -114,13 +114,12 @@ speed, acceleration, road_grade, - GREATEST(0, 2041 * speed * (( - CASE - WHEN acceleration > 0 THEN 1 - WHEN acceleration < 0 THEN 0 - ELSE 0.5 - END * (1 - {0}) + {0}) * acceleration + 9.807 * SIN(road_grade) - ) + 2041 * 9.807 * 0.0027 * speed + 0.5 * 1.225 * 3.2 * 0.4 * POW(speed,3)) AS power, + GREATEST(0, 2041 * acceleration * speed + + 3405.5481762 + + 83.12392997 * speed + + 6.7650718327 * POW(speed,2) + + 0.7041355229 * POW(speed,3) + ) + GREATEST(0, 4598.7155 * accel + 975.12719 * accel * speed) AS power, \'{1}\' AS energy_model_id, source_id FROM {2} @@ -361,7 +360,7 @@ class QueryStrings(Enum): distance_meters, power_watts * time_step_size_seconds AS energy_joules, distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, - 19972 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + 33561 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon FROM sub_fact_vehicle_trace WHERE 1 = 1 AND power_watts * time_step_size_seconds != 0 @@ -404,7 +403,7 @@ class QueryStrings(Enum): SUM(distance_meters) AS distance_meters, SUM(energy_joules) AS energy_joules, SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, - 19972 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + 33561 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' @@ -420,7 +419,7 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 19972 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, + 33561 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour, s.safety_rate, s.safety_value_max From 
f1ded54309240880cf03a652873aa5bbba227f04 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Wed, 8 Jul 2020 19:48:02 -0700 Subject: [PATCH 321/335] Power-Demand Model fix (#995) * fix some implementation errors in energy models * pull i210_dev and fix flake8 --- flow/energy_models/base_energy.py | 4 +- flow/energy_models/power_demand.py | 63 +++++++++++++++++++++++------ flow/energy_models/toyota_energy.py | 15 ++++--- 3 files changed, 59 insertions(+), 23 deletions(-) diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py index bf1e16e09..a16c84694 100644 --- a/flow/energy_models/base_energy.py +++ b/flow/energy_models/base_energy.py @@ -12,9 +12,7 @@ class BaseEnergyModel(metaclass=ABCMeta): to be an averaged-size vehicle. """ - def __init__(self, kernel): - self.k = kernel - + def __init__(self): # 15 kilowatts = 1 gallon/hour conversion factor self.conversion = 15e3 diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py index ddf09b2fc..d8c6bc9ec 100644 --- a/flow/energy_models/power_demand.py +++ b/flow/energy_models/power_demand.py @@ -16,14 +16,12 @@ class PowerDemandModel(BaseEnergyModel, metaclass=ABCMeta): """ def __init__(self, - kernel, mass=2041, area=3.2, rolling_res_coeff=0.0027, aerodynamic_drag_coeff=0.4, p1_correction=4598.7155, p3_correction=975.12719): - self.k = kernel self.g = 9.807 self.rho_air = 1.225 self.gamma = 1 @@ -31,8 +29,7 @@ def __init__(self, self.cross_area = area self.rolling_res_coeff = rolling_res_coeff self.aerodynamic_drag_coeff = aerodynamic_drag_coeff - self.p1_correction = p1_correction - self.p3_correction = p3_correction + self.power_correction_coeffs = np.array([p1_correction, p3_correction]) def calculate_power_at_the_wheels(self, accel, speed, grade): """Calculate the instantaneous power required. 
@@ -91,7 +88,8 @@ def get_power_correction_factor(self, accel, speed, grade): ------- float """ - return self.p1_correction * accel + self.p3_correction * accel * speed + state_variables = np.array([accel, accel * speed]) + return max(0, np.dot(self.power_correction_coeffs, state_variables)) def get_instantaneous_power(self, accel, speed, grade): """See parent class. @@ -100,27 +98,68 @@ def get_instantaneous_power(self, accel, speed, grade): """ regen_cap = self.get_regen_cap(accel, speed, grade) power_at_the_wheels = max(regen_cap, self.calculate_power_at_the_wheels(accel, speed, grade)) - correction_factor = max(regen_cap, self.get_power_correction_factor(accel, speed, grade)) + correction_factor = self.get_power_correction_factor(accel, speed, grade) return power_at_the_wheels + correction_factor class PDMCombustionEngine(PowerDemandModel): """Power Demand Model for a combustion engine vehicle.""" + def __init__(self, + idle_coeff=3405.5481762, + linear_friction_coeff=83.123929917, + quadratic_friction_coeff=6.7650718327, + drag_coeff=0.7041355229, + p1_correction=4598.7155, + p3_correction=975.12719): + super(PDMCombustionEngine, self).__init__() + self.fuel_consumption_power_coeffs = np.array([idle_coeff, + linear_friction_coeff, + quadratic_friction_coeff, + drag_coeff]) + def get_regen_cap(self, accel, speed, grade): """See parent class.""" return 0 + def calculate_fuel_consumption_power(self, accel, speed, grade): + """Calculate the instantaneous power from a fitted function to Toyota Tacoma fuel consumption. 
+ + Parameters + ---------- + accel : float + Instantaneous acceleration of the vehicle + speed : float + Instantaneous speed of the vehicle + grade : float + Instantaneous road grade of the vehicle + Returns + ------- + float + """ + state_variables = np.array([1, speed, speed**2, speed**3]) + power_0 = np.dot(self.fuel_consumption_power_coeffs, state_variables) + return max(self.mass * accel * speed + power_0, 0) + + def get_instantaneous_power(self, accel, speed, grade): + """See parent class.""" + fuel_consumption_power = self.calculate_fuel_consumption_power(accel, speed, grade) + power_correction_factor = self.get_power_correction_factor(accel, speed, grade) + return fuel_consumption_power + power_correction_factor + class PDMElectric(PowerDemandModel): """Power Demand Model for an electric vehicle.""" - def __init__(self, kernel): - super(PDMElectric, self).__init__(kernel, - mass=1663, - area=2.4, - rolling_res_coeff=0.007, - aerodynamic_drag_coeff=0.24) + def __init__(self, + mass=1663, + area=2.4, + rolling_res_coeff=0.007, + aerodynamic_drag_coeff=0.24): + super(PDMElectric, self).__init__(mass=mass, + area=area, + rolling_res_coeff=rolling_res_coeff, + aerodynamic_drag_coeff=aerodynamic_drag_coeff) def get_regen_cap(self, accel, speed, grade): """See parent class.""" diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index d24b41662..64036aab6 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -9,9 +9,7 @@ class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta): """Base Toyota Energy model class.""" - def __init__(self, kernel, filename=None): - self.k = kernel - + def __init__(self, filename): # download file from s3 bucket s3 = boto3.client('s3') s3.download_file('toyota.restricted', filename, 'temp.pkl') @@ -30,14 +28,15 @@ def get_instantaneous_power(self, accel, speed, grade): class PriusEnergy(ToyotaModel): """Toyota Prius (EV) energy model class.""" - def __init__(self, 
kernel, soc=0.9): - super(PriusEnergy, self).__init__(kernel, filename='prius_ev.pkl') + def __init__(self, sim_step, soc=0.9): + super(PriusEnergy, self).__init__(filename='prius_ev.pkl') + self.sim_step = sim_step self.soc = soc def get_instantaneous_power(self, accel, speed, grade): """See parent class.""" socdot = self.toyota_energy(self.soc, accel, speed, grade) - self.soc -= socdot * self.k.env.sim_step + self.soc -= socdot * self.sim_step # FIXME (Joy): convert socdot to power return socdot @@ -45,8 +44,8 @@ def get_instantaneous_power(self, accel, speed, grade): class TacomaEnergy(ToyotaModel): """Toyota Tacoma energy model class.""" - def __init__(self, kernel): - super(TacomaEnergy, self).__init__(kernel, filename='tacoma.pkl') + def __init__(self): + super(TacomaEnergy, self).__init__(filename='tacoma.pkl') def get_instantaneous_power(self, accel, speed, grade): """See parent class.""" From f63cc37262c70f1f41cd4785dff2c4c8e758a5e8 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 9 Jul 2020 14:41:51 -0700 Subject: [PATCH 322/335] convert tacoma fc to gallons per hour --- flow/energy_models/power_demand.py | 3 ++- flow/energy_models/toyota_energy.py | 7 ++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py index d8c6bc9ec..8cb5fd20c 100644 --- a/flow/energy_models/power_demand.py +++ b/flow/energy_models/power_demand.py @@ -1,8 +1,9 @@ """Script containing the vehicle power demand model energy classes.""" +from abc import ABCMeta, abstractmethod import math import numpy as np + from flow.energy_models.base_energy import BaseEnergyModel -from abc import ABCMeta, abstractmethod class PowerDemandModel(BaseEnergyModel, metaclass=ABCMeta): diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index 64036aab6..ae1ecf9f3 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -1,9 +1,10 @@ """Script 
containing the Toyota energy classes.""" +from abc import ABCMeta, abstractmethod import dill as pickle import boto3 -from flow.energy_models.base_energy import BaseEnergyModel import os -from abc import ABCMeta, abstractmethod + +from flow.energy_models.base_energy import BaseEnergyModel class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta): @@ -54,4 +55,4 @@ def get_instantaneous_power(self, accel, speed, grade): def get_instantaneous_fuel_consumption(self, accel, speed, grade): """See parent class.""" fc = self.toyota_energy(accel, speed, grade) - return fc + return fc * 3600.0 / 3217.25 From c2836e86bab242b9181fab1e72b8a4c173967f75 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Thu, 9 Jul 2020 15:09:55 -0700 Subject: [PATCH 323/335] comment on road grade; exception handling on unpickling --- flow/energy_models/base_energy.py | 3 +++ flow/energy_models/toyota_energy.py | 9 ++++++++- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py index a16c84694..fe7f463bb 100644 --- a/flow/energy_models/base_energy.py +++ b/flow/energy_models/base_energy.py @@ -10,6 +10,9 @@ class BaseEnergyModel(metaclass=ABCMeta): vehicle type: whether EV or Combustion Engine, Toyota Prius or Tacoma or non-Toyota vehicles. Non-Toyota vehicles are set by default to be an averaged-size vehicle. + + Note: road grade is included as an input parameter, but the + functional dependence on road grade is not yet implemented. 
""" def __init__(self): diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index ae1ecf9f3..492304b48 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -14,8 +14,15 @@ def __init__(self, filename): # download file from s3 bucket s3 = boto3.client('s3') s3.download_file('toyota.restricted', filename, 'temp.pkl') + with open('temp.pkl', 'rb') as file: - self.toyota_energy = pickle.load(file) + try: + self.toyota_energy = pickle.load(file) + except TypeError: + print('Must use Python version 3.6.8 to unpickle') + # delete pickle file + os.remove(file) + raise # delete pickle file os.remove(file) From 29eb5a02733bbace696701c2d1994e9d3fd81823 Mon Sep 17 00:00:00 2001 From: akashvelu Date: Thu, 9 Jul 2020 17:03:19 -0700 Subject: [PATCH 324/335] Add learning rate as a parameter, override import_from_h5 method using setattr --- .../imitation_learning/custom_ppo.py | 31 +++++++++++++++++++ .../imitation_learning/custom_trainable.py | 2 ++ .../imitating_controller.py | 2 +- .../imitation_learning/imitating_network.py | 5 +-- flow/controllers/imitation_learning/run.py | 3 ++ .../controllers/imitation_learning/trainer.py | 4 +-- 6 files changed, 42 insertions(+), 5 deletions(-) diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/controllers/imitation_learning/custom_ppo.py index 0075741d3..ed6fa032b 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/controllers/imitation_learning/custom_ppo.py @@ -195,3 +195,34 @@ def get_policy_class(config): validate_config=validate_config, after_optimizer_step=update_kl, after_train_result=warn_about_bad_reward_scales) + + +from ray.rllib.policy.sample_batch import DEFAULT_POLICY_ID +def import_model(self, import_file, policy_id=DEFAULT_POLICY_ID): + """Imports a model from import_file. + + Note: Currently, only h5 files are supported. + + Args: + import_file (str): The file to import the model from. 
+ + Returns: + A dict that maps ExportFormats to successfully exported models. + """ + # Check for existence. + if not os.path.exists(import_file): + raise FileNotFoundError( + "`import_file` '{}' does not exist! Can't import Model.". + format(import_file)) + # Get the format of the given file. + import_format = "h5" # TODO(sven): Support checkpoint loading. + + ExportFormat.validate([import_format]) + if import_format != ExportFormat.H5: + raise NotImplementedError + else: + return self.import_policy_model_from_h5(import_file, policy_id=policy_id) + +from ray.rllib.agents import Trainer +print('Overriding import model') +setattr(Trainer, 'import_model', import_model) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/custom_trainable.py b/flow/controllers/imitation_learning/custom_trainable.py index b41728f11..66785d905 100644 --- a/flow/controllers/imitation_learning/custom_trainable.py +++ b/flow/controllers/imitation_learning/custom_trainable.py @@ -17,7 +17,9 @@ def _setup(self, config): env_name = config['env'] self.trainer = custom_ppo.CustomPPOTrainer(env=env_name, config=config) + # kind of hacky, but don't know a better solution to the default policy not existing policy_id = list(self.trainer.get_weights().keys())[0] + print("test: ", list(self.trainer.get_weights().keys())) self.trainer.import_model(config['model']['custom_options']['h5_load_path'], policy_id=policy_id) def _train(self): diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/controllers/imitation_learning/imitating_controller.py index 64622ef73..4fdd4ebd7 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/controllers/imitation_learning/imitating_controller.py @@ -62,7 +62,7 @@ def get_accel(self, env): if not (self.veh_id in rl_ids): # vehicle in non-control edge, so return None to default control to Sumo - return None + return None # return the action taken by the vehicle ind = rl_ids.index(self.veh_id) 
diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/controllers/imitation_learning/imitating_network.py index 81642883a..a95222855 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/controllers/imitation_learning/imitating_network.py @@ -9,7 +9,7 @@ class ImitatingNetwork(): Class containing neural network which learns to imitate a given expert controller. """ - def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, stochastic=False, variance_regularizer = 0, load_model=False, load_path='', tensorboard_path=''): + def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, learning_rate, stochastic=False, variance_regularizer = 0, load_model=False, load_path='', tensorboard_path=''): """Initializes and constructs neural network. Parameters @@ -41,6 +41,7 @@ def __init__(self, sess, action_dim, obs_dim, fcnet_hiddens, replay_buffer_size, self.fcnet_hiddens = fcnet_hiddens self.stochastic=stochastic self.variance_regularizer = variance_regularizer + self.learning_rate = learning_rate self.train_steps = 0 self.action_steps = 0 @@ -72,7 +73,7 @@ def compile_network(self): Compiles Keras network with appropriate loss and optimizer """ loss = get_loss(self.stochastic, self.variance_regularizer) - self.model.compile(loss=loss, optimizer='adam') + self.model.compile(loss=loss, optimizer=tf.keras.optimizers.Adam(learning_rate=self.learning_rate)) def train(self, observation_batch, action_batch): diff --git a/flow/controllers/imitation_learning/run.py b/flow/controllers/imitation_learning/run.py index 6adc04199..41ceb82a6 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/controllers/imitation_learning/run.py @@ -88,6 +88,7 @@ def main(): parser.add_argument('--stochastic', type=bool, default=False, help='If true, learn a stochastic policy (MV Gaussian)') parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization 
hyperparameter to penalize variance in imitation learning loss, for stochastic policies.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--lr', type=float, default=0.001, help='Learning rate for imitation learning and value function learning') parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existin imitation neural net') parser.add_argument('--load_imitation_path', type=str, default='', help='Path to h5 file from which to load existing imitation neural net') @@ -120,5 +121,7 @@ def main(): if params['num_eval_episodes'] > 0: runner.evaluate() + print('done') + if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/controllers/imitation_learning/trainer.py index 2c951ac5b..c027368ae 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/controllers/imitation_learning/trainer.py @@ -62,7 +62,7 @@ def __init__(self, params, submodule): self.params['obs_dim'] = obs_dim # initialize neural network class and tf variables - self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path'], tensorboard_path=self.params['tensorboard_path']) + self.action_network = ImitatingNetwork(self.sess, self.params['action_dim'], self.params['obs_dim'], self.params['fcnet_hiddens'], self.params['replay_buffer_size'], self.params['lr'], stochastic=self.params['stochastic'], variance_regularizer=self.params['variance_regularizer'], load_model=self.params['load_imitation_model'], load_path=self.params['load_imitation_path'], tensorboard_path=self.params['tensorboard_path']) # controllers setup @@ -239,7 +239,7 @@ 
def learn_value_function(self, num_samples, num_iterations, num_grad_steps): print("\n\n********** Learning value function of imitation policy ************ \n") # init value function neural net vf_net = build_neural_net_deterministic(self.params['obs_dim'], 1, self.params['fcnet_hiddens']) - vf_net.compile(loss='mean_squared_error', optimizer = 'adam') + vf_net.compile(loss='mean_squared_error', optimizer = tf.keras.optimizers.Adam(learning_rate=self.params['lr'])) max_decel = self.flow_params['env'].additional_params['max_decel'] # collect trajectory samples to train on From 97333cff14f2742dd76fe503dd35c0d11e6c06a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Nathan=20Lichtl=C3=A9?= Date: Fri, 10 Jul 2020 02:43:56 +0200 Subject: [PATCH 325/335] add --multi_node flag --- examples/train.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/examples/train.py b/examples/train.py index d062fd39a..3bdd751e6 100644 --- a/examples/train.py +++ b/examples/train.py @@ -84,6 +84,9 @@ def parse_args(args): parser.add_argument( '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + parser.add_argument('--multi_node', action='store_true', + help='Set to true if this will be run in cluster mode.' 
+ 'Relevant for rllib') return parser.parse_known_args(args)[0] @@ -350,7 +353,9 @@ def train_rllib(submodule, flags): def trial_str_creator(trial): return "{}_{}".format(trial.trainable_name, trial.experiment_tag) - if flags.local_mode: + if flags.multi_node: + ray.init(redis_address='localhost:6379') + elif flags.local_mode: ray.init(local_mode=True) else: ray.init() From 3ac508aaf380c11c0d2ad8ee0bad6ca920fcf905 Mon Sep 17 00:00:00 2001 From: Aboudy Kreidieh Date: Fri, 10 Jul 2020 19:43:48 -0700 Subject: [PATCH 326/335] Ak/i210 master merge (#994) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * implement HighwayNetwork for Time-Space Diagrams (#979) * fixed h-baselines bug (#982) * Replicated changes in 867. Done bug (#980) * Aimsun changes minus reset * removed crash attribute * tensorflow 1.15.2 * merge custom output and failsafes to master (#981) * add write_to_csv() function to master * include pipeline README.md * add data pipeline __init__ * add experiment.py changes * add write_to_csv() function to master * change warning print to ValueError message * update to new update_accel methods * add display_warnings boolean * add get_next_speed() function to base vehicle class * revert addition of get_next_speed * merge custom output and failsafes to master * add write_to_csv() function to master * add display_warnings boolean * add get_next_speed() function to base vehicle class * revert addition of get_next_speed * revert change to get_feasible_action call signature * change print syntax to be python3.5 compliant * add tests for new failsafe features * smooth default to True * rearrange raise exception for test coverage * moved simulation logging to the simulation kernel (#991) * add 210 edgestarts for backwards compatibility (#985) * fastforward PR 989 * fix typo * Requirements update (#963) * updated requirements.txt and environment.yml * Visualizer tests fixes * remove .func * move all miles_per_* rewards to 
instantaneous_mpg * update reward fns to new get_accel() method * made tests faster * some fixes to utils * change the column order, modify the pipeline to use SUMO emission file * write metadata to csv * change apply_acceleration smoothness setting * make save_csv return the file paths Co-authored-by: AboudyKreidieh Co-authored-by: liljonnystyle Co-authored-by: Kathy Jang Co-authored-by: Nathan Lichtlé Co-authored-by: akashvelu Co-authored-by: Brent Zhao --- environment.yml | 2 +- examples/exp_configs/non_rl/highway_single.py | 7 +- .../exp_configs/non_rl/i210_subnetwork.py | 7 +- .../rl/multiagent/multiagent_i210.py | 3 +- examples/train.py | 29 ++- flow/controllers/base_controller.py | 137 ++++++------- flow/controllers/car_following_models.py | 52 +++-- flow/core/experiment.py | 150 ++++++++------ flow/core/kernel/simulation/traci.py | 190 ++++++++++++++++-- flow/core/kernel/vehicle/base.py | 64 +----- flow/core/kernel/vehicle/traci.py | 77 +++---- flow/core/rewards.py | 61 +++++- flow/data_pipeline/data_pipeline.py | 47 ++--- flow/energy_models/base_energy.py | 2 + flow/energy_models/power_demand.py | 6 +- flow/energy_models/toyota_energy.py | 2 + flow/envs/base.py | 31 ++- flow/envs/multiagent/base.py | 14 +- flow/envs/multiagent/i210.py | 31 +-- flow/utils/rllib.py | 5 +- flow/visualize/i210_replay.py | 11 +- flow/visualize/time_space_diagram.py | 35 +++- flow/visualize/visualizer_rllib.py | 8 +- requirements.txt | 2 +- .../multi_agent/checkpoint_1/checkpoint-1 | Bin 10209 -> 20358 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes tests/data/rllib_data/multi_agent/params.json | 40 ++-- tests/data/rllib_data/multi_agent/params.pkl | Bin 17562 -> 21381 bytes .../single_agent/checkpoint_1/checkpoint-1 | Bin 582 -> 26194 bytes .../checkpoint_1/checkpoint-1.tune_metadata | Bin 180 -> 210 bytes .../data/rllib_data/single_agent/params.json | 28 ++- tests/data/rllib_data/single_agent/params.pkl | Bin 6414 -> 6687 bytes 
tests/fast_tests/test_controllers.py | 169 ++++++++++++++++ tests/fast_tests/test_examples.py | 48 ++++- .../fast_tests/test_experiment_base_class.py | 36 +++- tests/fast_tests/test_rewards.py | 26 --- 36 files changed, 860 insertions(+), 460 deletions(-) diff --git a/environment.yml b/environment.yml index 162bed533..ecbe5785f 100644 --- a/environment.yml +++ b/environment.yml @@ -9,7 +9,7 @@ dependencies: - path.py - python-dateutil==2.7.3 - pip>=18.0 - - tensorflow==1.14.0 + - tensorflow==1.15.2 - setuptools==41.0.0 - plotly==2.4.0 - gym==0.14.0 diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index 8ec189140..ff486b3f5 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -11,7 +11,7 @@ from flow.core.params import VehicleParams from flow.core.params import SumoParams from flow.core.params import SumoLaneChangeParams -from flow.core.rewards import miles_per_gallon, miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.core.params import SumoCarFollowingParams from flow.networks import HighwayNetwork from flow.envs import TestEnv @@ -147,10 +147,7 @@ env.k.vehicle.get_speed(env.k.vehicle.get_ids()))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - "miles_per_megajoule": lambda env: np.nan_to_num( - miles_per_megajoule(env, env.k.vehicle.get_ids(), gain=1.0) - ), "miles_per_gallon": lambda env: np.nan_to_num( - miles_per_gallon(env, env.k.vehicle.get_ids(), gain=1.0) + instantaneous_mpg(env, env.k.vehicle.get_ids(), gain=1.0) ) } diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 73e49caef..9e415fc65 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -12,8 +12,7 @@ from flow.core.params import VehicleParams from flow.core.params import InitialConfig from 
flow.core.params import InFlows -from flow.core.rewards import miles_per_gallon -from flow.core.rewards import miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.networks import I210SubNetwork from flow.networks.i210_subnetwork import EDGES_DISTRIBUTION from flow.envs import TestEnv @@ -211,8 +210,6 @@ def valid_ids(env, veh_ids): env.k.vehicle.get_speed(valid_ids(env, env.k.vehicle.get_ids())))), "avg_outflow": lambda env: np.nan_to_num( env.k.vehicle.get_outflow_rate(120)), - "mpg": lambda env: miles_per_gallon( + "mpg": lambda env: instantaneous_mpg( env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), - "mpj": lambda env: miles_per_megajoule( - env, valid_ids(env, env.k.vehicle.get_ids()), gain=1.0), } diff --git a/examples/exp_configs/rl/multiagent/multiagent_i210.py b/examples/exp_configs/rl/multiagent/multiagent_i210.py index a1c2e4f25..3a8207eb8 100644 --- a/examples/exp_configs/rl/multiagent/multiagent_i210.py +++ b/examples/exp_configs/rl/multiagent/multiagent_i210.py @@ -29,7 +29,8 @@ # Specify some configurable constants. 
# # =========================================================================== # -# whether to include the downstream slow-down edge in the network as well as a ghost cell at the upstream edge +# whether to include the downstream slow-down edge in the network as well as a +# ghost cell at the upstream edge WANT_BOUNDARY_CONDITIONS = True # whether to include vehicles on the on-ramp ON_RAMP = False diff --git a/examples/train.py b/examples/train.py index 3bdd751e6..f889ac9b6 100644 --- a/examples/train.py +++ b/examples/train.py @@ -17,7 +17,7 @@ import pytz from flow.core.util import ensure_dir -from flow.core.rewards import miles_per_gallon, miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.utils.registry import env_constructor from flow.utils.rllib import FlowParamsEncoder, get_flow_params from flow.utils.registry import make_create_env @@ -42,10 +42,6 @@ def parse_args(args): help='Name of the experiment configuration file, as located in ' 'exp_configs/rl/singleagent or exp_configs/rl/multiagent.') - parser.add_argument( - 'exp_title', type=str, - help='Name of experiment that results will be stored in') - # optional input parameters parser.add_argument( '--rl_trainer', type=str, default="rllib", @@ -76,7 +72,8 @@ def parse_args(args): parser.add_argument( '--rollout_size', type=int, default=1000, help='How many steps are in a training batch.') - parser.add_argument('--use_s3', action='store_true', help='If true, upload results to s3') + parser.add_argument('--use_s3', action='store_true', default=False, + help='If true, upload results to s3') parser.add_argument('--local_mode', action='store_true', default=False, help='If true only 1 CPU will be used') parser.add_argument('--render', action='store_true', default=False, @@ -84,6 +81,9 @@ def parse_args(args): parser.add_argument( '--checkpoint_path', type=str, default=None, help='Directory with checkpoint to restore training from.') + parser.add_argument( + '--exp_title', type=str, 
default=None, + help='Name of experiment that results will be stored in') parser.add_argument('--multi_node', action='store_true', help='Set to true if this will be run in cluster mode.' 'Relevant for rllib') @@ -147,7 +147,7 @@ def setup_exps_rllib(flow_params, number of CPUs to run the experiment over n_rollouts : int number of rollouts per training iteration - flags: + flags : TODO custom arguments policy_graphs : dict, optional TODO @@ -243,8 +243,7 @@ def on_episode_start(info): episode.user_data["avg_speed"] = [] episode.user_data["avg_speed_avs"] = [] episode.user_data["avg_energy"] = [] - episode.user_data["avg_mpg"] = [] - episode.user_data["avg_mpj"] = [] + episode.user_data["inst_mpg"] = [] episode.user_data["num_cars"] = [] episode.user_data["avg_accel_human"] = [] episode.user_data["avg_accel_avs"] = [] @@ -275,8 +274,7 @@ def on_episode_step(info): av_speed = np.mean([speed for speed in env.k.vehicle.get_speed(rl_ids) if speed >= 0]) if not np.isnan(av_speed): episode.user_data["avg_speed_avs"].append(av_speed) - episode.user_data["avg_mpg"].append(miles_per_gallon(env, veh_ids, gain=1.0)) - episode.user_data["avg_mpj"].append(miles_per_megajoule(env, veh_ids, gain=1.0)) + episode.user_data["inst_mpg"].append(instantaneous_mpg(env, veh_ids, gain=1.0)) episode.user_data["num_cars"].append(len(env.k.vehicle.get_ids())) episode.user_data["avg_accel_human"].append(np.nan_to_num(np.mean( [np.abs((env.k.vehicle.get_speed(veh_id) - env.k.vehicle.get_previous_speed(veh_id))/env.sim_step) for @@ -295,8 +293,7 @@ def on_episode_end(info): episode.custom_metrics["avg_speed_avs"] = avg_speed_avs episode.custom_metrics["avg_accel_avs"] = np.mean(episode.user_data["avg_accel_avs"]) episode.custom_metrics["avg_energy_per_veh"] = np.mean(episode.user_data["avg_energy"]) - episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["avg_mpg"]) - episode.custom_metrics["avg_mpj_per_veh"] = np.mean(episode.user_data["avg_mpj"]) + 
episode.custom_metrics["avg_mpg_per_veh"] = np.mean(episode.user_data["inst_mpg"]) episode.custom_metrics["num_cars"] = np.mean(episode.user_data["num_cars"]) def on_train_result(info): @@ -361,7 +358,7 @@ def trial_str_creator(trial): ray.init() exp_dict = { "run_or_experiment": alg_run, - "name": flags.exp_title, + "name": flags.exp_title or flow_params['exp_tag'], "config": config, "checkpoint_freq": flags.checkpoint_freq, "checkpoint_at_end": True, @@ -373,9 +370,9 @@ def trial_str_creator(trial): } date = datetime.now(tz=pytz.utc) date = date.astimezone(pytz.timezone('US/Pacific')).strftime("%m-%d-%Y") - s3_string = "s3://i210.experiments/i210/" \ - + date + '/' + flags.exp_title if flags.use_s3: + s3_string = "s3://i210.experiments/i210/" \ + + date + '/' + flags.exp_title exp_dict['upload_dir'] = s3_string tune.run(**exp_dict, queue_trials=False, raise_on_failed_trial=False) diff --git a/flow/controllers/base_controller.py b/flow/controllers/base_controller.py index 9806413e0..a657bf87c 100755 --- a/flow/controllers/base_controller.py +++ b/flow/controllers/base_controller.py @@ -38,6 +38,8 @@ class BaseController(metaclass=ABCMeta): List of failsafes which can be "instantaneous", "safe_velocity", "feasible_accel", or "obey_speed_limit". The order of applying the falsafes will be based on the order in the list. + display_warnings : bool + Flag for toggling on/off printing failsafe warnings to screen. 
noise : double variance of the gaussian from which to sample a noisy acceleration """ @@ -47,6 +49,7 @@ def __init__(self, car_following_params, delay=0, fail_safe=None, + display_warnings=False, noise=0): """Instantiate the base class for acceleration behavior.""" self.veh_id = veh_id @@ -59,17 +62,27 @@ def __init__(self, # longitudinal failsafe used by the vehicle if isinstance(fail_safe, str): - self.fail_safe = [fail_safe] + failsafe_list = [fail_safe] elif isinstance(fail_safe, list) or fail_safe is None: - self.fail_safe = fail_safe + failsafe_list = fail_safe else: - print( - "==========================================================\n" - "WARNING: fail_safe should be string or list of strings. \n" - "Set fal_safe to None\n" - "==========================================================\n" - ) - self.fail_safe = None + raise ValueError("fail_safe should be string or list of strings. Setting fail_safe to None\n") + + failsafe_map = { + 'instantaneous': self.get_safe_action_instantaneous, + 'safe_velocity': self.get_safe_velocity_action, + 'feasible_accel': lambda _, accel: self.get_feasible_action(accel), + 'obey_speed_limit': self.get_obey_speed_limit_action + } + self.failsafes = [] + if failsafe_list: + for check in failsafe_list: + if check in failsafe_map: + self.failsafes.append(failsafe_map.get(check)) + else: + raise ValueError('Skipping {}, as it is not a valid failsafe.'.format(check)) + + self.display_warnings = display_warnings self.max_accel = car_following_params.controller_params['accel'] # max deaccel should always be a positive @@ -103,11 +116,11 @@ def get_action(self, env): float the modified form of the acceleration """ - # clear the current stored accel_no_noise_no_failsafe of this vehicle None - env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, None) - env.k.vehicle.update_accel_no_noise_with_failsafe(self.veh_id, None) - env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, None) - 
env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, None) + # clear the current stored accels of this vehicle to None + env.k.vehicle.update_accel(self.veh_id, None, noise=False, failsafe=False) + env.k.vehicle.update_accel(self.veh_id, None, noise=False, failsafe=True) + env.k.vehicle.update_accel(self.veh_id, None, noise=True, failsafe=False) + env.k.vehicle.update_accel(self.veh_id, None, noise=True, failsafe=True) # this is to avoid abrupt decelerations when a vehicle has just entered # a network and it's data is still not subscribed @@ -128,45 +141,24 @@ def get_action(self, env): # store the acceleration without noise to each vehicle # run fail safe if requested - env.k.vehicle.update_accel_no_noise_no_failsafe(self.veh_id, accel) + env.k.vehicle.update_accel(self.veh_id, accel, noise=False, failsafe=False) accel_no_noise_with_failsafe = accel - if self.fail_safe is not None: - for check in self.fail_safe: - if check == 'instantaneous': - accel_no_noise_with_failsafe = self.get_safe_action_instantaneous( - env, accel_no_noise_with_failsafe) - elif check == 'safe_velocity': - accel_no_noise_with_failsafe = self.get_safe_velocity_action( - env, accel_no_noise_with_failsafe) - elif check == 'feasible_accel': - accel_no_noise_with_failsafe = self.get_feasible_action( - accel_no_noise_with_failsafe) - elif check == 'obey_speed_limit': - accel_no_noise_with_failsafe = self.get_obey_speed_limit_action( - env, accel_no_noise_with_failsafe) - - env.k.vehicle.update_accel_no_noise_with_failsafe( - self.veh_id, accel_no_noise_with_failsafe) + for failsafe in self.failsafes: + accel_no_noise_with_failsafe = failsafe(env, accel_no_noise_with_failsafe) + + env.k.vehicle.update_accel(self.veh_id, accel_no_noise_with_failsafe, noise=False, failsafe=True) # add noise to the accelerations, if requested if self.accel_noise > 0: accel += np.sqrt(env.sim_step) * np.random.normal(0, self.accel_noise) - env.k.vehicle.update_accel_with_noise_no_failsafe(self.veh_id, 
accel) + env.k.vehicle.update_accel(self.veh_id, accel, noise=True, failsafe=False) # run the fail-safes, if requested - if self.fail_safe is not None: - for check in self.fail_safe: - if check == 'instantaneous': - accel = self.get_safe_action_instantaneous(env, accel) - elif check == 'safe_velocity': - accel = self.get_safe_velocity_action(env, accel) - elif check == 'feasible_accel': - accel = self.get_feasible_action(accel) - elif check == 'obey_speed_limit': - accel = self.get_obey_speed_limit_action(env, accel) - - env.k.vehicle.update_accel_with_noise_with_failsafe(self.veh_id, accel) + for failsafe in self.failsafes: + accel = failsafe(env, accel) + + env.k.vehicle.update_accel(self.veh_id, accel, noise=True, failsafe=True) return accel def get_safe_action_instantaneous(self, env, action): @@ -212,11 +204,12 @@ def get_safe_action_instantaneous(self, env, action): # if the vehicle will crash into the vehicle ahead of it in the # next time step (assuming the vehicle ahead of it is not # moving), then stop immediately - print( - "=====================================\n" - "Vehicle {} is about to crash. Instantaneous acceleration " - "clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Vehicle {} is about to crash. Instantaneous acceleration " + "clipping applied.\n" + "=====================================".format(self.veh_id)) return -this_vel / sim_step else: @@ -296,11 +289,12 @@ def safe_velocity(self, env): # edge_speed_limit = env.k.network.speed_limit(this_edge) if this_vel > v_safe: - print( - "=====================================\n" - "Speed of vehicle {} is greater than safe speed. Safe velocity " - "clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Speed of vehicle {} is greater than safe speed. 
Safe velocity " + "clipping applied.\n" + "=====================================".format(self.veh_id)) return v_safe @@ -333,11 +327,12 @@ def get_obey_speed_limit_action(self, env, action): if this_vel + action * sim_step > edge_speed_limit: if edge_speed_limit > 0: - print( - "=====================================\n" - "Speed of vehicle {} is greater than speed limit. Obey " - "speed limit clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Speed of vehicle {} is greater than speed limit. Obey " + "speed limit clipping applied.\n" + "=====================================".format(self.veh_id)) return (edge_speed_limit - this_vel) / sim_step else: return -this_vel / sim_step @@ -365,19 +360,21 @@ def get_feasible_action(self, action): if action > self.max_accel: action = self.max_accel - print( - "=====================================\n" - "Acceleration of vehicle {} is greater than the max " - "acceleration. Feasible acceleration clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Acceleration of vehicle {} is greater than the max " + "acceleration. Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) if action < -self.max_deaccel: action = -self.max_deaccel - print( - "=====================================\n" - "Deceleration of vehicle {} is greater than the max " - "deceleration. Feasible acceleration clipping applied.\n" - "=====================================".format(self.veh_id)) + if self.display_warnings: + print( + "=====================================\n" + "Deceleration of vehicle {} is greater than the max " + "deceleration. 
Feasible acceleration clipping applied.\n" + "=====================================".format(self.veh_id)) return action diff --git a/flow/controllers/car_following_models.py b/flow/controllers/car_following_models.py index 280c94d37..2840e291e 100755 --- a/flow/controllers/car_following_models.py +++ b/flow/controllers/car_following_models.py @@ -56,7 +56,8 @@ def __init__(self, v_des=8, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a CFM controller.""" BaseController.__init__( self, @@ -64,7 +65,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_d = k_d @@ -132,7 +135,8 @@ def __init__(self, v_des=8, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Bilateral car-following model controller.""" BaseController.__init__( self, @@ -140,7 +144,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_d = k_d @@ -212,7 +218,8 @@ def __init__(self, a=0, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Linear Adaptive Cruise controller.""" BaseController.__init__( self, @@ -220,7 +227,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id self.k_1 = k_1 @@ -289,7 +298,8 @@ def __init__(self, v_max=30, time_delay=0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate an Optimal Vehicle Model controller.""" BaseController.__init__( self, @@ -297,7 +307,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + 
display_warnings=display_warnings, + ) self.veh_id = veh_id self.v_max = v_max self.alpha = alpha @@ -364,7 +376,8 @@ def __init__(self, h_st=5, time_delay=0.0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Linear OVM controller.""" BaseController.__init__( self, @@ -372,7 +385,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.veh_id = veh_id # 4.8*1.85 for case I, 3.8*1.85 for case II, per Nakayama self.v_max = v_max @@ -445,6 +460,7 @@ def __init__(self, time_delay=0.0, noise=0, fail_safe=None, + display_warnings=False, car_following_params=None): """Instantiate an IDM controller.""" BaseController.__init__( @@ -453,7 +469,9 @@ def __init__(self, car_following_params, delay=time_delay, fail_safe=fail_safe, - noise=noise) + noise=noise, + display_warnings=display_warnings, + ) self.v0 = v0 self.T = T self.a = a @@ -546,7 +564,8 @@ def __init__(self, tau=1, delay=0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate a Gipps' controller.""" BaseController.__init__( self, @@ -554,8 +573,9 @@ def __init__(self, car_following_params, delay=delay, fail_safe=fail_safe, - noise=noise - ) + noise=noise, + display_warnings=display_warnings, + ) self.v_desired = v0 self.acc = acc @@ -627,7 +647,8 @@ def __init__(self, want_max_accel=False, time_delay=0, noise=0, - fail_safe=None): + fail_safe=None, + display_warnings=False): """Instantiate an Bando controller.""" BaseController.__init__( self, @@ -636,6 +657,7 @@ def __init__(self, delay=time_delay, fail_safe=fail_safe, noise=noise, + display_warnings=display_warnings, ) self.veh_id = veh_id self.v_max = v_max diff --git a/flow/core/experiment.py b/flow/core/experiment.py index f29c547ab..b9ce3ac0e 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -1,15 +1,19 @@ """Contains an experiment class for running 
simulations.""" from flow.utils.registry import make_create_env -from flow.data_pipeline.data_pipeline import write_dict_to_csv, upload_to_s3, get_extra_info, get_configuration +from flow.data_pipeline.data_pipeline import upload_to_s3 +from flow.data_pipeline.data_pipeline import get_configuration +from flow.data_pipeline.data_pipeline import generate_trajectory_table +from flow.data_pipeline.data_pipeline import write_dict_to_csv from flow.data_pipeline.leaderboard_utils import network_name_translate from flow.visualize.time_space_diagram import tsd_main from collections import defaultdict -from datetime import datetime, timezone +from datetime import timezone +from datetime import datetime import logging import time -import os import numpy as np import uuid +import os class Experiment: @@ -21,8 +25,8 @@ class Experiment: the actions of RL agents in the network, type the following: >>> from flow.envs import Env - {'network': >>> self.env.network.__class__} = dict(...) # see the examples in exp_config - {'network': >>> exp = Experiment(self.env.network.__class__}) # for some experiment configuration + >>> flow_params = dict(...) # see the examples in exp_config + >>> exp = Experiment(flow_params) # for some experiment configuration >>> exp.run(num_runs=1) If you wish to specify the actions of RL agents in the network, this may be @@ -40,7 +44,7 @@ class can generate csv files from emission files produced by sumo. These ``emission_path`` attribute in ``SimParams`` to some path. 
>>> from flow.core.params import SimParams - {'network': >>> self.env.network.__class__}['sim'] = SimParams(emission_path="./data") + >>> flow_params['sim'] = SimParams(emission_path="./data") Once you have included this in your environment, run your Experiment object as follows: @@ -89,7 +93,13 @@ def __init__(self, flow_params, custom_callables=None): logging.info("Initializing environment.") - def run(self, num_runs, rl_actions=None, convert_to_csv=False, to_aws=None, only_query="", is_baseline=False): + def run(self, + num_runs, + rl_actions=None, + convert_to_csv=False, + to_aws=None, + only_query="", + is_baseline=False): """Run the given network for a set number of runs. Parameters @@ -151,35 +161,45 @@ def rl_actions(*_): t = time.time() times = [] - # data pipeline - extra_info = defaultdict(lambda: []) - source_id = 'flow_{}'.format(uuid.uuid4().hex) - metadata = defaultdict(lambda: []) - # collect current time - cur_datetime = datetime.now(timezone.utc) - cur_date = cur_datetime.date().isoformat() - cur_time = cur_datetime.time().isoformat() - # collecting information for metadata table - metadata['source_id'].append(source_id) - metadata['submission_time'].append(cur_time) - metadata['network'].append(network_name_translate(self.env.network.name.split('_20')[0])) - metadata['is_baseline'].append(str(is_baseline)) - if to_aws: - name, strategy = get_configuration() - metadata['submitter_name'].append(name) - metadata['strategy'].append(strategy) - if convert_to_csv and self.env.simulator == "traci": - dir_path = self.env.sim_params.emission_path - trajectory_table_path = os.path.join(dir_path, '{}.csv'.format(source_id)) - metadata_table_path = os.path.join(dir_path, '{}_METADATA.csv'.format(source_id)) + # data pipeline + source_id = 'flow_{}'.format(uuid.uuid4().hex) + metadata = defaultdict(lambda: []) + + # collect current time + cur_datetime = datetime.now(timezone.utc) + cur_date = cur_datetime.date().isoformat() + cur_time = 
cur_datetime.time().isoformat() + if to_aws: + # collecting information for metadata table + metadata['source_id'].append(source_id) + metadata['submission_time'].append(cur_time) + metadata['network'].append( + network_name_translate(self.env.network.name.split('_20')[0])) + metadata['is_baseline'].append(str(is_baseline)) + name, strategy = get_configuration() + metadata['submitter_name'].append(name) + metadata['strategy'].append(strategy) + + # emission-specific parameters + dir_path = self.env.sim_params.emission_path + trajectory_table_path = os.path.join( + dir_path, '{}.csv'.format(source_id)) + metadata_table_path = os.path.join( + dir_path, '{}_METADATA.csv'.format(source_id)) + else: + source_id = None + trajectory_table_path = None + metadata_table_path = None + metadata = None + cur_date = None + + emission_files = [] for i in range(num_runs): ret = 0 vel = [] custom_vals = {key: [] for key in self.custom_callables.keys()} - run_id = "run_{}".format(i) - self.env.pipeline_params = (extra_info, source_id, run_id) state = self.env.reset() for j in range(num_steps): t0 = time.time() @@ -192,19 +212,11 @@ def rl_actions(*_): vel.append(np.mean(self.env.k.vehicle.get_speed(veh_ids))) ret += reward - # collect additional information for the data pipeline - get_extra_info(self.env.k.vehicle, extra_info, veh_ids, source_id, run_id) - - # write to disk every 100 steps - if convert_to_csv and self.env.simulator == "traci" and j % 100 == 0: - write_dict_to_csv(trajectory_table_path, extra_info, not j) - extra_info.clear() - # Compute the results for the custom callables. for (key, lambda_func) in self.custom_callables.items(): custom_vals[key].append(lambda_func(self.env)) - if type(done) is dict and done['__all__'] or type(done) is not dict and done: + if type(done) is dict and done['__all__'] or done is True: break # Store the information from the run in info_dict. 
@@ -217,6 +229,11 @@ def rl_actions(*_): print("Round {0}, return: {1}".format(i, ret)) + # Save emission data at the end of every rollout. This is skipped + # by the internal method if no emission path was specified. + if self.env.simulator == "traci": + emission_files.append(self.env.k.simulation.save_emission(run_id=i)) + # Print the averages/std for all variables in the info_dict. for key in info_dict.keys(): print("Average, std {}: {}, {}".format( @@ -226,29 +243,38 @@ def rl_actions(*_): print("steps/second:", np.mean(times)) self.env.terminate() - if convert_to_csv and self.env.simulator == "traci": - # wait a short period of time to ensure the xml file is readable - time.sleep(0.1) - - write_dict_to_csv(trajectory_table_path, extra_info) + if to_aws: + generate_trajectory_table(emission_files, trajectory_table_path, source_id) write_dict_to_csv(metadata_table_path, metadata, True) - - if to_aws: - tsd_main(trajectory_table_path, - {'network': self.env.network.__class__}, - min_speed=0, - max_speed=10, - start=self.env.env_params.warmup_steps) - upload_to_s3('circles.data.pipeline', - 'metadata_table/date={0}/partition_name={1}_METADATA/{1}_METADATA.csv'.format(cur_date, - source_id), - metadata_table_path) - upload_to_s3('circles.data.pipeline', - 'fact_vehicle_trace/date={0}/partition_name={1}/{1}.csv'.format(cur_date, source_id), - trajectory_table_path, - {'network': metadata['network'][0], 'is_baseline': metadata['is_baseline'][0]}) - upload_to_s3('circles.data.pipeline', - 'time_space_diagram/date={0}/partition_name={1}/{1}.png'.format(cur_date, source_id), - trajectory_table_path.replace('csv', 'png')) + tsd_main( + trajectory_table_path, + {'network': self.env.network.__class__}, + min_speed=0, + max_speed=10, + start=self.env.env_params.warmup_steps + ) + exit() + upload_to_s3( + 'circles.data.pipeline', + 'metadata_table/date={0}/partition_name={1}_METADATA/' + '{1}_METADATA.csv'.format(cur_date, source_id), + metadata_table_path + ) + 
upload_to_s3( + 'circles.data.pipeline', + 'fact_vehicle_trace/date={0}/partition_name={1}/' + '{1}.csv'.format(cur_date, source_id), + trajectory_table_path, + {'network': metadata['network'][0], + 'is_baseline': metadata['is_baseline'][0]} + ) + upload_to_s3( + 'circles.data.pipeline', + 'time_space_diagram/date={0}/partition_name={1}/' + '{1}.png'.format(cur_date, source_id), + trajectory_table_path.replace('csv', 'png') + ) + os.remove(trajectory_table_path) + os.remove(metadata_table_path) return info_dict diff --git a/flow/core/kernel/simulation/traci.py b/flow/core/kernel/simulation/traci.py index ed21c924b..79fc12eaa 100644 --- a/flow/core/kernel/simulation/traci.py +++ b/flow/core/kernel/simulation/traci.py @@ -11,6 +11,7 @@ import logging import subprocess import signal +import csv # Number of retries on restarting SUMO before giving up RETRIES_ON_ERROR = 10 @@ -20,6 +21,32 @@ class TraCISimulation(KernelSimulation): """Sumo simulation kernel. Extends flow.core.kernel.simulation.KernelSimulation + + Attributes + ---------- + sumo_proc : subprocess.Popen + contains the subprocess.Popen instance used to start traci + sim_step : float + seconds per simulation step + emission_path : str or None + Path to the folder in which to create the emissions output. Emissions + output is not generated if this value is not specified + time : float + used to internally keep track of the simulation time + stored_data : dict >> + a dict object used to store additional data if an emission file is + provided. 
The first key corresponds to the name of the vehicle, the + second corresponds to the time the sample was issued, and the final + keys represent the additional data stored at every given time for every + vehicle, and consists of the following keys: + + * acceleration (no noise): the accelerations issued to the vehicle, + excluding noise + * acceleration (requested): the requested acceleration by the vehicle, + including noise + * acceleration (actual): the actual acceleration by the vehicle, + collected by computing the difference between the speeds of the + vehicle and dividing it by the sim_step term """ def __init__(self, master_kernel): @@ -32,8 +59,12 @@ def __init__(self, master_kernel): sub-kernels) """ KernelSimulation.__init__(self, master_kernel) - # contains the subprocess.Popen instance used to start traci + self.sumo_proc = None + self.sim_step = None + self.emission_path = None + self.time = 0 + self.stored_data = dict() def pass_api(self, kernel_api): """See parent class. @@ -61,10 +92,61 @@ def simulation_step(self): def update(self, reset): """See parent class.""" - pass + if reset: + self.time = 0 + else: + self.time += self.sim_step + + # Collect the additional data to store in the emission file. + if self.emission_path is not None: + kv = self.master_kernel.vehicle + for veh_id in self.master_kernel.vehicle.get_ids(): + t = round(self.time, 2) + + # some miscellaneous pre-processing + position = kv.get_2d_position(veh_id) + + # Make sure dictionaries corresponding to the vehicle and + # time are available. + if veh_id not in self.stored_data.keys(): + self.stored_data[veh_id] = dict() + if t not in self.stored_data[veh_id].keys(): + self.stored_data[veh_id][t] = dict() + + # Add the speed, position, and lane data. 
+ self.stored_data[veh_id][t].update({ + "speed": kv.get_speed(veh_id), + "lane_number": kv.get_lane(veh_id), + "edge_id": kv.get_edge(veh_id), + "relative_position": kv.get_position(veh_id), + "x": position[0], + "y": position[1], + "headway": kv.get_headway(veh_id), + "leader_id": kv.get_leader(veh_id), + "follower_id": kv.get_follower(veh_id), + "leader_rel_speed": + kv.get_speed(kv.get_leader(veh_id)) + - kv.get_speed(veh_id), + "target_accel_with_noise_with_failsafe": + kv.get_accel(veh_id, noise=True, failsafe=True), + "target_accel_no_noise_no_failsafe": + kv.get_accel(veh_id, noise=False, failsafe=False), + "target_accel_with_noise_no_failsafe": + kv.get_accel(veh_id, noise=True, failsafe=False), + "target_accel_no_noise_with_failsafe": + kv.get_accel(veh_id, noise=False, failsafe=True), + "realized_accel": + kv.get_realized_accel(veh_id), + "road_grade": kv.get_road_grade(veh_id), + "distance": kv.get_distance(veh_id), + }) def close(self): """See parent class.""" + # Save the emission data to a csv. + if self.emission_path is not None: + self.save_emission() + self.kernel_api.close() def check_collision(self): @@ -74,10 +156,24 @@ def check_collision(self): def start_simulation(self, network, sim_params): """Start a sumo simulation instance. - This method uses the configuration files created by the network class - to initialize a sumo instance. Also initializes a traci connection to - interface with sumo from Python. + This method performs the following operations: + + 1. It collect the simulation step size and the emission path + information. If an emission path is specifies, it ensures that the + path exists. + 2. It also uses the configuration files created by the network class to + initialize a sumo instance. + 3. Finally, It initializes a traci connection to interface with sumo + from Python and returns the connection. """ + # Save the simulation step size (for later use). + self.sim_step = sim_params.sim_step + + # Update the emission path term. 
+ self.emission_path = sim_params.emission_path + if self.emission_path is not None: + ensure_dir(self.emission_path) + error = None for _ in range(RETRIES_ON_ERROR): try: @@ -113,17 +209,6 @@ def start_simulation(self, network, sim_params): sumo_call.append("--lateral-resolution") sumo_call.append(str(sim_params.lateral_resolution)) - # add the emission path to the sumo command (if requested) - if sim_params.emission_path is not None: - ensure_dir(sim_params.emission_path) - emission_out = os.path.join( - sim_params.emission_path, - "{0}-emission.xml".format(network.name)) - sumo_call.append("--emission-output") - sumo_call.append(emission_out) - else: - emission_out = None - if sim_params.overtake_right: sumo_call.append("--lanechange.overtake-right") sumo_call.append("true") @@ -150,7 +235,7 @@ def start_simulation(self, network, sim_params): if sim_params.num_clients > 1: logging.info(" Num clients are" + str(sim_params.num_clients)) - logging.debug(" Emission file: " + str(emission_out)) + logging.debug(" Emission file: " + str(self.emission_path)) logging.debug(" Step length: " + str(sim_params.sim_step)) # Opening the I/O thread to SUMO @@ -184,3 +269,74 @@ def teardown_sumo(self): os.killpg(self.sumo_proc.pid, signal.SIGTERM) except Exception as e: print("Error during teardown: {}".format(e)) + + def save_emission(self, run_id=0): + """Save any collected emission data to a csv file. + + If no data was collected, nothing happens. Moreover, any internally + stored data by this class is cleared whenever data is stored. + + Parameters + ---------- + run_id : int + the rollout number, appended to the name of the emission file. Used + to store emission files from multiple rollouts run sequentially. + + Returns + ------- + emission_file_path: str + the relative path of the emission file + """ + # If there is no stored data, ignore this operation. This is to ensure + # that data isn't deleted if the operation is called twice. 
+ if len(self.stored_data) == 0: + return + + # Get a csv name for the emission file. + name = "{}-{}_emission.csv".format( + self.master_kernel.network.network.name, run_id) + + # The name of all stored data-points (excluding id and time) + stored_ids = [ + "x", + "y", + "speed", + "headway", + "leader_id", + "follower_id", + "leader_rel_speed", + "target_accel_with_noise_with_failsafe", + "target_accel_no_noise_no_failsafe", + "target_accel_with_noise_no_failsafe", + "target_accel_no_noise_with_failsafe", + "realized_accel", + "road_grade", + "edge_id", + "lane_number", + "distance", + "relative_position", + ] + + # Update the stored data to push to the csv file. + final_data = {"time": [], "id": []} + final_data.update({key: [] for key in stored_ids}) + + for veh_id in self.stored_data.keys(): + for t in self.stored_data[veh_id].keys(): + final_data['time'].append(t) + final_data['id'].append(veh_id) + for key in stored_ids: + final_data[key].append(self.stored_data[veh_id][t][key]) + + emission_file_path = os.path.join(self.emission_path, name) + with open(emission_file_path, "w") as f: + print(emission_file_path, self.emission_path) + writer = csv.writer(f, delimiter=',') + writer.writerow(final_data.keys()) + writer.writerows(zip(*final_data.values())) + + # Clear all memory from the stored data. This is useful if this + # function is called in between resets. + self.stored_data.clear() + + return emission_file_path diff --git a/flow/core/kernel/vehicle/base.py b/flow/core/kernel/vehicle/base.py index 843ec7eb6..226528259 100644 --- a/flow/core/kernel/vehicle/base.py +++ b/flow/core/kernel/vehicle/base.py @@ -128,25 +128,13 @@ def remove(self, veh_id): pass @abstractmethod - def apply_acceleration(self, veh_id, acc): + def apply_acceleration(self, veh_id, acc, smooth_duration=0): """Apply the acceleration requested by a vehicle in the simulator. - In SUMO, this function applies slowDown method which applies smoothing. 
- - Parameters - ---------- - veh_id : str or list of str - list of vehicle identifiers - acc : float or array_like - requested accelerations from the vehicles - """ - raise NotImplementedError - - def apply_acceleration_not_smooth(self, veh_id, acc): - """Apply the acceleration requested by a vehicle in the simulator. - - In SUMO, this function applies setSpeed method which doesn't apply - smoothing. + In SUMO, this function applies setSpeed for smooth_duration=0, otherwise + the slowDown method applies acceleration smoothly over the smooth_duration + time (in seconds). For more information, see: + https://sumo.dlr.de/pydoc/traci._vehicle.html#VehicleDomain-slowDown Parameters ---------- @@ -154,6 +142,8 @@ def apply_acceleration_not_smooth(self, veh_id, acc): list of vehicle identifiers acc : float or array_like requested accelerations from the vehicles + smooth_duration : float + duration in seconds over which acceleration should be smoothly applied, default: 0 """ pass @@ -373,6 +363,7 @@ def get_energy_model(self, veh_id, error=""): vehicle id, or list of vehicle ids error : str value that is returned if the vehicle is not found + Returns ------- subclass of BaseEnergyModel @@ -785,23 +776,8 @@ def get_accel(self, veh_id): pass @abstractmethod - def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """Update stored acceleration without noise without failsafe of vehicle with veh_id.""" - pass - - @abstractmethod - def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """Update stored acceleration without noise with failsafe of vehicle with veh_id.""" - raise NotImplementedError - - @abstractmethod - def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """Update stored acceleration with noise without failsafe of vehicle with veh_id.""" - pass - - @abstractmethod - def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): - """Update 
stored acceleration with noise with failsafe of vehicle with veh_id.""" + def update_accel(self, veh_id, accel, noise=True, failsafe=True): + """Update stored acceleration of vehicle with veh_id.""" pass @abstractmethod @@ -809,26 +785,6 @@ def get_2d_position(self, veh_id, error=-1001): """Return (x, y) position of vehicle with veh_id.""" pass - @abstractmethod - def get_accel_no_noise_no_failsafe(self, veh_id): - """Return the acceleration without noise without failsafe of vehicle with veh_id.""" - pass - - @abstractmethod - def get_accel_no_noise_with_failsafe(self, veh_id): - """Return the acceleration without noise with failsafe of vehicle with veh_id.""" - pass - - @abstractmethod - def get_accel_with_noise_no_failsafe(self, veh_id): - """Return the acceleration with noise without failsafe of vehicle with veh_id.""" - pass - - @abstractmethod - def get_accel_with_noise_with_failsafe(self, veh_id): - """Return the acceleration with noise with failsafe of vehicle with veh_id.""" - pass - @abstractmethod def get_realized_accel(self, veh_id): """Return the acceleration that the vehicle actually make.""" diff --git a/flow/core/kernel/vehicle/traci.py b/flow/core/kernel/vehicle/traci.py index ef401d180..8f76b40d0 100644 --- a/flow/core/kernel/vehicle/traci.py +++ b/flow/core/kernel/vehicle/traci.py @@ -293,10 +293,8 @@ def _add_departed(self, veh_id, veh_type): self.__vehicles[veh_id]["type"] = veh_type # specify energy model - energy_model = \ - self.type_parameters[veh_type]["energy_model"] - self.__vehicles[veh_id]["energy_model"] = \ - energy_model[0](veh_id, **energy_model[1]) + self.__vehicles[veh_id]["energy_model"] = self.type_parameters[ + veh_type]["energy_model"]() car_following_params = \ self.type_parameters[veh_type]["car_following_params"] @@ -971,7 +969,7 @@ def _prev_edge_followers(self, veh_id, edge_dict, lane, num_edges): return tailway, follower - def apply_acceleration(self, veh_ids, acc): + def apply_acceleration(self, veh_ids, acc, 
smooth_duration=0): """See parent class.""" # to handle the case of a single vehicle if type(veh_ids) == str: @@ -983,7 +981,10 @@ def apply_acceleration(self, veh_ids, acc): self.__vehicles[vid]["accel"] = acc[i] this_vel = self.get_speed(vid) next_vel = max([this_vel + acc[i] * self.sim_step, 0]) - self.kernel_api.vehicle.slowDown(vid, next_vel, 1e-3) + if smooth_duration: + self.kernel_api.vehicle.slowDown(vid, next_vel, smooth_duration) + else: + self.kernel_api.vehicle.setSpeed(vid, next_vel) def apply_acceleration_not_smooth(self, veh_ids, acc): """See parent class.""" @@ -1158,52 +1159,34 @@ def set_max_speed(self, veh_id, max_speed): """See parent class.""" self.kernel_api.vehicle.setMaxSpeed(veh_id, max_speed) - # add for data pipeline - def get_accel(self, veh_id): + def get_accel(self, veh_id, noise=True, failsafe=True): """See parent class.""" - if "accel" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel"] = None - return self.__vehicles[veh_id]["accel"] - - def update_accel_no_noise_no_failsafe(self, veh_id, accel_no_noise_no_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = accel_no_noise_no_failsafe - - def update_accel_no_noise_with_failsafe(self, veh_id, accel_no_noise_with_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = accel_no_noise_with_failsafe - - def update_accel_with_noise_no_failsafe(self, veh_id, accel_with_noise_no_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = accel_with_noise_no_failsafe - - def update_accel_with_noise_with_failsafe(self, veh_id, accel_with_noise_with_failsafe): - """See parent class.""" - self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = accel_with_noise_with_failsafe - - def get_accel_no_noise_no_failsafe(self, veh_id): - """See parent class.""" - if "accel_no_noise_no_failsafe" not in self.__vehicles[veh_id]: - 
self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] = None - return self.__vehicles[veh_id]["accel_no_noise_no_failsafe"] + metric_name = 'accel' + if noise: + metric_name += '_with_noise' + else: + metric_name += '_no_noise' + if failsafe: + metric_name += '_with_failsafe' + else: + metric_name += '_no_failsafe' - def get_accel_no_noise_with_failsafe(self, veh_id): - """See parent class.""" - if "accel_no_noise_with_failsafe" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] = None - return self.__vehicles[veh_id]["accel_no_noise_with_failsafe"] + return self.__vehicles[veh_id].get(metric_name, None) \ + or self.get_realized_accel(veh_id) - def get_accel_with_noise_no_failsafe(self, veh_id): + def update_accel(self, veh_id, accel, noise=True, failsafe=True): """See parent class.""" - if "accel_with_noise_no_failsafe" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] = None - return self.__vehicles[veh_id]["accel_with_noise_no_failsafe"] + metric_name = 'accel' + if noise: + metric_name += '_with_noise' + else: + metric_name += '_no_noise' + if failsafe: + metric_name += '_with_failsafe' + else: + metric_name += '_no_failsafe' - def get_accel_with_noise_with_failsafe(self, veh_id): - """See parent class.""" - if "accel_with_noise_with_failsafe" not in self.__vehicles[veh_id]: - self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] = None - return self.__vehicles[veh_id]["accel_with_noise_with_failsafe"] + self.__vehicles[veh_id][metric_name] = accel def get_realized_accel(self, veh_id): """See parent class.""" diff --git a/flow/core/rewards.py b/flow/core/rewards.py index b4af4c5bc..33960f8cd 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -306,6 +306,61 @@ def punish_rl_lane_changes(env, penalty=1): return total_lane_change_penalty +def energy_consumption(env, gain=.001): + """Calculate power consumption for all vehicle. 
+ + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. + gain : float + scaling factor for the reward + """ + veh_ids = env.k.vehicle.get_ids() + return veh_energy_consumption(env, veh_ids, gain) + + +def veh_energy_consumption(env, veh_ids=None, gain=.001): + """Calculate power consumption of a vehicle. + + Assumes vehicle is an average sized vehicle. + The power calculated here is the lower bound of the actual power consumed + by a vehicle. + + Parameters + ---------- + env : flow.envs.Env + the environment variable, which contains information on the current + state of the system. + veh_ids : [list] or str + list of veh_ids or single veh_id to compute the reward over + gain : float + scaling factor for the reward + """ + if veh_ids is None: + veh_ids = env.k.vehicle.get_ids() + elif not isinstance(veh_ids, list): + veh_ids = [veh_ids] + + power = 0 + for veh_id in veh_ids: + if veh_id not in env.k.vehicle.previous_speeds: + continue + energy_model = env.k.vehicle.get_energy_model(veh_id) + if energy_model != "": + speed = env.k.vehicle.get_speed(veh_id) + accel = env.k.vehicle.get_accel(veh_id, noise=False, failsafe=True) + grade = env.k.vehicle.get_road_grade(veh_id) + power += energy_model.get_instantaneous_power(accel, speed, grade) + + return -gain * power + + def instantaneous_mpg(env, veh_ids=None, gain=.001): """Calculate the instantaneous mpg for every simulation step specific to the vehicle type. 
@@ -330,16 +385,16 @@ def instantaneous_mpg(env, veh_ids=None, gain=.001): energy_model = env.k.vehicle.get_energy_model(veh_id) if energy_model != "": speed = env.k.vehicle.get_speed(veh_id) - accel = env.k.vehicle.get_accel_no_noise_with_failsafe(veh_id) + accel = env.k.vehicle.get_accel(veh_id, noise=False, failsafe=True) grade = env.k.vehicle.get_road_grade(veh_id) gallons_per_hr = energy_model.get_instantaneous_fuel_consumption(accel, speed, grade) - if gallons_per_hr > 0 and speed >= 0.0: + if speed >= 0.0: cumulative_gallons += gallons_per_hr cumulative_distance += speed cumulative_gallons /= 3600.0 cumulative_distance /= 1609.0 # miles / gallon is (distance_dot * \delta t) / (gallons_dot * \delta t) - mpg = cumulative_distance / cumulative_gallons + mpg = cumulative_distance / (cumulative_gallons + 1e-6) return mpg * gain diff --git a/flow/data_pipeline/data_pipeline.py b/flow/data_pipeline/data_pipeline.py index 626c59e39..f0e3637f6 100644 --- a/flow/data_pipeline/data_pipeline.py +++ b/flow/data_pipeline/data_pipeline.py @@ -10,37 +10,24 @@ import json -def generate_trajectory_table(data_path, extra_info, partition_name): - """Generate desired output for the trajectory_table based on standard SUMO emission. +def generate_trajectory_table(emission_files, trajectory_table_path, source_id): + """Generate desired output for the trajectory_table based on SUMO emissions. 
Parameters ---------- - data_path : str - path to the standard SUMO emission - extra_info : dict - extra information needed in the trajectory table, collected from flow - partition_name : str - the name of the partition to put this output to - - Returns - ------- - output_file_path : str - the local path of the outputted csv file + emission_files : list + paths to the SUMO emission + trajectory_table_path : str + path to the file for S3 upload only + source_id : str + a unique id for the simulation that generate these emissions """ - raw_output = pd.read_csv(data_path, index_col=["time", "id"]) - required_cols = {"time", "id", "speed", "x", "y"} - raw_output = raw_output.drop(set(raw_output.columns) - required_cols, axis=1) - - extra_info = pd.DataFrame.from_dict(extra_info) - extra_info.set_index(["time", "id"]) - raw_output = raw_output.merge(extra_info, how="left", left_on=["time", "id"], right_on=["time", "id"]) - - # add the partition column - # raw_output['partition'] = partition_name - raw_output = raw_output.sort_values(by=["time", "id"]) - output_file_path = data_path[:-4]+"_trajectory.csv" - raw_output.to_csv(output_file_path, index=False) - return output_file_path + for i in range(len(emission_files)): + emission_output = pd.read_csv(emission_files[i]) + emission_output['source_id'] = source_id + emission_output['run_id'] = "run_{}".format(i) + # add header row to the file only at the first run (when i==0) + emission_output.to_csv(trajectory_table_path, mode='a+', index=False, header=(i == 0)) def write_dict_to_csv(data_path, extra_info, include_header=False): @@ -97,11 +84,11 @@ def get_extra_info(veh_kernel, extra_info, veh_ids, source_id, run_id): veh_kernel.get_leader(vid)) - veh_kernel.get_speed(vid)) extra_info["target_accel_with_noise_with_failsafe"].append(veh_kernel.get_accel(vid)) extra_info["target_accel_no_noise_no_failsafe"].append( - veh_kernel.get_accel_no_noise_no_failsafe(vid)) + veh_kernel.get_accel(vid, noise=False, failsafe=False)) 
extra_info["target_accel_with_noise_no_failsafe"].append( - veh_kernel.get_accel_with_noise_no_failsafe(vid)) + veh_kernel.get_accel(vid, noise=True, failsafe=False)) extra_info["target_accel_no_noise_with_failsafe"].append( - veh_kernel.get_accel_no_noise_with_failsafe(vid)) + veh_kernel.get_accel(vid, noise=False, failsafe=True)) extra_info["realized_accel"].append(veh_kernel.get_realized_accel(vid)) extra_info["road_grade"].append(veh_kernel.get_road_grade(vid)) extra_info["edge_id"].append(veh_kernel.get_edge(vid)) diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py index fe7f463bb..ed75efd09 100644 --- a/flow/energy_models/base_energy.py +++ b/flow/energy_models/base_energy.py @@ -33,6 +33,7 @@ def get_instantaneous_power(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float @@ -53,6 +54,7 @@ def get_instantaneous_fuel_consumption(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float diff --git a/flow/energy_models/power_demand.py b/flow/energy_models/power_demand.py index 8cb5fd20c..d8cd918e0 100644 --- a/flow/energy_models/power_demand.py +++ b/flow/energy_models/power_demand.py @@ -23,6 +23,8 @@ def __init__(self, aerodynamic_drag_coeff=0.4, p1_correction=4598.7155, p3_correction=975.12719): + super(PowerDemandModel, self).__init__() + self.g = 9.807 self.rho_air = 1.225 self.gamma = 1 @@ -68,6 +70,7 @@ def get_regen_cap(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float @@ -85,6 +88,7 @@ def get_power_correction_factor(self, accel, speed, grade): Instantaneous speed of the vehicle grade : float Instantaneous road grade of the vehicle + Returns ------- float @@ -113,7 +117,7 @@ def __init__(self, drag_coeff=0.7041355229, p1_correction=4598.7155, 
p3_correction=975.12719): - super(PDMCombustionEngine, self).__init__() + super(PDMCombustionEngine, self).__init__(p1_correction=p1_correction, p3_correction=p3_correction) self.fuel_consumption_power_coeffs = np.array([idle_coeff, linear_friction_coeff, quadratic_friction_coeff, diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index 492304b48..b65b7a0c1 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -11,6 +11,8 @@ class ToyotaModel(BaseEnergyModel, metaclass=ABCMeta): """Base Toyota Energy model class.""" def __init__(self, filename): + super(ToyotaModel, self).__init__() + # download file from s3 bucket s3 = boto3.client('s3') s3.download_file('toyota.restricted', filename, 'temp.pkl') diff --git a/flow/envs/base.py b/flow/envs/base.py index 8a36d6a47..c8df037b0 100644 --- a/flow/envs/base.py +++ b/flow/envs/base.py @@ -151,11 +151,12 @@ def __init__(self, self.state = None self.obs_var_labels = [] - self.num_training_iters = 0 + # number of training iterations (used by the rllib training procedure) + self._num_training_iters = 0 # track IDs that have ever been observed in the system - self.observed_ids = set() - self.observed_rl_ids = set() + self._observed_ids = set() + self._observed_rl_ids = set() # simulation step size self.sim_step = sim_params.sim_step @@ -333,8 +334,8 @@ def step(self, rl_actions): for _ in range(self.env_params.sims_per_step): # This tracks vehicles that have appeared during warmup steps if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: - self.observed_ids.update(self.k.vehicle.get_ids()) - self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self._observed_ids.update(self.k.vehicle.get_ids()) + self._observed_rl_ids.update(self.k.vehicle.get_rl_ids()) self.time_counter += 1 self.step_counter += 1 @@ -390,7 +391,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash 
= self.k.simulation.check_collision() - self.crash = crash + # stop collecting new simulation steps if there is a collision if crash: break @@ -411,16 +412,6 @@ def step(self, rl_actions): # time horizon being met done = (self.time_counter >= self.env_params.sims_per_step * (self.env_params.warmup_steps + self.env_params.horizon)) - if crash: - print( - "**********************************************************\n" - "**********************************************************\n" - "**********************************************************\n" - "WARNING: There was a crash. \n" - "**********************************************************\n" - "**********************************************************\n" - "**********************************************************" - ) # compute the info for each agent infos = {} @@ -454,8 +445,8 @@ def reset(self): self.time_counter = 0 # reset the observed ids - self.observed_ids = set() - self.observed_rl_ids = set() + self._observed_ids = set() + self._observed_rl_ids = set() # Now that we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things @@ -836,3 +827,7 @@ def pyglet_render(self): sight = self.renderer.get_sight( orientation, id) self.sights.append(sight) + + def set_iteration_num(self): + """Increment the number of training iterations.""" + self._num_training_iters += 1 diff --git a/flow/envs/multiagent/base.py b/flow/envs/multiagent/base.py index 77a3d2c12..e708a6ce6 100644 --- a/flow/envs/multiagent/base.py +++ b/flow/envs/multiagent/base.py @@ -50,8 +50,8 @@ def step(self, rl_actions): """ for _ in range(self.env_params.sims_per_step): if self.time_counter <= self.env_params.sims_per_step * self.env_params.warmup_steps: - self.observed_ids.update(self.k.vehicle.get_ids()) - self.observed_rl_ids.update(self.k.vehicle.get_rl_ids()) + self._observed_ids.update(self.k.vehicle.get_ids()) + self._observed_rl_ids.update(self.k.vehicle.get_rl_ids()) self.time_counter += 1 
self.step_counter += 1 @@ -104,7 +104,7 @@ def step(self, rl_actions): # crash encodes whether the simulator experienced a collision crash = self.k.simulation.check_collision() - self.crash = crash + # stop collecting new simulation steps if there is a collision if crash: print('A CRASH! A CRASH!!!!!! AAAAAAAAAH!!!!!') @@ -155,8 +155,8 @@ def reset(self, new_inflow_rate=None): self.time_counter = 0 # reset the observed ids - self.observed_ids = set() - self.observed_rl_ids = set() + self._observed_ids = set() + self._observed_rl_ids = set() # Now that we've passed the possibly fake init steps some rl libraries # do, we can feel free to actually render things @@ -322,7 +322,3 @@ def apply_rl_actions(self, rl_actions=None): # clip according to the action space requirements clipped_actions = self.clip_actions(rl_actions) self._apply_rl_actions(clipped_actions) - - def set_iteration_num(self): - """Increment the number of training iterations.""" - self.num_training_iters += 1 diff --git a/flow/envs/multiagent/i210.py b/flow/envs/multiagent/i210.py index 450a0269c..004208cb4 100644 --- a/flow/envs/multiagent/i210.py +++ b/flow/envs/multiagent/i210.py @@ -3,7 +3,7 @@ from gym.spaces import Box import numpy as np -from flow.core.rewards import miles_per_gallon, miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.envs.multiagent.base import MultiEnv # largest number of lanes on any given edge in the network @@ -75,7 +75,6 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): self.control_range = env_params.additional_params.get('control_range', None) self.no_control_edges = env_params.additional_params.get('no_control_edges', []) self.mpg_reward = env_params.additional_params["mpg_reward"] - self.mpj_reward = env_params.additional_params["mpj_reward"] self.look_back_length = env_params.additional_params["look_back_length"] # whether to add a slight reward for opening up a gap that will be annealed out N iterations in @@ -90,7 
+89,6 @@ def __init__(self, env_params, sim_params, network, simulator='traci'): # how many timesteps to anneal the headway curriculum over self.speed_curriculum_iters = env_params.additional_params["speed_curriculum_iters"] self.speed_reward_gain = env_params.additional_params["speed_reward_gain"] - self.num_training_iters = 0 self.leader = [] # penalize stops @@ -197,23 +195,12 @@ def compute_reward(self, rl_actions, **kwargs): for rl_id in valid_ids: rewards[rl_id] = 0 if self.mpg_reward: - rewards[rl_id] = miles_per_gallon(self, rl_id, gain=1.0) / 100.0 + rewards[rl_id] = instantaneous_mpg(self, rl_id, gain=1.0) / 100.0 follow_id = rl_id for i in range(self.look_back_length): follow_id = self.k.vehicle.get_follower(follow_id) if follow_id not in ["", None]: - rewards[rl_id] += miles_per_gallon(self, follow_id, gain=1.0) / 100.0 - else: - break - elif self.mpj_reward: - rewards[rl_id] = miles_per_megajoule(self, rl_id, gain=1.0) / 100.0 - follow_id = rl_id - for i in range(self.look_back_length): - follow_id = self.k.vehicle.get_follower(follow_id) - if follow_id not in ["", None]: - # if self.time_counter > 700 and miles_per_megajoule(self, follow_id, gain=1.0) > 1.0: - # import ipdb; ipdb.set_trace() - rewards[rl_id] += miles_per_megajoule(self, follow_id, gain=1.0) / 100.0 + rewards[rl_id] += instantaneous_mpg(self, follow_id, gain=1.0) / 100.0 else: break else: @@ -230,7 +217,7 @@ def compute_reward(self, rl_actions, **kwargs): else: if self.mpg_reward: - reward = np.nan_to_num(miles_per_gallon(self, valid_human_ids, gain=1.0)) / 100.0 + reward = np.nan_to_num(instantaneous_mpg(self, valid_human_ids, gain=1.0)) / 100.0 else: speeds = self.k.vehicle.get_speed(valid_human_ids) des_speed = self.env_params.additional_params["target_velocity"] @@ -244,7 +231,7 @@ def compute_reward(self, rl_actions, **kwargs): rewards = {rl_id: reward for rl_id in valid_ids} # curriculum over time-gaps - if self.headway_curriculum and self.num_training_iters <= 
self.headway_curriculum_iters: + if self.headway_curriculum and self._num_training_iters <= self.headway_curriculum_iters: t_min = self.min_time_headway # smallest acceptable time headway for veh_id, rew in rewards.items(): lead_id = self.k.vehicle.get_leader(veh_id) @@ -254,12 +241,12 @@ def compute_reward(self, rl_actions, **kwargs): t_headway = max( self.k.vehicle.get_headway(veh_id) / self.k.vehicle.get_speed(veh_id), 0) - scaling_factor = max(0, 1 - self.num_training_iters / self.headway_curriculum_iters) + scaling_factor = max(0, 1 - self._num_training_iters / self.headway_curriculum_iters) penalty += scaling_factor * self.headway_reward_gain * min((t_headway - t_min) / t_min, 0) rewards[veh_id] += penalty - if self.speed_curriculum and self.num_training_iters <= self.speed_curriculum_iters: + if self.speed_curriculum and self._num_training_iters <= self.speed_curriculum_iters: des_speed = self.env_params.additional_params["target_velocity"] for veh_id, rew in rewards.items(): @@ -275,7 +262,7 @@ def compute_reward(self, rl_actions, **kwargs): speed_reward += ((des_speed - np.abs(speed - des_speed)) ** 2) / (des_speed ** 2) else: break - scaling_factor = max(0, 1 - self.num_training_iters / self.speed_curriculum_iters) + scaling_factor = max(0, 1 - self._num_training_iters / self.speed_curriculum_iters) rewards[veh_id] += speed_reward * scaling_factor * self.speed_reward_gain @@ -345,7 +332,7 @@ def additional_command(self): departed_ids = self.k.vehicle.get_departed_ids() if isinstance(departed_ids, tuple) and len(departed_ids) > 0: for veh_id in departed_ids: - if veh_id not in self.observed_ids: + if veh_id not in self._observed_ids: self.k.vehicle.remove(veh_id) def state_util(self, rl_id): diff --git a/flow/utils/rllib.py b/flow/utils/rllib.py index 4d2d8553f..fc3229e52 100644 --- a/flow/utils/rllib.py +++ b/flow/utils/rllib.py @@ -95,8 +95,9 @@ def get_flow_params(config): if type(config) == dict: flow_params = 
json.loads(config['env_config']['flow_params']) else: - config = json.load(open(config, 'r')) - flow_params = json.loads(config['env_config']['flow_params']) + flow_params = json.load(open(config, 'r')) + if 'env_config' in flow_params: + flow_params = json.loads(flow_params['env_config']['flow_params']) # reinitialize the vehicles class from stored data veh = VehicleParams() diff --git a/flow/visualize/i210_replay.py b/flow/visualize/i210_replay.py index c9e820b15..4c7498413 100644 --- a/flow/visualize/i210_replay.py +++ b/flow/visualize/i210_replay.py @@ -247,6 +247,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= metadata['strategy'].append(strategy) i = 0 + t = 0 while i < args.num_rollouts: print("Rollout iter", i) vel = [] @@ -259,7 +260,7 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= env.pipeline_params = (extra_info, source_id, run_id) state = env.reset() initial_vehicles = set(env.k.vehicle.get_ids()) - for _ in range(env_params.horizon): + for t in range(env_params.horizon): if rllib_config: if multiagent: action = {} @@ -311,13 +312,13 @@ def replay(args, flow_params, output_dir=None, transfer_test=None, rllib_config= else: per_vehicle_energy_trace[veh_id].append(-1 * veh_energy_consumption(env, veh_id)) - if type(done) is dict and done['__all__']: - break - elif type(done) is not dict and done: + if type(done) is dict and done['__all__'] or done is True: break elif max_completed_trips is not None and len(completed_vehicle_avg_energy) > max_completed_trips: break - if env.crash: + + if t < env_params.horizon - 1: + # Early terminations signify a collision. print("Crash on iter", i) else: # Store the information from the run in info_dict. 
diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index a9392e21d..a9742e249 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -4,10 +4,13 @@ file, and then uses this data to generate a time-space diagram, with the x-axis being the time (in seconds), the y-axis being the position of a vehicle, and color representing the speed of te vehicles. + If the number of simulation steps is too dense, you can plot every nth step in the plot by setting the input `--steps=n`. + Note: This script assumes that the provided network has only one lane on the each edge, or one lane on the main highway in the case of MergeNetwork. + Usage ----- :: @@ -167,9 +170,6 @@ def _merge(data): def _highway(data): r"""Generate time and position data for the highway. - We generate plots for all lanes, so the segments are wrapped in - a dictionary. - Parameters ---------- data : pd.DataFrame @@ -382,10 +382,20 @@ def _get_abs_pos(df, params): return ret -def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None, ghost_edges=None, ghost_bounds=None): +def plot_tsd(ax, + df, + segs, + cmap, + min_speed=0, + max_speed=10, + start=0, + lane=None, + ghost_edges=None, + ghost_bounds=None): """Plot the time-space diagram. - Take the pre-processed segments and other meta-data, then plot all the line segments. + Take the pre-processed segments and other meta-data, then plot all the line + segments. 
Parameters ---------- @@ -394,7 +404,8 @@ def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None, df : pd.DataFrame data used for axes bounds and speed coloring segs : list of list of lists - line segments to be plotted, where each segment is a list of two [x,y] pairs + line segments to be plotted, where each segment is a list of two [x,y] + pairs min_speed : int or float minimum speed in colorbar max_speed : int or float @@ -407,10 +418,6 @@ def plot_tsd(ax, df, segs, cmap, min_speed=0, max_speed=10, start=0, lane=None, ghost edge names to be greyed out, default None ghost_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None - - Returns - ------- - None """ norm = plt.Normalize(min_speed, max_speed) @@ -585,4 +592,10 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): module = __import__("examples.exp_configs.non_rl", fromlist=[args.flow_params]) flow_params = getattr(module, args.flow_params).flow_params - tsd_main(args.trajectory_path, flow_params, min_speed=args.min_speed, max_speed=args.max_speed, start=args.start) + tsd_main( + args.trajectory_path, + flow_params, + min_speed=args.min_speed, + max_speed=args.max_speed, + start=args.start + ) diff --git a/flow/visualize/visualizer_rllib.py b/flow/visualize/visualizer_rllib.py index 261dcbbc3..ec96e3306 100644 --- a/flow/visualize/visualizer_rllib.py +++ b/flow/visualize/visualizer_rllib.py @@ -26,7 +26,7 @@ from ray.rllib.agents.registry import get_agent_class from ray.tune.registry import register_env -from flow.core.rewards import miles_per_gallon, miles_per_megajoule +from flow.core.rewards import instantaneous_mpg from flow.core.util import emission_to_csv from flow.utils.registry import make_create_env from flow.utils.rllib import get_flow_params @@ -234,7 +234,6 @@ def visualizer_rllib(args): final_outflows = [] final_inflows = [] mpg = [] - mpj = [] mean_speed = [] std_speed = [] for i in range(args.num_rollouts): @@ 
-254,8 +253,7 @@ def visualizer_rllib(args): if speeds: vel.append(np.mean(speeds)) - mpg.append(miles_per_gallon(env.unwrapped, vehicles.get_ids(), gain=1.0)) - mpj.append(miles_per_megajoule(env.unwrapped, vehicles.get_ids(), gain=1.0)) + mpg.append(instantaneous_mpg(env.unwrapped, vehicles.get_ids(), gain=1.0)) if multiagent: action = {} @@ -329,8 +327,6 @@ def visualizer_rllib(args): print('Average, std miles per gallon: {}, {}'.format(np.mean(mpg), np.std(mpg))) - print('Average, std miles per megajoule: {}, {}'.format(np.mean(mpj), np.std(mpj))) - # Compute arrival rate of vehicles in the last 500 sec of the run print("\nOutflows (veh/hr):") print(final_outflows) diff --git a/requirements.txt b/requirements.txt index f06c3c69f..a4f6f83f8 100644 --- a/requirements.txt +++ b/requirements.txt @@ -25,7 +25,7 @@ redis~=2.10.6 pandas==0.24.2 plotly==2.4.0 tabulate -tensorflow==1.14.0 +tensorflow==1.15.2 awscli==1.16.309 torch==1.4.0 pytz diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1 index 0693ed4b62a9cabcdbecb267201ea862144f212c..d346e9dc58b39a5b511ced70927eac1d0d32579b 100644 GIT binary patch literal 20358 zcmZU)c{oKn`xww<#`NGS4uu8$eLzHzgEsBdrxPk1j+#8YJS!rC1?(e=S=!wiG{{Wq^O+#b5w zKg95VMoGB_I{Y(SiYKNQ#S;(l6d!#cBy?@4FHd4GZ;apQYscz(%Kh^eqmAo*OpHe7 z+~(`glU%=Tdq7Z_;rhT}U&Day{-K-K29M4mI#)6rF{fg%fy6 zZoKij&Z3^eqoy!CWmi|%1%F3>|NJ=%h4NI^?Y9*Y6%wkuFCvJRzCj*c`hXiJ55r%Z zI%qt?g}8;2xLcADKIQmh@ajke^W*f(&~(AO!7})`(v_~dCNJ1UW^mVE83At?C&(F8 z6im09$gC1ortzNk&~uasB;U^xoYhnk#K+7Mxb_*~M*kn+GrkWlrJcd5**wNY0r?!RumkKwT4)Zs6eyj3(CyzB8LZKiRn2f!Qb;b+>zTn@=8An zIAStP%Wre8)w~c~kZH~Rn$ro=s|>+QIg?#HMo7@`sfyGFXTf=sX;fdzQ392>bB?pY%amdS$Rlg#JFSjjS;K} zsKc$%x)>rgjQ$~Y(CQHcSt*_lmu8cZY2-Tt&(Zn7Y~Rz|QQ<2xKX zjT3;6h{7Dy@-uhj#Ro~ z$qX%lV%9(eFs_U>*A=Eu_x zVxrVb-%ve4Lsb&7`5g%}WW(XYxIJWp-U4zxPoMcH_YMo=uS5FX&!AD90*fAKbG=+2 za?aiihi=hXNNW1Y+^3~5vt&FsjXc9r%T(B2XTasvGlJ*(v$*3|D&Y`2o@=+)o-1ED 
zotsqhoa~jljyj(8Kmy`mV~d?Y(}EJ-gizkZb)GW+$79!RUK_%j zH{O*0c;3`Fo|@nI{dSQLrL(Q|DhfFo8?rfu>IK$+U8h;QWp3yE5tHZtP4sA}SnbCD zl$FZqXdA<+QIz3NJsZb)cQk>MXL+yjnv#pP<^HM6x!Mf=hgqtu@<171h)GWgW{8|Ml70&fKf<`xgaPxkrrQsfY43)_IQquh~1+ z`fvCB|HIAHf6dqYkNH||ylMXp9NMEF@Q>>+cFrX^p}{!KS(+=ha3N{=Jc4SnayVUj zJ&5-w!IW7Vg7S;0c&sLZ7I6dUl~5lz;$_F3^^4*Do#_C^slr&VAudp`=*9PkJ3%Z_ znj8IfJeNPa5?4B@;rY}futzZojlx{HadzjySTqZzaywz01{YH8ys+FZAB&Ytxj_mt z0?r0wZuIy*8Z9D;nPcx^`KJJGz&#N`L1GYh-}v1EvG{jT&@npi*+#@c5y6QywIFCU z0OblrVz=@dz_ZJcG|n7y-ZsMEfvwp3a-rZ{RyEx7oK6nh9uLLMaiBN4oK$TkpjTrt zxqo}J;MuJ(f$ioK;3kpLbWFgH(z->0+i`-?%)b=D_0_DgVcg>|`i>JO0zADn2bx{W}t zIs@{qW0K&}z%YF`=QR$`e~DTRE>xW% zVCn6TmO*O-*>2TP&6|%Bts2}LYA4XDvJdtq8*=-M4!~lcSMWXCoHcL`!s9lDIKSE# zK+{uDdRK>A?C}ok-wvZfmbSq7U;@dJaRi}l%J^Nx5qV;A)c$injNz$q=kwn}+Uv{s z)F>ABn*O53s4rM!X9_Jnc3jOJ18j$)J~wVdKbY~%u*u;Kl-QdH9t?hkw%}SElGA>+a9K|Cr<@(1@V zNF76U>7e)-@R;cf>wd`yE^#fnGrmf5`5qmRse2agIR%3D{46|oHxjq3l@qLBWVtV2 zox#`I-B>y8H09}p@}{rzRQ!K@=$`;E<6jTb{f`Ifx$*S>Hvr81Cjk7Z459Arb#z`_ z4VjpCfGqu0M9ntcpxxcxBs(R8*uGDrw_OUTnQ1(|75#|$*mQ`Dxt>oPi)zW?6M1CS zh7`(|Jx$eS=g?fkG-B(0gpAWTLJYGG(FrXf)U9#!oss$Uct|CwI$uI`>obUYS{8|0 z+CW0K9i%*iP~NO{|0mNh{WsGv{MT@!{}^uU#xwcH@c-ltrvJFUypYhfj7-?o-pkeu z{UFvh1@ur!IbAbokGF$s@F!hE+h+b?9TnHXxnmKyuK5j3N!t&F^D^lY6(_hk=NP-f zP6Jyl9%DSL%4hglL{=%PB69=^N=m(Xa`N;M|APL~PM{7Wxs3)tZrN`AJ! 
zvvsi8wIDyY8rIo+124jR7N^gv4m znYAg7#@;T#LH9GnqV^a`xOSWNT{T5+b19koHV%?tChS|`L$qNzS+jwMhCUBaL)aWl z1{T5$Ek53;*1-*J!Z5V>4OKle2^XtH(x1tDF;7z)rDku&Ha#QUNVeed$Gg~)5Le=0 zHVFc{9hkJxnasF~)i96SMJse7VPpSfJpa3p4roTh2ZMvqykQ(=DZ0Um<7I{9r}%X0$1p zhECynr0}jU1g);-E6_Y}&=uT&*Ce5;R)H`>KKhf~B!(G7iawxeofC5;Vu&{!uv z8~crFh#mSei(8N4%-FZ|_nSq;qVOph_wX(WnjS?KPBx@3?D~n9Rz6(asRD1_1Ot?> zL^-Tv_hxyqa=l~m+mu`6^_U|xYHtX9?8qTbODyOoAqRStza68C*22C!ui2IZjx=@X zGBhM+5c%+CqBupBRa$tQRoe8G-Vc}qCfAMd>OxgCo;DW(%m&E=8w(od5)Z0cMX*9R zh7Lcif(Gdf=>@CIaM~QSoUI+$G&4uaBl2kFd3>W6VVD#JwGtW5y=B$dr z@;~BmCUGmB+r5A>mpo2eHf$y~x$^K}{Vl40-j7~Yy+k&B6=!ZcG~geX6Ev%|j;zX_ zik7FnvCqGo*(#$3UyL?mgS-wtKGjdRpWKKFO7BU%>nXhD_K;CB%fOP-2c#=v4y@05 z4`M@obkpfG5NNOt)k;lp@LMXf*|KDe0i~OtoPq7%cEG92b3t=L2Afmk1k@@TkJ7`? zea*S?$3YPibKQvvh>D{TEs`Wfe=J_|>BfjVsYKKGJ)NA@3i^*vvtvFjfYeq^sP?HL z>jmk|d&4cX_pdeH-Nu6nUZUh~tTeM|uL1E6X6awB!{aknk$GJ=X^h%icHEd~Xb1{H zmmLPMa``GUTkj~{F0=&hUvUSHw=+Ca-ax(8OyKc}XndK$BhPn8!L6h6_)+f^>II!4 zi(eN)fJ6q}n^Zt*K3dV)W6zWKU*Ayw0TIr{(-PRKW(H?|$x!1ze~D}AQVbnBgUuIR z3C$3O^{EA9!>grm^G_=k6DcEfxj3vz$)y3`4Df@}1Jc+b4)3=+;k?y#BunWO38^S1 zt&?5Rme)rVI>lj!%VF}VI|_c?eZV-)Q^K+}Q|Mo(3^*ly4@qk`)n9dhy`11e!!IMh zeWC{#R!t+VR${2+avKTi7GN{?ImuI1rzB9zCb7#xtS$ke7X(tl?ax4~(iwm0Jex zJ-&}jKGRJ6Zk~imld_S#csx7%SU6q0wUSDGi@;+4d;Dj5NieV>k?DL~MfC!mQ8?5M zBUd(2v!8dE8^dQfGa4o_v72M*1@|PNhVjT(?jU1>Md9*YMYs{T269Gp&|z>jcuajl zDkt8C!+ok?w^bL~uRmhNR8G^D;y4m9BeAjPjtP{NtD#7BEGy+*4?>;Fz`634Qf?Wo zl}{rV1d;TJa4uPBz(t!2srV{rgzD-dO+h`m z-%&|dU2Y&wAG6`TO%c+W*r>cZ1Ex&sWl{l>(stz?330OpjT<3wb;eV2(2S39 zB^&VLnqJa7IkWL;`ET0RM;V)oAz1w3G-tL&KP^4`kqEU*GR3Pd(RyJouu6=_%bP1e z*4Q2SPQ@6L<4F|NTFBx#Uo;o7Y+T^s&I6lDbDdO#Zv{d;kS+`YHyfJ&aaDssb;#UxokBi&Jls7 zwi@)+n1hUWwHJz?n~G~T&!c8h0W{^!bbRik399eKV7^`o_3IcQkl~FL>!!h4j!`x}O%>bQNciy)a(b&LgB}lQ*PSA4kMJRK^AyngX&jc-^^-rEl_;n4ijsp%BL{%mlyhqL{f@sL`R}Ap7Lf zH>NCE9t_%fIAcr>l>K7pA^*+fQd3O*ae}nLDq?(pEnN`ZPg}p6L6^vM z%F7YM70(-q@yS{|QzJqBh0j4~odNyP`kLHa&=+)!vl;%PYgbpG8Hm( zUy=zOx#-xrjcIi{P4B;wg5!Tv*pt`d>GR*eVAwYg-_CQzj%{!GB@y2k^LTL(J2xJ_ 
zk68;zd=)xY?+be^!wD9?6lQd#rUO%*iwUb3V7~Sc)BJO6rp6GNl^er;OS7X%0gHeQ z6~m+RW?`|;V#xR9kON03`{7S9X1p`NvTg_Z<>O=Me0ZHWKDtVsZ&uODm6|v^MjTwn z{^oz+`LX2%J884xM|#!qEYVnQ!bC?G(s7&Sk(k6Ku&RG7{(fFd*N7F-=tG-G(vc~+ zQ6K~%dR!P0pH4U1DYCi+pUCCSn(Xq~O|-*{YR zapoW{GROq(vjXz{Q66>qr~^3}34mwU5Y@Yxu-C-{s-?Bydwv24!n+ugcfz1({)6#a z&_Ui>1;DkLk0EGy0=6eQz{I_iF{y7K{#Muvcdk{l9j{(fE7XF{dtUHiyA=4(_9dG- zVsPV!SejPRhL{~k9@?zH*?xc)BMqd{EeI`}&Vhn#HhthNjs3eVVZu-y3DkVax(Qz5 z&&_f;>#iY&L|f3oT5>z(F)@(ra>6~wqex)p2m1Zn0zA0#84||_bn?+Wa23zO&8u$m>m-m&%&MdA*FxyT z{5z=i!30WoIU(ne6-GAN&^V)0s9Jso&A*7#E7MG1Fn9tgtZ=0-)4$Q|N$R-i<|(-U zeFk{em2g%mU8cWocR;IkC2P$@!_(L=thLQ+8nyi}@t3?z1;vq^4>_H5|7LRttn`IB zoI!Hxz;e=k`z#GBh=a(#{vi4)AM3j{h%Y|@JX5o1NwyuVxy>in@<#E1Z7%G&=!$Bc zxv=OO~Rsdl&qJ$?F ziC26iFf>;K4ZjP+{nA?Y-KzxbnI;a7mHp(K-FKF2zZhKW67luemqhsQdT3Ac1pReG zbVIuz>Ra-NkL)Hi)e&Y6(;&Khl=K}C4WjsjOEr>`DPwY-YO54b>GDo`SMLyAP_hK; z%wCAq&hk6r&$msocf(lxK!$j)Pe|= zDEz+j8O}5nL(2;dG|9yn?tH8v_y1VadOH=;c+3U`Qasog=ZvokeewFZ2#nLUht)cD z?9|UssB-uoCU?(Yx=w2~w0j??b$5#SlIvyh;eNGrUn@kJkB|y_+kSbo*hmR>~=-xd-#S81G_yh~^ zZ9a@c9jRE_z5t4L8>51e4E8^^h2Kx7qQu8!vT)-X=HTKfxY93$tfhnW?~!~m?0bvu zJRySy4;K-WBtEVER)BL(E1<6QHt?Ueo;18#fj{arK=agHCS_OzrDh&rilncTH-Ddy z2hVpRPeBe|zAy%#D>gXcq8Fr{c)>s3w4a8$@JYdp=Y(e#$}|7J)YH`HALwE6FFUaO z4?CFc#+&oM(8KB<=y9cGHs?b^3xB}0_$Ue&W$cjgmXUZ0{?}`FOIy36vyG?9qZj|j5tQ?G&w<= zf&Xc&E%RDIi^KN)11IkWvTIXvr79_Rn1p8rK3%)jQ7 z|CkSMJnnzdht0p}!#DaWjWF-SeQIN{=}kON?kXd$fjzLzT1Fs^sknPzIlfIOz?~J= zNSbu;zEC1;la+3;%DF*&XZqqnCm!mxjs=ZTGMLB7!u|dk@KP!sidHTlkG9`|v-OF% zAt#h3*W?4|wieC5qL1xgHnD9>C-RT&6~`M|E9fno@layRp<^#PLyB`gy7eS7(Va%* zw|pNAi)_J`CTn;&{w^)-lgIkb2|>#+G;%>WZNs@m0B4sjI<)-7kJ~NU=e|i zYUF4Gs2IHV$bz!FH06$_#Eax3iC<;|%_`at$(el4S0hi@Ati~|PRU@vk`Sn$A|Q7D zE|us*m2e>=AFjcmMd+87@G;5ieMB4=jhPcvwdF(fPa4{5u=2WIB?(`4h_@G7r> z=0-(ggu_L;BlbLKlyRBn?jiiMW-NA3)4=#Yx=`N!nt7%u0kaO}!;{f<=soo>cpo1k z+BOalyF~@sDom_L%K~~cVIjFQWfo0R`#=&mJfLM&b~x!}6bW#wgq(1SVWNsCEPisd zTrfk)=}M4Owh8z{i8wbM;PuzZbdhQoQ>ZY3Y}v1Z+2b~WiiHVGnW#!7eht&|h)<2a 
zlP&P_Vjb$A_!N&jJ1~XIGr*vCIS76Rk@Y<0cH|-sEJDg2Bx|o_AwSDtl!T9m->=@V z8&*n^{<0iWQz!xJ7mt=amc>vtn1SDC_>q!ZreLz6m+Wbh!AXWWu(GO%bs9IAQEIS^=^5e#5%C+i}Zp6T#K?+xTdF9I4e_jH`D}!A)D$;Nr{g5LvT; zmJi54gy=&YTE3K+4+$V}j5j{@i6IAeeZq&!GN@#NEq*^34Sbb)sA()jUxO~1^DP_a zr|?l^eH9VRp9bEY_vqrB6F8&xE$LSYpl05kluuS-#a|tGDwa*(jh2xv8m74AN*e6u z2~g~;2EIwv#!IQ;DA=@}=H}04PiHm~XVoLP;mT34DUBl5<&Qas_~U8G(+0*>Ab~sD zYq991<>w4FWigisQL(gEfAf>h%-9BtQge`VQx%6D z)3M$i>9n=u(B38&@^apgiRq{LuDP1PJGGTA7F>bVpJst;j3Wu;AH|YWVc=G(jDe0j z;d5jKT`TMa$G#;r1UvN7<*CYu^UKKv!xq|8eV;kiSPGlnK2ZOspUGW^1n{1^n=rY2 zM)br+P?2n>;Tqq_n~qFU{x=X#30nR)?5R-mpe(GT|>qc5a(C z?s7}O{(g1%)$^B?6z`>f7uC`%IXQ9^e9?9H9h9ErgZ2-Dpy}Zkx>95rX>u9|F5ws1 zs>58oKClzsU6)6_!&QWvz7f3Ei_vW(5gyW z!7~DReLK>-nM=17h0}(O&9SIVSOq2Q0~aF;E_c8!pw(;BIEW)``(tbuO&s*Q8_!_-@ovWI>wB*mZO z(K5^dI4@i9Q0hLMccTMZ-rU70Lpy1w^L27Cua=g^&H;Cg<G`9#!nCL zif@A21q(2Ax|Cq#m=HGkWaG}jEbxkT!R`oEe#8ARbe-P|UQ@Vez4Ii|#&Ql#-ORqx zw}9=7*5kUP97xvmrt-eEuzd0$acvnO^SDPLjJuTF-WQA;P5yvU?HMMp;S!xUAPGWj z%$%*09>JcZl_cnK7Og3Drndez_;>IEnS6~UH#a2VxcmcvFTKef^<4U>W*Ty)8o}8G zO9`#*p|?y_L4CAwcj4|+sy-(NeeNg0?_h86UYCVUuYBP*r8KYh3kY>@WDJGZ;-yJ8 zU~KuAK3{u*JtrLr>!t$;rY|RFSKNYqyX|22zCnKbUO9B*@aXao4G@%`V%EjvF(XQr z%-Rnz(A;Q86u0^jvu9eUCp`lv?K=woTa#$?!FRwt6iWjx+7kQgU86`kj$dVNfW0f- zu~LNrulL6p-Vq17GJidLL@u6jF{ZS(V>?dooe0N#5^ze@0dNbCBxRtL z7Q9cTJ{PutmA(e%ggjx#Ot8fVMZxTsX?M{FhR9B{@8m%DTHHHlKhu<%h#swB;5nuY zuY@lEzT_pc$Tyx1c^`&z)=bB){4;3z=qTKDdI;!XOY^uz_ZleC16lm=%BRuQZz>>U05bGj^C#t@Xefe^NeQi_W zVq7#XdtwfPaed?>T!iZ%J?L^9MHsWXntT#6PJ=Fk#c7Op^=$*TYj6b`N99wXhtU5LwAFl|4JC5F@_I=xEQ(YFe9eK!JYd6DHzH6btWG-oz zAu#>TVLJBPUD_Zw9}=<`gVx|!=qnnHxgGj+$FXEweYBKTq)VgONF}3FodV>?4Z7N| z0!oi;hPMa)ux;;Fu)lV{#7ASTVNbX`S}%SHGi96zCpwXCW)w!_el@w1Xvj(9zoHH^ zPk>Rg0SY^oj-rqP?2(&|kC(q;IX`7Uw{0HYxupp!qoSba_ac6{|03$4K8rnik_*8v zf>EQ!fT1Gs81^L#?$7sxynQTrAzMd(tg(Sf50(MKD6TNX7qQi$@1@^>c8@krQ(J`M4@URPsV7KZZYX$LZ^TWvUEoe@0@E#j0oT?( zVcHG$(0RsJv9doBoYT)^6)K<~HF^*PD#mb#vtH@P#VF5-dw`WoA*>NVB zWDRGLRr!6)i;x(q(&kBh@2XJl-6E1CNTUu{GKs~POwv-sBlLL%^_0vcPMbDSIk{Bo 
z_$!@QgC4b#PbXa`qo`iuF_Ia3i=DaDo!Ixgl5;Wz^u(tu!gCDeIsISNhuOcXj|Kl4 z?))FaUEFvJ|BF6c|3x3hQd}~r#ETTVm7v|JSfak6g6|WQ2xHBQIgf8=;)Vx2G9+&d zI!<#zVTmr=WG#%HfvMy{;tdq4Ji~rgjtBJ)D>&jL1(&Q$!M;?5y%~^8)KpB+?{EM; z_3$jY`auu6*4|?*v!@UhE&-!F9h_IEMRO*cq6;8{CdOZ&?(apwDDwt&x0Dbhdnb}N ztqydT!%=ceRSOO$C!%CQ79H_;O?tP8GLI)k61884shHg}&J)>_V6`m_3qOb8?%^7! za5&Abe*T^0EqKZ-_R50w1(V2YkwCgWbc7h*s-jzOGQ`O_1F}u<$I2dh>wX z(oaFL3}xtEmJXuB;`s4-GUW-aK+cQ(Bv@S-c2_Kc!s@zX_`(Ts0JI+rJc@vk1vuDY3cb?6mU%RfSveV)a6P$2<_wiQxza|M~G zBe1utlsw)fMKl+cpz59_Y)jEDtZTkcZ%24T#^8K9@62z~m{0MT0kBTy!eBneiKBkx zE)@h%L0cUSq>nFyVRJOzdn<-1%8eY~wKrL%#me|z`!PKnXOHJAPJ;R2=XB+kE10)& zDNY+r!+ux|p?G>aYKRx&tuL00R81!vxoitbQMO`VoRXuW*3XDv@8QPHnxTY!CWDXv zYJ%FUKJvp~mMCi1(0ZR;BvQWsi}PKX&LzpTP4zm}d7y@E+ONn;Rt#QWoeZaL%R;!o zgUHLiXIII)faf(IQgJndPT`Bg{aR(~+R%4wcNH#HZ4Qs0Yp_cki~7$^*89V_Hlz;^*S6=}L6*L4DA$TsP@JXg~GGw=4*<_K(bwH%HB@N@!G~j4zf&lED4rK=QE& zs$C8y*W%iU?fJ{}T6zs_UAqNx_hmAs3J&-?zJa#>sDyagpYU*3INplLq)(eFsA6dz zEibrFPTDjRqaOpLr|Ay#3ww-er)}ZalR|26wizwD7QuO~N|JN?I1yPa3xC4ppnFX$ zy0o4{UQjc?%lIB_y4%S7I2Q^r!`;+BdInBe9mH?j&t>G-O-A4Sx9FOChv>wzaq#u& zLo%Yp;Du|KsKxa#R&j7VhS{DW`@iL*`}jlXiBFgzeiaR^&_LNmk@%wKHv9bgM|!|A zkenV{#&(P6QNODk7-N$}SAEYw)$koSWv(dm(Xg8Is^2F+Rvo4ue)iC5_kc~}7r{^4 zTHKlKh4JUKU`63nTy$$XyGIa5^Et(s*49MlN8W^c8!u439jCDImN|*g8r5Q_HWGt9 zmzj@C6Y=@IA$sR_5C&`*p^D0*ofb7mjH=j1`!B!bJpZ|e?mXuX*e^|LqggsJ`xp7} zG8y`;v{^|H8EVEkLtkFrOD5E}k^829Xw{W|`dZ>DiJEt`u}f(UOehzHmf3}v=zfft z8YRPpdB;iL@f|c=Esk|i*vd&5O{wy#*N`@|_xu=NOSBhH0X3x^^t9R`*!HOc*43Wn zteG4|ZXW+mI$lQ8Ydz(#Nca^!nO;uB1Ow!f{UuV^a0h%2?Waq!F4L>&=Qsh2{xTDi z50EP5T&z6lPR71?LEnv?jc=7pu(;74dskeb*<<5TktpK2oJ6>hR7tHz#jtZ;6STV+ z!X|5dvcA_m1I`s&(I*E&Km=1cnPOYW$i+5V*nN*)xcY`O=vYEzhNn{JIn$wZ{~NY^ z?P#f{=!*k=I>cxZ2M(`H!uvi`@t3Cv;rcB{gLt{oZrNh$eZT~=obIt=7gUBU40BXnQ-{*lEo+eNMuI zs_W@yv$yPAbvIlpHpE(uKShF%IDnZ2-s7X!zqvKIE^;Kn$B>@_V^vwc))(>8@X z%gSNp4*qO3?_3FYhHg@~q^WpnMHUtR`KeK%NexzeR+93w`=RpP37jxFoR(Id;ZGV> zvn(v}#4i&yp=FmQeo=oyhlg8nugYv-cz*N>6@jm_Iw31B0gb0_C5^gLf^Q-7FfTbA 
za^}1wOAmad4F?6pR%9!j+_njIA&nUAip9^yhpBH^6k9ubuA`kGm zG+O8}q#uZ;DShkFIMtfgxoeS`CkBYfQWw0JWrjm`9J-||VH8<5(0c<4P_=6d`c-Qa z+ZhBmR4bxYwIrii^v0i6L9NsrW);f^hw$f;ga#diPB=>CiXPIl z!Bg?EXa`lECJA9`CqaJ90epBj3{Kj`Nwo9}s6gXNcKw9jI6yfTZR!m@j>iocAY7XvsO6Y#M6XeS>8=NOA1KYj@gSS#5xLUMPk7_j@ zPj->7H^xziH|D?}?W~rstHVE{ABnKc1FDmz2Qz8|SQi6ZcI%g1vZU1r#~Q3Bwhq#8 zG_8hw)yzj(_cV~+w*Vsgj4}OE28o@m1Ye%0z*<#(DnBt6XDBSj8OEl#fnS1JU+UqS zSPcEM%n%Q3`%OMcB-7XhPsm%BsZ_W*(B;`@IR zn2$`)(J9~)y^;^@E_C4jJgS)-3R?S+h8@c#rU9bdzrxxiWaKB^^v)kQn@_X9jzk^x^oH4e)i!BIG31GdilJ z%xMirx?(h+%pr^5UXcr255Gy$OP$FxnNo7*+CfH4v6MJ2^Tn@U*3)&)`>@mS5SgpG z0FpCuuxR02vO!S{T#V+>*4RUYtd7O*TjiLz-4Nk;B4rx9!RXo}PGhw)(H}fS1-bc9 zrA?q2PLP2YKWW6=)41yE4=NIqL9N~^fjH+a6aD%z^hC-M8Gap`k{QW}Z*_s+l}E|5 z)MB*!(TV7w55s29*+12W%-?N0>4e+|^o2t$eNm$gvJQTjvSmE=IN(RMsu;$;BU)KAp=aevd>3y{4YL{YLi{+Hc({s6c20wU zfDyV{?-G?A=8T1VU#gz;4`7~$sLKTMMS02EEk!XhhDja=X~k)0!V+2YU@;Hos5>)mUn z9j0w`Ugu@7h|Hr}1J+RH>4Wb!3g|Ch2D8MW*;;FP1awSH1fR5A##MMb*{1c8-rM-nba>*)i}sAVP;QkIF>fLZdx7G{Y_*w;yvs)7_>JvY%luJ8Z$f!uxSV z{Q|9hcNaEna)WdC)8X10VPa25mD(jn)a%(#c6ZS_n7}SYb9Pjv>=^}p>kKg}qmk9| zo=#=^#)ALP-(VGeSIgQ_LgCur%;@i((sfJ@vV_*1q@HP2H3prUd?ZkG_E_=hO;bMIF zqYs+j?8isdC1mTC@5FWT15!756sJypOosm+N7wfWL^|d*`@T+vTsqTB`kM!d!;;JF zS1&`zsMl%?P_hMuu1`z>nS$S|mg9+N6H-ugj{f}S2G1`oz!AMvd{`(YkP4_lLCP`a zNT&f^`nQleJ=P*0^-9@A-6=%ec>-CM^Brtg&n?!=I0f*2oT;YEO2GkWG*0qzoOsQw{?d90ui(l}{q8eB{MIUp=+H+^D*n)4n zy(nvO1L?=r=;!4D8E-C=hmGHG($O)3lOJy32zP`^s-;0@=OL8!i=f;Sd-3Mi`SgwN z7fxZl7?@ca!1&4-RGk|Ni@h#nPMm%+xU56r}M0^(jD58Nru zIITAxPIe`d0znMen-zhK&;!oMJs1^H7e0Ig-U|A+9m==j1@8y^qzQdgR=7s3CjtiUC7>ynbTMn699dylP{TL=@1Z_sn29ZY9`18QHr zp7PeJ(ZgN+IHT1Yo)>1I4lGQsM28q?GA0#0sdrD_HdWZ>OqvZWi@SF@(WIrVAy z_xvBcd{>5fKX)<4CC1WhT7o~q`ia3cHQaLa0En-fIC@qv9ewSKkynyOS2swa;8_^8 zrS(LfH;D0Xlc_?}CgjiL!3FMJS~7P6-U`#;D)lO0MMNRo^|P?bTxLsrp6^3T=6GQ|UPATpiO<5d|$llQ6=EU&B=-I!SC)+5@H1C&;tF-?VFN44UpdiWl<7a7UIK;tlkIPc_Zh>241xTjxVn zN)vhCwS`7*m!N{b#f+g^2Bgio#@Tw+8moNXkmA}b47<7rJu{Rb+h&NW-ucbhJ9=0n 
z>~t;WMQj1n6Irx&Sd+Xg98adbKZ`B)mLR@26=u2hP;cH&3|dl4E-8x%nBzC8qq~gY zZ?GnKyf!7-v#$`nBXLk|ybogv4*@6q819l60jV-W+WlcHtR6E=D}J`Ko$Jd9H^7-J znQ;_XSM0!QLuouT?;fr-4Fa)pFFeS!l8&_R^zQr>z>OSY_DQ;9tP>aaoyvmd+(RVJ z{4zNr{E|-Tvc$px9jq&uirp2G+}H_1^w+%?^!ev^U_6_UpeX`2cQv5#4sXm4DyKsq zpF+vXdb%WH2K0O3z5T@qP#%5r^`9 zC+UK6We7A}20b<$X!9%r&T|8@Fue@t?l}g_o_?pp8+Az!jH<(iH5tthQxmqCp9j4#!kC(@7Br`MvM6QxO?D$yAgEqC)j+5EYdnzeJ;lfNqgY4LFiWC}gS(_l;|-aA?ti1qldS~N^Dn)9$uWITu^&G;WS4?AC7)&{}7yn^#l^!RV zVoIJ9soplGRf*y#6V)SDQUj=9alFwK-p18u!&kh zGu@=v7V62*9^F)URU|xoC+Jb?r*UkESsgdXzXbzd)nb-x7<6$nsNVq@jc_tzFUzvX zqI4R~cHT{rJEtHz?<9^Nc%2fwi&?5m4|YOh9Pkr=rAE6XY+Lh$sccsQi{*nT;<~t+ zfAautaEZ`yj3fIX;TG!d-Q@j z5;IxmelfPd!3qq{JF*G8524PsgQ)wu7cDqMq)$hzeAzmaWJTS#%huk#55R}^p)P0a;Pdx+aVFJ7@Swmx{C9!2!Pq2o~)$HR#c`x@ z*^o3$Zi+8iW}xVsfDfu$uzmTj^5zE{C~4OWN=)4f>hUtnpm=z0m`b` zQERyc_}DF?(qe18?$v>JdTLS66*rlm^&)ifc}&k8C($I?bm(y5=#^p_ED1Lvi*h5d z?e7B5bW?fTmks>FGyBnwKPI`&Z=xBijPRYuH6d)24BqA|)3n8DR1tHWep@V{-D6Ff zwRItG-K#=AHudu4z(9FJK?5FLU=5i^>fvRO20c0cPM$lsjLhd%$u)WwFoQD}S^Kwm z9J3bL?by+Ld;dJV^;8go?SpBKNOzf-f03uPJcdr|*X--NX!4S!5a->*S5+M0FZO$n z4=q3P!*1lVE6q3A>(h;xJ2DPVspl}&RtX%j_#5?I=fkqMd+CGgB6*}=6uUGx4__K@ z5^)Gacv;4Q`y)$Ao3fT-mpqbJ*Hy{2Bk%Dt*#I~hwnCoaFc#0RiG`(;{K?SANA3{B z@qVHe)aU6^sL7rMdG~*zu=NvYr{=(-e-23`ov=?-)oDq=hG}e-Lw3>f^V(o}N}!HX z6_KEc0E-Q(a5^~;)t98>=m8I@(R?Kw)9YqIzZ%fc+7x)|c^eZiC|u|}q7cGI58>m6DZ=^aHuU@|Wj&sDqG^6LiyS+Iwytr5 zqQ}J&cMWsMc%}rAQByc?jD*nP6?jTNpUoYkePQOzQcQQ%!2^0OWYH@TntQ~+ijZ~U zS&V`a2d?0YhBDH!?Z5&}Ll!skDAREa;k~dN#Cq0wsv7Bu8J{%+NyGPNAeJB zdaH}DY&{Nrz7&=8>zMwjC{Cqf5gXf;0!EX|nY}y$?7WxI0H<+K@k<`ASs98e`Ww(w zw>0Vu?tu8+9rAP1Ft%z`J}ql=$1`(ZVe8_zOtbbWd#{&-Zs#LV_nZognUlcuujbQC ztI&!QXy1=FD@ zQjmX7G_8!C4%H*L}_qs=EnWuBFJe&jrFjEqBIl8qmhz-K@Io znJ~x41rk;+q-foK;tTv9O3ygL`ivUFUcR?tlY7}fS9vUj4)LXe)WML{^osrIqX_x6v6q_OC_&8f zP?Dz{V)swDg2@+=lUR^~P9%t6(c(T60FCY@Cb zJx_dsUE*y!V}vHN)YuDIu@T&^k7X#`84iJ~6S#q0n?a(R4O45Muv?e<x$}!c3qDD|)oSgFjpFoA`fnnEe)5$DU3#B9+L=IKT1>I^1k!Ugi7c 
zN(aiJy)=c6{xuZV`;COT7kN7Pu9$BT@49vJN@k#6z@nB$qPG}65Td>VHjj#tpJ>|# zdb?LZtq85E`roF0&SfyB#0XkKdr^4A5`L+-BR5UBL|SD!biC`Cd|Ofy)*FSO$BjL7 zX{!b(^4Vl#JrBJWYhiCsRZuypPV=mtu)W?MhsuM)@hCGCK%tFB!Qbh|>St|-% z8(UeBaW*|vSVSvIqT!Ea|DbUwfzfluqH@szkVJJb?V?T`n(P41`Rj0*hZ$U)HypmL zF2OsVRal5b8nN@B&(3tn6LL6{lb31F^mH27JA^G(%wdN^vI`}{GAMj-xO|YOC)*g3 zjyWNrq}Xde1x$^Ho>tdzz}H#4fk72Ek~j6WuVhk3f9cBqr#}8;fH?hh5S+Um1Q%PW z>wgT8Ro@Me;F@$+I!MN1m&+Jm6u^9!YNL9-C3~~b9Cgm?F_q>Z?3|g4)eST7O_m!L z@`-G6as*q{)gte&JQyR|x1r3|8F%Nzv*L?7xWPM|Jt=g@h$nm4t3Cm&;oCmEY8=ky zeLN|TIUB}oyyIB2uN|ApB5=Zo2qsz zP26eiT<&F}nWTm(^Y`zH8=1yl!a+}-TNb{LTd$=c1WR{w1&a?CUGdn)jkU@WT+8=y zi{r)#s*A1*$sMs=$(uvM^jjN1F(JpH1XehE3vpt$vl9ZdT); zxv!Lj1ZWFBHEI$c%Q|6XSSDw6>ks~EnXb_FFiQB;->UGzWhQJ|c}XHJ=O1lTIeCW{)&TMN8C-wE0 z`nlR^{uscIsHOj!f&b4c0o_$L+DZd|%%Eq4bkp|(YQ)Fl&%al)oquGrE1}P%H#>FllfSQvhnJhL(~nTR-dm)70*w9*V#6`m literal 10209 zcmXYX2{cvT`@RSv$&g4&6b*)=TK$u^&epqmDkiULJcu??8{r^Kr zJ8if5A6zHi})gh%fPln#~qUq3f44xMZ-Vm~e|RDM;c!d!bX&v9d&VL}z1oSf|b zjg|j3_9Ee-O8yxZ1==DaeSW`D&|)fV4|xp33)XX$o^KU)HV5K9!5r?@#EkBW{ zt;51e?%WmwH{qbqAVG&{VbP0e+{U7Q407?|(vFufct}zBZ^L|;ck~Z#KxyuqIqUJG zOdOoH@(}v*`FN>=5e}@&!^Zv7&8(;B3UjPfg>#zag<#SL>Z*~jxcLe=L|>w@Rt+>f z*_*3?I{ zxqEV=xaVhGCz?zNCdWqz*Iv;PuADSZ=rX#T8}~$!D?8tWdn7!Nd*i@+h#U|H#fO_w zs@hy=+odIJ$aChVl#HNURx6IKc!6EAzd=Vo3qN+0lJ{5rxl&OsIHN5M-Sn4n?Yd*g z4~u@dbI?qQouf$Ng2*hN)xi3zy5WWfI;Ik-W?yhyV&=)g~+p_yCgsN!@ zYYGP-;iDq=m6M(Dskt3jdVeV=wf8VDS@{@e@tI=IjlvSn$fi8r*YaZCiql0L^-t!U zKG8Z(=dN;&s(&TVqNJ49o_?5j@M}HqYep$&^~ExtMOZ0MC$E~9J?$Xp^M_o{zOX!w znq~>-idzDY8dY&(FPHMpR+REKIv(ac-B`veytJOfP0TbozcZ7!Kg> z!CED7kU9>dZiiW~ICs3_xD$SSn~jppCR7=>kjT5#;pp*h{8q1uZ~9hY$1@L*^m;^Q ze$rv(zMW&c%Z<@Q`8>`ZT|)gM7vO_Ksjx@S2nIS^h_X=yJ+vW~OzAiZ4lfl*b(Aq| z@=8Tr=MpyS#SgkhgwpdjQb?SNH<)ZN0mnl(snX8CU#G?y6~2?68x@nn~*E9Nd;hv^@e;OvE`A#P1FtsVa!{goU++AEv% z$f!e>!hFzd3}$~T%!Xxk*O)2xv z<8vu=W!RDBrN&5SEu9b=a#rP{sC2bTSC8>&BNa&;?%R;N$@u`jpmz3WB2T5q{`Nr>>a*M zj`Nm6=m%dMihf7VTQ4KVHH`Mthk4 zk69$w>mGetD~@X)QZ!q$6p}Q{@k-DnFy4L?(^a!^8Ak~gzBos!Zi|6L^mA%`!2*l3 
zwIKbaDDZAokP?k#dfHP7KW|ATF7YBz=wnXMrklO>Zwl&3CgI+;0$O@s1-?b`!75%7 zE8PEMX3D0J5WgzC>^wqb$NLh_)KYjUohZ0FO$n=y6_P_ex~Qfuju+mS!=g78aIbzl zU8l@MvT|9`@2g@;Q_S z&zcQ}Z6Z+Sq)Yx?%B0gnSFznwe-Mi~9?n?`v zRhy{Xn{KkeGZ3_!*W&Qgc_1Zn4d(@1zz-uUY1>FC`Mmuc-tJ!nB~zEcl%)W=b~=zz z^N374H5I?ioQiq!hIrFu5e9h=(z8#wRe)tY%-itE! z^jtYsJZvkoRWc9VzIM=P+i!IFHF@m4kjg}tPQ(6$QS#X08}nOK1RXuOuycnBof)En zFFdTlA|Qtr{!GFpjmg;EX3Lr$V~KtZLq*G{0vjxb6+OZ1?o&agBU?UV?&l`@`G*{8 zq_i^Z%em~!G6NjJFjAux5c{nxKW3IF$sub4j@6L?-!fjS^Zi$?W_q=7NW|6lVn@z1*)}lki@(A(4&K!!LIxg8zZ?2hwpZh2TP|@)$)82y;qe~ zemP6!t_6^Ou_~N!C=JF%?ZullE6CC`6~-l`3QcvNQa{yx;v!m#WlOwZ{?0sXjo}b$ z;(%#uW$C+JEZR&nWxBR5!gi_Sg8S35$){J_aN{)_YF;&q4BKXzWoY&uryC zd)0|o&nQ1mT_nD7AI1b{k=R%{a&Jx@E*ouS-F$P%cTN=PvD`;H zl(k7tWCL^`SO$_UGO$?lIrIFR5o%?(lXESP$RFcbpsbP%lC`>!-1?bxt<}I^4O-As z^PPHg_)sw36Sh3`#pj)IV0(5hY&Y?Q+8|5p+PfVaKP`dpKev*eNHb={IYTqCpr#Yfz3gSb*~HV!?w#a6xyi77;R&=qXoAAV zGV<#)#nlrUVeLj+^jejU;bt#b_nAnN{QX!5Ra@#+u#ZaXj;&Rzby#ONg_>R8z)W;H z0(NQzn7j8nbK<=*O5_HU?5GYB+oTKqpUyE}m-UH(+*-^?XTkZXD!q2B1bzM)z_4f! 
zy_N2Pg8Y04&{;&ZziPocjyC=}xCTpR8PGi|CqwMlPUymV4EXjA7gjBXx^-u%Xth6j zZ%IUcjUA4XENszG0?pq>n0&OA8tBM^ET)<92Jca|$qCejxk)svbkTulk0P3mpcQ); zQ%ddd;ss?G*P#iv=9H+}mlJn)W%3}r#q^A0li>Em2zZ{_MMd5|r|jQQ^xT{XVJ3#K zdgTQ0IPO48= z)cHT9ZvzgH#dT?%$`8rZPHZx;oHEub(FgMGH!-_5tpoFO8t}5Rz-&qQRt!J20*fj> zbC|^vwEIa4&W($~)}}jTn~N#7x#qE(d)4uCmk%8Kk^;RYE(Et{!iSh45-jk**^_Ru zp8xJro-G5eJ;P?3zb8XjTmn6IVIp#ze~^v>NxI8H7qmk2apg%3QX=Jre_W=Z{G0}i z+q#q*-ndSEzeHf;ZwqRq-AUG|)G+TXkC4sbktDq4GaZ*P8IKtmQ5)%bAZ+CkY%PTT zoEdOCX%_|(1u{uT0Tg~7!IjDa@}wl1HQKq4xqNpeabH_Ovt{H#WobRFNQ^;u6<@Y* zUL9&nDkDz#Kmx9d;O6VM=+%c7WXH(K)}_aclR1*#$ee9`cv={`18-A|13# z?*ty#CrFIlw9s4>f!I+qyP z%8)>P26m@kVg5dIrLraoRKHPpCUTX3kn|R-!;c{W3F+gIL%pW`C&Nw6FFadjH(#Te}A%f*n?4BBF*3Av+l z;klavgt;hSZ0|LCZ^t_}#kGvdcpymydNLR}s|jWMt*GAV8K9v%7b`_;NR?(VtG=m; zEIt{Iu8~)0rjj3=2$4r~;e6bud=H}NPqV1zjWGCikS^S`37R&Fv;4>@Xr;Ih-3^?O zDG-HEuQ!leKA}+1R13!wPBYfOl(Bc@BqUF`O4`RakchUKkeqCeCTr&~i`gT1iob`h zJ6ejT9Q{~hjkoN|HTAr-h=1hu$%$l9mJe-pK1R+in~dNb0e^mqVEgA0vh>zNG8&o) zDfefA#@CzlbKN&JwVpb97L2fbU&(u;5bEA?IAy>qkI#|ldeYnfiTQkG7 zQ{2ULma382sPr#>qjQ+4-`RbFQ7OhOK>dWNCog`{KV!a9i7t=ayE_ ze{$)i>C(Mog8cO&rj71SW|HzneDzBKrYAI|1rpV#`ALWVaJtv&nf+I#ZT5+C%Jh7L zoFF6e5x=_kCa*-iiJ$a#h1o(w`{n>K%8wBL%(HrH#`~{iaJt75NT(yiJRDHkdYL|K%TyIml0qlN3aIzcsDb zkTpGZ_ybQt!19e=z2SsJOc1y^Z!%Rd@a272=wKT8uGn;`hpFjX{Q&+h%W_ULM%Q`JNeEsh=j?e!D8NF#=#!U$n4%f*Lo4w}pHA)l6>IzR!5& z@Co3_Z$rf!j4;d73c4C&xH~5~awmUZhkHJS3p3`+2yI$!!=(-hs8Vqj-Zg9k_xg9V zYlZ~(rukIv6~pzyRQo{g!ryV!Li;(WW$AL={(5ky<_Iy^lPi2tsK-^?q|L4K+%3e{ zKIp1yEBx~J7;>Y>3Fmg~#bjO$)TUSovn>}0%VtFj8x<|FUBwt@$?xEbpUl9|Z4SaB z%lX1RKmTG=iX`{XZB_2KW!5e+5vJd~3V)Wr0>4^u zu6glJjD0MDTRqIV$_{^-&h@~2+c6GDZGADyOojWs-$VHJus_$MQ$^U_be5*|iNYQ3 zL0aBZ#_QZy$}9G&;B}AYbDU}lIllA|C*tr?jyPM+@sX+Gz5I~L5wEG_B>y_ZvllPs zlzk}Y`N@}YN<6AKX%>0B2ajENnro_f@6<|pG0_z~Wz{N9Xw%!~rt-1%L(6#U50>%P zDJF5$>Z&+@eD-nl=T~xK6-s$CjVd`&$J=N_-U~tNuq9pg-+snM(*j-7Z*f%3WoUF2 zLmXGw!C;Ub%4yd@-n|TgOw}=PU6F>|Gq$YNrTwU{`+?eo>?e|;j#NqeG}FO3$`r>M 
z;==R=G;qRZ#$QStb06E|z$FDx)LIL6nwI2(n#mVi^FpK`d=j`C;CE0Lm+Z`x1xAvJ<(A9|hJ9p(0ho*L_b9foE`r#G4 zw=j^IwX2qX9Mk+hE?5lq-i~BOff?#_Wuu^aJ9sgZX_re98kgvSs8s>|xu=sV=xM?^ zvvfH2^cxWy+sjiO{78I=A7o7*`#xHCj~;nBM6J#&L&MRd{LrF6h&(WlEPA|;COI6y z^z~J+5fu?3xB4e>YFLBuRsm% z>Pn;Stt~i9*PYxj8`H=72s7c9Jn|rvi5kBeIy1MBkOPNl@Uwj+&1(m-t@=$*bo8)w z?th4n@)G#FkBb9)$05jNv*E7K$;F)WwEV(Tl72-FvPB;=lCNDDJ)d-BWX0jKWhy*Q zSAl`qh7i5Pn610YMRS`qQ52ogp6H>10rMkQxVS&{xaSalwn- z)PI#bsynqIZ&?s&zLP{IZ2d_MpKIU}DRo@E8KaraWN!Om;#0+EIOPk;os1cTDTspUHa}Sp zyF#LokWP*y9>kTo!$j=%ezWhl%m}~uBo$sa20>3S>=RqgPCPgOh08>#-)b9B4thj0 zpUJXQXH%HBVT8W?84nJX8{vxDT8unhOCs-2B_XQ1aO(I}`oU~7$|tyzoH2b?EOQ$y zNONQYgSsH$?MAXSCy~B;f0iv!eM0=a=i-t<9XxH8M`sj2qZ$V1iEXzrr6 z@EZdxVD!K{Z!@))%47BRF>rih1OEJY5N_Q+2t~%vSj(rkDChfq`oJNSO11=(m7_{D z_0w!vIO7mwq}GPyLx*5Ae+t|>8c(G+J*UQdIc$%fB3z`2RA?1U>(^J3xQL06*=CQK z4S6JA_B(%G`!G4;t4MWkzM!Q$jL4#q7|fG%fj`CPsdfbmi|xy>>7X?+h&}+?^M=W} zQA6nF4Zyt^eR!HAiun~DG}h0A%9t-8K3|=oqB5WT_a%c&YST6Q^yG}-y81=pk(?_K z(|u1wy6vf;znKY?I>ZiZ??cz!vrvI+0Rdkd$>2%_nEL294c?N1^PRnE&z{RP*D@cR zCYF;XpGRbQ>UsLIH^eOJ+A0E4b{MtuIPG1`AoH%5F0W94t>MKWFIS5PE9>d5%GIR# ziUAm3wx(@?3t-ZM3~YMcL9B|C+2jX~Oq{3({0UiWX6IQ+5A`&W4yDa>!`>}q>dJWX zeS6(lZe58nqMM0P*E72LS~03VRKb^bZ_}->7trSxxwPi+M5H7W!y@`=ZvSjp)T9B^ zuU%nZoaiRG4{yTMVOgA8_<-)3yB{xfCW23WIb6A{hYO8%jIbk((J68oPCSWQo% z-N6J+la~|aE=d?{K0*?&w+kL^J4DwlSB9F&8}U-@GsdJx3{zKRgZth>rX(T5w7u3D ze7`l(<2TZv|Lr7HoS%SqKi0tUj)`cx;|A0E^%MC$Qbn#jdW^jTj>yh*f}5|$oo`P_IL= zKQil4a+rZ?(NA#@V20b!90QvhHu|oDaRE+n<*}1;Brpq3eB8ziRPNjjz z$MW*13VfHEfvk!s_qby=)*lvynvA)yu3;INzOG?iTZic3sXrj+#Sst&HjxR{18kqo z7e*!X6qGEkg<$Z-JKg(LeV2gY%y}GcAj=o(D0ly9L{ApuMK6ei8*|pHWOT=itQx_3$vtsOCWnfsOEKW7L zBEYxf@kv2C8g1=|Z&OY}OGrLj`1J%{{CdN5Z>9)7d}0avYZSO63Q1(|7*{qHXo7{D z6#n)bhyFq0f{Vjtg1qww=HD#k=r7)ab55$G#cmTQoqhyk6ypWg+NRP@ zjwVEU$iUa0CgwqGAFXh1z(=Ac?1_bU;bwrrm^K*-Tl3_omMX!^(h_KS*_ZBXjDxuj zJ4v{GDt62|2#UVHsdSMMEU1%Ya;uGTO~eK~<|bf5KbEsiI-#bqK50 z2KGk~$$v2e71o`mrV){tE|zcBAC-w07Hy;#^V}d)ejSdRT|&-mQ^8@a9n>kV38pz| 
zU{kyq)c$)wQw;~{lSL}fV%N#Ud$zIXU-#1a-rf8!OJ&&4zieQ-`W$?xv7dCgx(k~3 zY=n-kC(J5vg=St3qxxPRwmZyX7mw>>VkT&jymDXGbj2gS=ujJxx%rL^Jn+XGzk2B= z5odaGx;^Fv9b%I@>&e|TK12=c!(DfGqViK61|p?k>gF`OA(KdJglf#xd5?(+2jF9s zJN(>x2&PE}vrz|c(&G~=Nr(O>JaTP>k>YA##79rGvO7dRU(yEpqyols`=+tS|)j*bc-~O z4F9I-XGLM@EH!A%&Z4Q2LV8^+5a#p)evD26`^jBIw!j-T?)Ss}i`8Vmv=^*Um4lWL0{5V?f0E}IT$eq>BqSW7>K3P&(`)#& zxMe;`2~WnH_MgPEHk<5xQx3ePd#T=ue?+7C3(f!Aj+67J5Uns@6*WXNr5p76g>$5@{|J45(VoZzY{9SH2^e6~i$bx@@Ig(9!u?1XI2;b0 zYfWi|$9IxaI}1l9%fpj2E&8E;2Xxm|!rCQYnV`#*e*6&+&Nli#tG;QZ4D|B z;DgUcTG*fiYvIKERRFbnLFE24FgSC8lpe_--@`R%X4EvS_*#t#i|-KK;-{>k&s`KzY(HgD(Sz!Had03yZ9HLBqkP+J}g5UGx zvHRV6`p~@>-%DI1&Fyb_9_iv;ik!Titub%qnD#PcBPxMZuGqdh#A$+&G zNW)un@Z>gEIC;MjzHVL(!VAjW$Es=6J@yxK&1;ww!i@PC%Fjd8v=?M$!?-a`{~FaW zlVc(V`Jf&AnAYvcr0YKg(qBdSu%dY<2D(iE{*G^KSr}mR!vip*Zoo`pLn8k3t0&(w zHc}uxozv~FUrB$bj^@05tn+SQPGazANMjK$QB@ANG#X1|Y) z!@MXD)c%x4e{Vg=>UbrBLCsh8@C_AU`C%wObO;1RpRmhv5}USJftj&O9*3&a;L6yZ zv!<%lOnO=t%0IAzSie{}_9`EG43ofhOC<{9lbN1d4-#<|Q6?KRNo zvw~^3918XJtLcMnRqU&h7iK;76}(MP4)AUcRdLkJOL<;trM%ihMLd7$N}hRh2`_PJ zDQ|W`C8yrHg5&?Aio^UZ<$WfVoQ=AroVT47oHuv#dDq7LJ;}zUysxe$JoC;X-o~?a zye$30oR}?zyv>=1c};2sycWqy&c(87o>$Kij)iS0Peb7lFX>-3&(JbSu=|}7uX|d& z>FTgI{F&|(1iJSY^Cv%>CeXfgo_~G83x0jlC(|mLX7=mH9@ExYjNsb^8MD*fPfT%@ zky+V?eZ1yl0#obrgZ!$?C44O};IC+!#BW!*YN{Ifm^VJ^6#r*V9?yJ0&1{d-27cay z`KH#zL;SLte|avM?tDGla?XveE;43U6A7IX9;)oWGeUpNeJ3?`ynRP_(6*qMF{hrG kcj)-Jj_ZfUYM`g=*s1U^zo3x7FyH_4+llW9m59*)f66*xxBvhE diff --git a/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/multi_agent/checkpoint_1/checkpoint-1.tune_metadata index 7eef2ef15bba26f49eb7e79079714b5c7015bddd..febe7b205e46a15ce78f3248344fddfc47a3eb3e 100644 GIT binary patch delta 147 zcmdnOc!_a>yh3Vfs%f%?foYO~Ns5V?WolxwrMYoZvN4dKY+`6)IMH4^!=N;Ri-Ccm zBr`WPz9hdSF{hB#-68FY_Claw1Rsz{Eyyg+Pf0C?%CdQjM(_fKixbOIQ{oeg<5Mz| mOA6ULBe;QrMTwR1WvNBQnfZBz91*NQ5d%F7J%d8dQau2Xqbj8U delta 117 zcmcb_xP@_oyh4hlr9q;lk*PtFQId&iN}`2{shOprp-HNxMUqi!qUA(;ZLfq^5nK!m 
z3?-SlsqrQGC5btOtnLm)-yTdlz`zi}2b4)I$SlrJNiBxTvU!U{@B)R46U$Ol;uDMG NQ!", - "Box(3,)", + "Box(6,)", "Box(1,)", {} ] }, - "policies_to_train": [ - "av" - ], - "policy_mapping_fn": "tune.function(.policy_mapping_fn at 0x7fda132e6c80>)" + "policies_to_train": null, + "policy_mapping_fn": "" }, + "no_done_at_end": false, + "no_eager_on_workers": false, "num_cpus_for_driver": 1, "num_cpus_per_worker": 1, "num_envs_per_worker": 1, "num_gpus": 0, "num_gpus_per_worker": 0, - "num_sgd_iter": 30, + "num_sgd_iter": 10, "num_workers": 2, + "object_store_memory": 0, + "object_store_memory_per_worker": 0, "observation_filter": "NoFilter", "optimizer": {}, "output": null, @@ -110,7 +118,7 @@ "sgd_minibatch_size": 128, "shuffle_buffer_size": 0, "shuffle_sequences": true, - "simple_optimizer": true, + "simple_optimizer": false, "soft_horizon": false, "synchronize_filters": true, "tf_session_args": { @@ -126,7 +134,7 @@ "log_device_placement": false }, "timesteps_per_iteration": 0, - "train_batch_size": 60000, + "train_batch_size": 30000, "use_gae": true, "vf_clip_param": 10.0, "vf_loss_coeff": 1.0, diff --git a/tests/data/rllib_data/multi_agent/params.pkl b/tests/data/rllib_data/multi_agent/params.pkl index cd832aa1c3eb1713e608fef452dbe168746e4cfa..192cf7558830fe2e280e383cf7777e9ee669a7f0 100644 GIT binary patch delta 10700 zcmb7K2|SeF`kxv5n(X^dMD|^jEoDMQq%h5DW{eqTk$ptAOrdwQXr-jIC|a~AC0Z0M zT8Jdls#R2||9NL5{ciVv@14(Q&YW}J=RD_ppY1&7ol{nT%&9`^W{Q@kgdhZ;biNr+ zhR%t@tYI)?iA)wBje@$Btsw&y14Bu;KPH|7IuSetHWN#sV{lkDo{V8pQi%LO8&1h% zBIJpDucV0Qu-J?^3>MF(Fz6Tpg~f(uqvjiSi}7Sx3=$hdW-uu!3_8DnCr@W!2n;$A zgJolgbOMxuGC=Lnf_9=7dD&Y^>reX_#$l5%1R|bDGj!Ac=Q9?A$;KoS$rL<|$O6wv zSQ?86tq?H~v9pne%H@=xjUv7%TSurv#08QU)wCwj7;CJE^hB0b9EZlHU}K1MwiS~? 
zkFmny!4y`3aIY8K>jk%=xuWW7zbP>!N(_fd#1JVlWHv+;T?8E$Juc3nQ%DRZ6eC7c zU}6ar4hsV_gjZ_=jfveL(TQwWtf0eCF#TkkWi_3f)dB%)KbUjShKDSZ6I!iPZlsT0;(t2-HZU z0VPrJQ&b(2#Ym)WUGWTCB+6DADpAy`>s3%jA)V@O$R#3?_E7u`HON}pQCO?XkXD~Y zSA~MA<)k5s{Ml>&d=rNF0;wyr-LCbBwI&kCuc{3D<12B zb#QjIb+&b}cfb+sNp=nnu1-SMR*I4)Q_ybXzfb8xi9;cT33arQ(PJkiz0*%^vEf#>U|Y*eU&)k+Y!)&0^$r=(Q$KFV=cF3j|tgY%G>VP9)Me){JroB`8N3u`7ZexxgUD0r8SeyCwK59 zF&GM+!UovUG$=yH7TE_C>S%Kxk{^)=c#1JZHi(}jEQo9%GSETL;gfrL5;O)-B8ko? zKP5jWKLLC?bNH~f1zQ0I6OYBm0^b2Vc_6AKTQNbTz*)r$jSrrYj*Ww#k}tyz5``vc zbZ~-#%*fk$(!jD8$AU;iq|tzZ1i|)89-sUnZv@)`kYBgaUN+760VYVn)6W4e2h^i}{bDw1x8YOw5&$J_x?K`6Nj4C{Ot}Fu_K^vWkyq z@X6nRrxGUy1OfmOH~|2qh#_(4!mVwZ@l*8yRXQv-@;9nFsPkkom;?@%HYp%s z-wX@}OQ$n{!+@QU1>^>NL>_~QT{Ec}xGM-A`Kvt#?jY}`>H?0yB6(`S?HC+FJOv*M zM9t#h;ux@dv-nY9VlEV~t1CvG18AWVUA4*DAziJ>+I3w$E|DjL!NkxQAm$6_r_KcA zf-yW1AP9k|SsZ~2|0(E=JVhWEFej7AhyfmtflX&hH65Hpq2V|PE=DtC)Eu61X0c5GAI|04wr!hsIx&kicfVD z(2)7$(#gd_Mf#ee0%NGrpN;HyxLfaQEjV_o$s8AfJQ%yj@bAvGY#!!jA8ux!#rcj(@Lo&QRGc*#70ACTb zl8!)cmcbW$Br@>4Wz=Vp7%(+vX_<45{EdC^15dc>NEehZoPQgW{p|V8;Z~^qR$<{B()M6wTfdGIz$%Z^p zftlqA(EnzQuABl-O^5X&JN4`z(5czru|V(gcp4=hGetM4^69dun39F$H|pkJ!m#)K zA4MP!Z{Y+CHa?z~jA6tJ=ce~)MG*z<;E^H|04`-sMkcz2W0ply!?yhDzm3-l>{u7irNTlR$#9$ zsh7c&xLZcP%@fGqx*TH&2A*GRg0hpZMHYz|d-2vOCp|~nUp>OUC4Vg$e2$}tF zc%U&;-RS8K2wS8nut;?_f{mmgC`b|#cExNY3K~5yk~ed)A4n+umSI9dy+T7U zXuk+P^#NGfFQ{M&ofz~PA8Fv10Hh>R8YP6uQza&Wq!U9UVwrRdi6~hA-wjou(do(@ zoF;~Dffzu+^AyCP1`~N`zM0(LV$olHl-dc8l@lxjq^iQOJQ+ZFvTG=8GDdh_VDWj9 zlZhy(%EE*cd>oNYW)OhiOj-j%5XZ#O;J{{X!F`zva#cnmk;$YGfW-*n)t@ZUO+F|D zh*mSe`7ee-$I`&Yhh@gVw9_ILoOy^(?)>nsR!H7JE+%gx@zPf~yZh|-cz z4TQnTPmk^(3Q{R>@1Kz|2vX27;O6tBLE;V4cm{z;1KaLYE=2Vc44H})u%f6t1(JjN zgkpm`gZ`RU2~q)`CY*2y56A>cB85f4QD_u)GKNV^0ONo?aQ|u$V2c-;%H-*JnypZ! 
z;Uy^Gh~Ri9+_sdN7!zm!2ih{cog%Riv96>y3EQt+tPw|EcgLpG1Ct_ja z;%T5ChQ)#Ris`PXftuQ;sowx=;a+G0Y*54P53BiaU6T3*Z~+;zDOXtIPfIy?l314T zP{yZz0%X&y4c@kGhs-36Z-3L^{y1saHvx&l;$Y>{YqzG%=EGMc{#aGL+muYi*rj=`Z5hy)Cd z2m|{MEEl05hUILoI8yNWlK~b7;?jR&5HDEM^zl;kcnlNdu#7l9Pgy8cVbcmDOu+fz zFfd60haqs#;er;F~KOoSxY3A znEd~&Ffbl9r6$h*qb4q_ETNvf8>Iv<^)gqlE_@MX$+Wp{a4{|WAWPWf`*>mCdh^{01w7+gArpGBboO|c%w0_kwXKk508hb=CXp`cMFQ3Iv&)w^3RnQcY zN@=g~&1oR!pSE1nG3+7Yzo^w@=CzLd180xjj%(;Nc+Y*ImaG^NcYL|%(&FQEu2ez9 zrI&cC{PDXjD2hiM`ynQmwpdSMpVf?_r447lh?h!dX#EhWO<1>YLsmjx0J;7rQ+~G2 zu!`OMVfo@?>yJ_`&+i=ab=N7sRI_1?RUTxpS98Vkgxu<#?-(g*v^R-E9wAi>bd`_t zc}xBNOYilTb4w-H=83+ve7v}NSwQ(<)11}apCf2m3&i?oJ#5dZ3}@H|wUt%SwL@_O zwjR;_tL65(NcWfKCmxl@J^e7d)k7=%ldh$_RJ~^B;>g(Jda9Z~)*tjV&o1N65KU-e zH`^Y29GqxgswfjVix^LPI9O8<8D&5qmzWf-OFxyjP~$MSrEl??jL?*D?EZI%kLmj9 zhr*ABF_Fz;7xu>goHuIgbvG$^RU4IcOTJm3q*QQ{Us++^p^c4n_ZwNqZfHA{cXEfK z*-I-@_$xR6p5qqCk|pcq?PbUh@Ly^qwqwF3bQV8;>gKG$S#aIlbl^;Q(R&hG52Mmv zIZl(*H6G6!Gj-tMfOpzwxBI0%({mgsuG?=KQWtqt+`)G=4|am-^nP~8pVjK z95XJdthq?;*w%%ndw63zZg$>1b0}x=NUUk86=9&^qkq%E^AS7Z(wKw0%jdA~pQI9c zhrf*8-cw%bxp&6Nt*`t~ndlM3>uDG@>;B}n#_F>Yha)t@xDGp6XV&(8I9AdAv%Wg< zy(HbJJ4N5t!}v`Bd;TKwh^}kt$9tNJq?R3YInB3oMiH^iEa@jvtMcBfWk2|tE;fWb z>@xG?pwFD2d4oebeVI{KnwLKfC8HQ*Zm7B@x?-)zHD#;NoPdkB?k?6@PLR9Qo_fvr zv4ttK?*>-&DXom#RQO{S!cs|tF(ZGG)4&eM$U0ora`5Enn-50s9c~qWgZhH{CT2D! 
z+rDA+?%LMi^)1yyGi`NxC?Q9?GDEr;Vf|f!DE(Pgo*?x$=4XMe?nFXlB4_E)6R*`K z`e7*u8of{az{w)jLTguNB*V4Pag~In$@z-@nT4MVn_vFe&#eo1ayxwa)^%rO4Ktr@ zNFIB?`Nht`l0Zv2zm)a`?WlG4g3%{qcbv#w>3h>FeBs;cv{Z+y_0a+46Db61ufeeu z3w`Gfr+&_$UcT$LZ~1je3(v;h{`IeCWm;?L%(>bkH}XOI&C>espAgmP^EBDV+m0O6 zm){U5(pB70W*2RopT!-q4NJOHr@FF#6J77Fxv#XP=+=1|PJ2!%D1?)e>!fx4qPiBD zWNk*w-BwZ8SnjuA@4RZ34pyqS$m9b3KwjrTHBLW!(_sV0(X|RkkEh%>epgk|c<}RB zY;m6YP^vZYltHkgss*k2X&L3^(e!8aBtwm==cwcKBhMx(tIQINxO)xo$9fXG4=s&< zcIO+X-FSVY;cCShae?HVW3R(7+btJcDKF4$@LIS-J!@2^W-Xs_qQ9PL^5Ae>Us8h8 zqCoi=9zLa;f{dC~`&IPjipZ{l5k<$#p1dCq-R6iz&Sgnv*L@l;@rqE4FurNc-xC(E zqBPrn&FvnCk*h4OB|(Kd5t1%$bBBB9b(wj~^Wm&RG{&q!R%cUd1-ia-*Q1uS$Mdm- zo_pIJE0+h%Ihc>dB53&&YL)MUTDV;ok~bbX@Nj!z zxut>|ap7%IMnB?I%>MV0UTYhNK6KtYbboY9+`THZhn397erBURJ{4#B`1YeV419z<&XWEufi`#egn|(^mTF%>DP|WX||J{Km{{2GRzzxpLZPXUo z=sN*tb1gfg4-l|zAI4KY7jp~I6O`k<^_BE{KK3u9!nDg}j6_J$whuRmx~rTrPmz&a z;jG=1*j0Y*gUAR?_E~oA*2XIXJ&}%-+8arVklxCI@i_NbhxpH;?c%34Jwo6X54X5j zjr?qWhj*YtEXDz|X85sU}mtbJLt)r}B?Y1X~{omr!Uai6?t*dmCAm=`b!m}hR*7xM z>*aofuM1@pyFzhoWmg^)Zu0EM8Xqe@DbxFIVEDEEt0GL1(SoSsN0m>R7h9VuoWH7_ zcZ$45cC&^|+$X{QRt7V>b;Ig=4b(x0vOs>VbrqLfp=HFQsKdVQN%2fr~t@-kK*?2qRoV6dD zvpRwB{eps@?KmdCcoTh|xpLNX#`kp<$r@haA-alcfpc}`=j0@R| zUwSjUyNBG?_Uyz7xv+%7j}Ezq-}Jl~xfUVS{M6vMYlDmS(iiT|f%wI%F>j5^chAav zxX%CBTNi7Or5Qf>3;iFA;}^}aJ<`-vtR-4;m=ajtvC3I#@z@e{>rx%g%Ti1j%ez8Q zY&KXkL{GhPpTyp^)V|2m`#WLlId}D&`fac74Rw<^U1aeL^W|H{DvJ4sToMbm7y0et zK6BM``BwWa$h*2i98y@Y)z|5%W(cM`qR6{s?(z8U<*D|=-rzw`lOn+`Nr(H)Da8qWo7uQhqb9q9Sc6JI1{;FQ;^z< zfKx6XB!wKF6d4>pVz3;R;E!3Zn6TH<)uE{2O>P@s_@ zt6U1V{(Zgdj$5Mc_AL<;CR|0O;3 z03d`Hl?HyfR4yq0CHrN+&r zH^T98QJS&hFESWkmmWllQ*o4ReA4#*XzEH_E8SoO1n zdp)jp>#D3ubrC-?RL^di&F-xdr077E{VB3;`=4T-YHK#2{oe7KOFcW59kFh&THorO zA-kx4=g$(YgQiy)n5wECP0Eken%>I|QQcCttu1?Wy?nR+SX@?mRP2#^N8-5cs$H|t zp)a0X2}xR}P5)R>-M`GN*4Mb@5FyvD+4&_`y<=aR$Fq{R3NxTo$g!d5I>*2C zb&WqYKjV<1_Q>kAMp#THtMpN>cDl^F!n>Dlw`JDMYCT|?nkv)&e4gE%w(<2E>JPu< z8B`NS=JviwSY;D_RHk3F*KhN#Z#jiQ#s*0~p%)$2ZtpYRC_38N8@QLs{I=VDe`dg{ 
zivy9)3%QyXY62ew(8ywSkBG&y<1Wr;Cxo#rvIo7vrg{McrdbUEl6LDjACpSgo>`k)6Lf zIcAyOk`a7`9%<~H?H7m|U@x-5%~hss^L_A-;-l_cT$FhRk-1=NqoR>v&Xu=&0?kFw z;^!axaN6EPtlnSI0 zynUgJ06nf-=ejq8L07Xrw_Wl#x>wV1vFP@tfqVPiCysIoR*QruE$};C6`+tOYPuo1 zqNMn(-8qAH6hiAuj*a~7v$NaV4tBSUSjH~a{n3DKIOCJiQA7>J>>oW;OIU4yy@v3f zqx~{lvv1LsoTd=juU}O~Bh-~S_Y(uMvt_e2;{1AKg1J%OZ#O)9H72TtlCUj0GyB>7 zwPL4Ec$D_7tl3g*#nqx}6WLiF5}fr9(yq*lW_Xv-*V=v1KBD|0o`%P zdXt2^o{JPDR;&CmDr=d1Sy{AOWuT;MVt-a7Dfraj+)VEH^DnGX`{;z-cMf}u@L!yb zb6m9ar0CmE_18zYy$#msDk*+Ydi1m(%693xV@D4x^dA^PDSTQ=Iey~ozJ2=ooTu5C z&8>qv*8=68JC2i$5a&H*2cN}qH*e`jYhT)R^V4F_BSQoYccm4bquu?VX0`V46Nr>^ z?P=C3L7P3?aUZxRHODU*6!KT^ktFjlp4ZQ$K~8i_|=8Iffq(J>5mBUb%Bq-Rn7ptcKq3J&d6vpAzqyOmJgT zAx!y4#mb|fnW=k-FP>-xqGT_3CLWg$r<*-?a?dAN$uo`EOJv5<8o{g9wOXIq=7DYp z=laHxcbvPm>p}EO+#Av0)gn04vO1+kV{!L${_~ELYohy~$#FAnzlKXhq4)QRy}Va( zReNO18d}E%&kSGnYDu4k^ZmYP%zl6Twy~o0%$2b=yI8Ya<3d$>9Omp&D9j58PFZmB zL>IZ>)n`N1u+<@XCOgXA943gos|WKfBB{zxI27)d<^Nvb}iBzR#+%Sq5cVj+}yxO?j$>Y1$ zf}ffu?fNE{y)Q&Yr00z2&U1@zmGrPlo_o>j(*D@={=v;oD$bTQXIiTa@z{mlU1NK; z+w~r3u$`agk$)R)yw>jp_rl879+NQpt#zXEuUud2$%&#q`_EO(pD7x_DmbzJ#dw9@ z;I4^ijHG!@wdO_#nH)L8sGHBZR*r?vTCyaQYcXDn)H)1)8YpnS+@3A{Bxm_NsaLB` z;`T`fOAWtybkp-}u>E4>rM6BxUOjFgDzbO{bEK2h+OmzK+eEYu{1nJp>_>F@cdrO(_@MG8XvvyO7x{VX+Tzg|***e&Eq0TRXiIc^xAC;xyBuYo!HdNKvJy%)0nY`f$qq<|!*Ea|E zeW+>8C@_mivic#1H}K&`x!VI{@le?vArMSL6^YVI5M+ahf?ZI+f^uRjo@XJRb5F3v1#pCcKh0PoO{)|Ehg82mb(*W>- zjHra3zRvHXVM0(NxSdZ16lD`e8LRSHY*g=8p~#aGByP7ut}ObhLC6kcWfwr zJB@}93c=C{ol@-k~;&5GypHGSzlTuAb~{b8J2bn>Y$K%AW|d_ zJjg!}Zd$YZA06(aKw()K8R`EBRWnovw3Xd}Oa=j2CGM>gOXW%9z$^&L*-P_6STSK_ zN&uchWjdf3+;H7LI0UL7z*c#}2{|oMK^g^12*UVaX*d8wB}C$xjtAeK*riZ9EcXEk z9#{Bey&gls0};LyEDevL2H;6}8aW7qBVnmjCI~qYw@(NGvO*9QavEAhFdk}T@H#o| zcC~RLlL)v7CJ?R)90vP0?%;-RfFnROD5ogNxmp1&6b*i~$AX6yT?M!>7+*3DgJGhn ziA-lEsI8>nOmj|P9$^A$t4+||2_TdDe`$>WZMZ6M0(4i_vj}DIWBdpt62>ni2nYKP z3s0FS%1cDFMbdjy# z7d2^m9Z(OnGlc!|G$@Gtu#lHyAcB4be(mq)-{$e-ffH(dYyw&~=2#;KxCfLMV7lD3%n0r!u{ms%SJ2 
z%ivR_;%OnlKUhNGp|;hgKtecdX}Ad-+q8ib4HQBO1G_b3Wpsckh9CtSp+zB)2tHa^ ze|!*)%8ZBPfu4YZno?}=RYy~fLkB1ZNs4Of-Z0PzaXv zS91u>83rsI98AVQnxK&VVJC<63qeFDX+zJ10!SEakT0tdL#1IMOdu0a6oiNZ1wqs}?0e>-$7Zd%P_<+g}*`fXn zO>kCIal=pgOb7~t$De}rC9oXnhdTN7Wau;m5eY#-MxkQa{sO?_N8Aq4g8aJyP+_pM z3<&|LWwkow|AONu;eW;P7Yw2~MDH*EVwpx43LHW(<|m1Nm<5*MAJD*h-8E7$D1LYv z@x@?+gGmt>axn83WDfrg+1|g9@x#2SEWZrJLUTI{BmCUq`cI7Dtg)!=KP>cLko|5P zB24ZF2(Vqxgyq-_eL4C++{?vs9qp$>`9YQ;vK@yU3wA11plj7E{tz{we08pZ>;YFD z%3xVh$Ti4cL;Sa>SRMPj3FOefg#b+HhnUbUV;k&7ps;}q2dryZ@VLG>9q)_rCEy_D zt0DO#Fi=onNJ4?Z2L}cm3ai@xH7HgE2!RO0J_Q9yEfKCb)U2wR8w!ba_;6?m#b8+r z2*wW&G5cF+Y+Id^e}=|CeCltShdKNTX~&S7+5fdkVFteOu<+0T+m;0c2ljn)xwR2pSnlb|o+wJmgS3g+lO!db519 z5XR915{+c(e@)7(A%7Ek1T9$b<@Y%m&K5ZTT-Tut05}T70P#Q~esyh&0-A~9u<-J5 zp$Qj;P=N+Il7NS3;0aL_ZnA?57pMSQ(7*t34HB^v0koruX>eEKIksu2AMgg%WWn{y`?O4Wi88f^svs7u4W@h-X?N zeOP%DJTE|$i5+ksl%E0g&;bdr(W(w?#BNwvG@6x$&{?Ge6dgXvmV5}r(Ul)RC=c^t zumc2tfJuA`gLnoNFc0E$=w_AHDwCH`eRVJ$8qE5!2Cs!u$RJY-s;);%kvr>FMjd=! zUyOQWSkW&LaWvm7;q4~zt)i zF+)A`jK{GzO=Ol$FFwIHW}rE$7<)diL~VQPcmi z+E~dzPR6^JSB}imLyz#8ZhI^xvZYYSA|q2q>RWpS81ztnd$82T9wnKb7mEaAj%lo{ zL*6<$*VQs9T)On~WpmV6{eb^IM6Rxl;BcwKHl|ElP@~;lH-$F zp7^w#YjcyWlKv(u$I!dHQP{Or=P624{XHt2=4Uot+^IA-q*y%nNvD2DVu@)ju{&RQ z>16K2z5!o(AjNnQd>oOR#iN?i+Zt%j>CNlSBhBcV573& z7mgKkxI6pW*(CLf&%w`{DIE#hko&`=IMIpETU^&4I*EN?-+!@Jz&)=iUvPxNSC?zO zAyVeBUQb)`VC8t=G4EqonLjbQyCyWhG%7p0=rr1YLWJZjL_3Sd3&t1+2NWH!J;p5@~T}59^ zt|LD7jPyF}Tl5i(FXb@_7d%q%$Zi=NNgwx=c<`eB8j39_`j23bQ0@zSrq8C z+ZMCNHF5j}%yZr6r09wYF__xIFVhIZ)X)x{r|hJBE)Pw+uQ!~J$)9~`Myt?%r9(fBX?n0e)~Wx6(a}YEV*vlvlR`}yMY>T1 zE?Ng?BVWExQ#fI(#h7bA`bS!he|F$Fi?JHS9Vj5}QXJzAeIjr*0C(6d&xZ&O+Pqq6 z1;@f&Yjb4cW0fq^y1`pjjb3B^>7>^=A+{CSkJDNet=@M&zb<2a>al0UCUcxRzUxax zPG+SK7<(h}UDxGy`n;vw!Ct(!CnJh1X0ld|{ej`~qEs3Od}*yXR+b<2X! 
zrV+kaIA2jmmLvN-H|jq3OmU6%Qm{Lx z=^AM!80y82IdEdF46}6Wi22(CW8dnLqgyyCT*kxVZudqE@Y78O?~>lAJL_1ncjU1L z3x5`8oKf4dhwgo5A<)q*tD)hsKyJ&&&Y4+Jrp69Q&T~HWZ~H>hzD#!LPUxA9FY79Z zR~|hu{zmGCx36T{mvH}eTN{PDQomCk9#J#eR_#G)V&bOPub^+rzNmPT&n9+bIB(ab zkrh#QFMN_uh{l_}^uBz{_#=#Yg%_6%b5QQgg>_~42KoiZ2{tXO)P4)1nlpCS+q9lx z8$EgO5^hHFT5eTWsw~&T;doDT%j_6SQy}pMSDw|V)W+&SNm1V7ZQ1H`omV7dE|iJA zX;6^!velnoIJ_@J$ibJ+XR-Oe0e$ro*pyV-8CojgyIdP3K2bFTl|y?L1^I_%7> zKg4BnQ=kvOt)Wu$exeX}o|TLGMr49fc@|BUJMPBHi-L%$TQ_bVsm)%wYJQg8%E4)> z$!9i~c(U1A{3Di>GEW@o$@6xK$Z+Z2UZR z&B?TfnK)+DzCK)@{<5s@;EC^5Z^a(4yATl1cNV-Kce=marJ6RFX(y!CVz;80&NcdV zQr9Tq)2l)f$qd0i z)#r;))He2g3aDK<##KczcFX6l-dspqEcP;9?^&HI=#_H$*a7p6C(}tsF~Dxp>-3 zM7Cr5p`f`7D~PSh&*?eL^gcF;SBb>PsYmZ$1RcNlsS7Mk)wcX_8L@Nf-5zo2(G9%K zA%_+ozwsI6SB&jFNgW8?ZPD>+EJGt|UCra~(Unt5y>a(5NFS0eIwsw`QyH>oE7)u3 zQLQ{}YZJZCcY%O9+C1%${c3tdbv{A2&ra}-=0~rHHOSVKlnZX<^tU(G6>miH72U{A z|9Z}!j?sAW6erN)Xm`}Uyu#(c^=|dq0y|25R6Hic!=iPc*hI4p*UFBh z4`0ehJg$#AvoonEcOkyGt5uyt6g<@-th8tvH`Ee4QYi7>ljIE zZVuIae`nFMF6O$L-8UwkNV}D7h1RY;b1xZ=_ zi`RW~b)Pn$U*R^?OZL8*^m70aN>I3+6dU&BEyL{fHp#E)%<%gX8gTjvrFjkr z+ttkl_~p?j#D~}d<(O*6;6EjakDyy(7hLor5p*cIhH}DwxcZ;7RJhA(s>}|h$#pOj z;vfH)Ar`D5AO1On{%b}Kvsq1skx15n&;K0oulBs60=g2}paRZ`i5zh4KcZq~ z!*n7a)G2vKJr7G?M;QhjIuv-ePqTj>Js>SdyM6MxfOt+Z-J ztU`*T#EzR6nu&A>MCimQQp>*rMC{y z)I2K2WO}VVZ=9}UKV_3e(oB>Wh?=0Enc(_(ry=JX$iJf3I<&*Y-FDgDUE9ze?ZLfp z8HJ8!+T#UR-j^z%Z;X^=uNR5CaNV8bYO8Dgqnc|58ZU+z;@#D!?dJ~_YKh1ed3z?M z;wlPTwpWdBG+T#Ja(FLp@8_2NHsW>R5zoc2R8EQfK0;Yq=`_tm_R=xla)*!%dTKX+ zv~yXaU|pq>+tV{)V(D5B@9b$x!qsJWE{YEaf=kK{{8^^|rfYV2 z9cwSBgg>H~|fc_y4~xO06z zUqxS`OqqZG(5c6EDfb_n&$nfCzq@^`4}0yeqa>k}Bvw5$8YSnM*T~2bJ4@e=c_pcKJhvRrWrRfb4 zyo8(V9~1~v3K3on?tBG_JtgH@Bj>rz7_od4r%kWx9R`KxU3G-a;@fy_b&PViHX%Kd z+Y81^a80T?)$wZM&vRU#Vm-E7RLv{Y?D}pgt5GAWgN}Rqg8xc;qW9E27X=Ud4MpF1 z{BY)qz!8C)iUJc(HG^q|Rc2+Ko!jX<9GTIKv^VX`{YgSfOZ!J{_A8V4s;fsr;sgT> 
zJ#6NUXQ*k?rEL!Fh{(Z)?;|iW)lm1k9MvH9HHUX6p(0vjZoPJqvmEouBjawUt3|G-gx;{ZexYLZ(ml>?ZD$zuP^)Zb=5cyJq{i3qknK;Y2i@mBc)Fr8D4}@t56isJwUrij>}S_h2FH=i{6jm#i=|z5 z%Nv|7CyV+a1wQ?8?XpL4hLU7(^h0qun=MNL($N!vDYs|08c^lHnz_t&x7p~1+D+az zr#8x$SQD+iUoBgNj`W=TzR{I5xkQ`^N&&mib=0$U{ZGOwi?48JaTiVQhsRI$w)BUF5z*-xGteP@-ATr_qHZT~|f zuHNw0#ISd?z+G=sni0=7;T79Ebn+p!N6H2KenN#xN|92Go^HGC1=YSSt*12y)+DlB zeK=r+6lgb3sh30Psow)iCT`E?OK3wG7=_L8?$?$N$$Pi%mMJKbGmk^jP7zf3g?E(R zD{vf6B0PR_-Y#BqJ>A>K6%j+Vx{#rjz^x+iV14fnpQOv`ZJyK5gMp|#PI~9J8#QO| zjrL{AFS92%Cw)4h9eO?@HE2VBTeD>Gh%Xt}y{=5?5D_<*{9kO>^rO92 zuH`RVtKogocwkAdcuBWRyz85Id7H-Gjf=f+Kg@lYn^#{^k>#5`ii;CCmbBokA>p`x zwg544t8Z~Z-5~4KNI|-})@;dfy1g&jcdos8=)!#;8#9eyfn@J)f8Ud*tN9-ZByy@m zS>R@lt4re#qNa}Ra%^ba_o;Ct`WTm8h3Up}M<0G$33IpBi=HCiy$4+0FDqYE9JiddbdWb$+Ty0ly-v5?jy;9$Iog}) zW64{N`r@CU+{ov8t*)%3A31w$u(e~^)4*_a{3}~7K#?viZRvYZW~67TuertQRG7dz zuI0l)6F2l)!nWsp(?i9Y?pjM$pwo>DT`^bcBZoSU*2mVon7gjc5t(tQ<<;U>gG^6B zq3(m%D^AA0HhbFDEo<3p3>n;}^at87@Dmy|?Gl zvqaoeojd$*N)~D~y1U-Z&+K|BIwCg>ylC0BZ%;ycoXs%;-m&R{6Q}Hb3x`5F=L7bE zizWcJB=zdn&%73T4sQT@n9mlk+@d;xPDBRYoKf#+*UTsFwRYu^O01Eu(dERCS#t8* z_MCQT4nCRs&Hg&_8P|Btf$a_txaA^7bvvwt&0F@{C(tg+d6HE3zB515QTe^w(d7=VD!YM`cLkqb*ewYPW~8zGhN!SCN46Y)P&NpGlA zIi~{(_Vl>~h>d8yvl&R8G{793!z3iVEYv;R@bYvSW2B&93w3|wKKT$2!#b0wh0nD^ zR?@*aQxCZ^ZFZ|m7rKXbkTlMI){M3hO)!YS$)6Ftx3m9=ZX9Yrb>ezXo$yv3Qs)`5 zMB#JD*j>Hmdr$6@FK+I}PE2od&r^wZKGm+YRnXBm%f zvdw%FU+GWOQm!&N#q&<1!+71=aP?YpsEf_HG~M_uQj(Iz?BmaC-4;hD4_>c&qBL`+ zZ8}WhRL?WSkY#>r&x(*{HtKUKuX@$a_PWr|9BFw0VUEV~oT`iI=?YH!;(Hr8uF&dK)b3xCfc>g;eH(5VBjFw7gQ(zc-O+8qHIv6Y$=ER+0ef5Q8xbYM2 zLgCEzR~6f*+AY(v!gDpuS8%#v?vBzPgQ}wwqm269A9wOwg=06N)jzL!{8A`Uuc)WE zWPC2N$c3_JDT_BW@ZNC6({5{~ZiJyeUAR&y@u0+xyRc)SYm-qhVyR zJN>}GG|yw_ph3rgFS3&>_n$qGOqlFrYa3=V`_)>8q4y__n2h$j%iO7(&R-~REWx!Q z($8d7s_TAz7Q(;uB)WGO$A3tNk3j8IhEOn>M2F5sK<5mCgW=OK(2fy90Ny;Og#_WX zsMx?@XbYRc{mZ!oRtn8vhg*JZ1N_*>LOQruviq`j+<=e-rWyl@fwlt}3afvI!~5{? 
z=8F%3M2LiW(EvOKI@3b;*9wkc5)Gjw)Zv093h~=6-0vIhKX=zzJ8ukb3?w&nW(31z z?Gdm@{P?#XN}Cxn|Z8u`((; zJS18u=_C^>A(8lB$7RLGLzgUz4T+8sO0E-1`KgExED8x-xMEps!1AbNOM>Ht(!tRS z<1M0>En5;~u_AWKvKWj1x6)L((qzaWLNd^g((}gNCgabypNxJE{8KenS|NGQMvGL186ohJi zQvcLwB^>A`;U<|bRG%RnG}2AlPf~o86KZ&Pc)0!*zy5u5lZX`#4$5?r5Z{F*GX%WE zk}0C+t3vQjO9gJrxjT|d*_gV17zQ@ zq0ijtu+(=J*;hi#!e`_B-!il(@;q<8rw-C&%~9CbL#rLLDERYZ3aCkhzV14hCv>99 ztS`_xc_+?3`VKa2DOf0j^rV#59(3~@g3UYL)Y@r>_^RHk}>bBBAyE@3rK1o$SX3*qFT~hHc=FA^D z;g{il;P*NVhZ*if{bf%eeeOJy6fkYfFF-f8|L8Jdk-MvK{5Ae_A(ljA5NctJO$;8 z_Dmz|BPYFMG=yI(r=N)#v~&JVe%r_*F0Xk7&MMZYEiQ|2UX3w-Y^^>`@6|;kzLY=w zWHyGclEnwH%jk60Y1%wo6~8AappEHtuK95i4w~E~`sE^pw623?4?E1vPmO^Vqi&FK zNP^Oc#V~5S7JjbjVyfz=;oDOM^e+2rU#IwzMU3A<$+lae%kvXkQ#Ov>;|iF2&3x9g z|1da?(MOv@IruYaHgciA;CWLpm60@MRL-ZOu?5g0H50#j_2XY?AFy}vj3I%g4^yo$ zL$lz6Flurn9eFi@0;d~c>}8y8C6K_fwjsP>==1tVFls#<#v4B3!DOW_J~A2u71j?~t78xUsdNS$jJIUB zm%SGSwokJ29QTVkSx=)`-z{)~gCW^p_NBSHbMbeuE2m+W%CtTuW1`Fs3XL5^Pe=KY z_TgzT^xfGxNG>huT9$Fj#LheP6W^ zaw-n8XO|Da3yUGRTfv*e>TSe`Kgr7Q}`vGgwgNozNrk8ZHPtGtSFZ=5M_ zbQ#JvyHc%XI(a+l;FIky*-xV@@M(Y-I9>e(? 
zC%3Xki6;ajQnWECv7ELh*`e9yQ~cy%n`z3Je5`jJ#gEQWV^J!P1%EsG*xtkaG0n^d zN6TulH*KJ23D%SNE?(R7Ln_(`XZDdMHG9F2Hk z3R|rVaG6p)>{@z@^L<)^N*fZHUEfSdsWyjW=KHCB;Y>*VvV&I1Y2dtoQ~pZK`(Do z$dO|789x7k2dH-^(67F!f*?bq=7dX1B=pz};S(Rg$i#b-&YB+{Hsbb(i z5H^1M0h_A)Y0}r7=&|37_y=FXJ);FGTlpq~D?)g6>@^Ds+)S#YhEmIr5dK^J5AbKl zNWXtEysch<617G+!&jA^@=So`9xqvDZ#ubr8%=rErc7;LF7I`CI}R7EqRS(4!O~_k zW&BP+*AIrcDU4%PSIuDD%|@mum_Tm>-PpaT#kg{S_`hv(4OI<#0j@{A*yy`O_#<~b zP7ONF>hnclcqxj$gnYrrg^Or}cwRr(D50k8PC-U+EoQxrpwE5wd`;XNoILHS;ESva z=QdNBv=41(gCk^-WVTXk`ylMLk;nA3gJ8^;;+gh`czv%IT}Vj6g4fn?)vFBEr+wx{ zW932StS)XJoy9(V=px?>sSw%T4oU%Oq`7wvxAbobQ{53m{K6#qF29UZ^s0e0oiy6_ zTN<`ml%uBKM6xJLqe*FLP$Fw#w3R9e1{H$}WDDb{n((HIPOr5I3ppH2Y(=lH6xx&_=U* zShj7K=)&_^I5O-U>-`W#McwsG&1yW|5!K?FZEx|L%MSYEyo1a%H85S>0M-^?XU$Q0 zRP?@|!*;{1wD@2kMaC_po6)8CcBcdl(JH{WTb@*=GKA}LiiDwiwn5K@&Fq&$4-2Yt zX9K=`#5JwA*t`MV%sA2n_v)-<`y^xtQkT=>Yuky7{t02Dm1y*>i)`sKEoNEK%XN)7 z$bM?o;)06t$Q?OD=D*FE`BE7QDA|i)Foh4jb{Wp(6wpJ(R4|z1!k%BqWU}6#?3n)^ zIOJYR?US4^=GSKGoLWXhRJ`DVg&BJoX~t%Z*~Q%VX~3u}tJt69G}vrcjlautzzJe0 zFzz5r3VYAe=k0)ScbQWE)GQ*rlYv_~YbGR+9 zKo&2(@ODidlg#grC1wrO)O?VBN9nP&$*FL_`~({>k%T`+s?&Xr!1K5=U23#JCHRc$ z-x}Gq-Z^|s?M$rMaupLxqqs9gL(pz^1Yb36H@l@&fP*B)LG|=J_U=h8Tfe*-_j*p` zi>&X%^!bn2p>G>N?bZ!g*QU!}-0BC@;>TmUrzV%lUxLaZt-OLy26^Vb2Q0mduf_@4 zo1X#f>yay9srCikmA2rt{%crtO(kYoyn_B?{ZKUN2U{%{$lm`3w79m2j?^B38TU7! 
zy+jf8+Pq-vKCI!NHm)PDfMA@{p2S`XjbK^7Q>1*LTd(Egut;uCVB`-Eiuz7g<#SYq8pTZ|4t!5)zyBWM241E!;012a^e47TT8X2RX=6L+H*qT&) z1F?UhHtmSc#+uuD{M_9WFk!wFxya<@u#K1gBT;uza!=}xshsVwQj3N+Bt zVZ7&Fc;+RZ6C3*TvjqZrkhO>|1(Y$j!R?}buT9YN!yT@0_f8t>yPN876|tT-{fPE# zL}$Z$EJxgvJ#LjojpaNm$X3TS%^UGp<0-f^Clb}hE~0Znebk%j19f*^Le~WaD4p~J z%7;DxnTu2Dhx~ayrKS+~R?j50X^Jc)tUndrpN%iq72>Rt0-EkUmO8MSPAa~{w!(4j z^H}jSf!bWZRj!fsBn_?mh$)Y&LX9u|*n>@B^j1cl1tqqUmEHih`ENRRv}!!t ztgr!3O87Dzt*LM-MUL$6s$==j37FYlNAv$|q%FTeq&fc@^eURc`mkqsVXYQ-w{ROJ zzb&WV`4F^LUEEahVI!%( z+Q;{n<)e+Eo8af_bedXyf!6n|fsT*snCH4W{&bWkE0BJV3bV68P2Q7LrKsZHgjUFu zA4U)L53$KVKEeTYJ=}Z44sVAXfVVx7U>MOKgqpF!Awhma{ulKcv}9q7P%BI!>|Y@| zG)$~QwPy%*{!yU@sY2bbL79^hTbREnKbMJ4%qqDp z>wESF+;+i<-T|VBv}(bK_ICSbudb%wH&=^h&08z-I@8{?Z;((h!&F!F%D>ru(?w;0 z_Ktpn2d;{O)*K&E-|c$47Qg!IeB5 z{Qs5^w{Y2tkpEYP8~mpf!+%N{HbXf4e+0FWxR`$>vEIBtxYR3!X2*Jx?58eT_C<}H zCF8I+QVw1IJb{ol4YKK5LXJ}pvmHN%(SElmc2z-_vpHK&hrU-}<+A>G;uw!E?_=?q zx+%rXQ75j7pHXC(AyRl?8;fhb~g>Cli5SC+)169!uq&fofXij zDT!sfFR~DYrIh+85FI=YKwN_t=J_um`Q<~|45dY+*SLXGh!0?qM$%l{>^| zl|XmpLY6w+0;9?-xG}{%tUqf^v&Bo(Ni|t|IrbcU*UTeF`N04-NBM&*7qjW16S1Wp z=w8i9QnL@o_9P258N3oiSVQVAN zXRsIgO-p5Y!mXtIF$IG!dt=_Ek9hovgJA6 zJ2sR0y%^YhH9dq1P9KB~fn}_x!~&i6X}?!7zRmazCmq9a^p)2%eB~VOn0f)aSq?(;D~)(O>j3Au<|gwnk!Jc5Mi?sT zjplzv2pXqwR@NiXTd<8Dl$YYn^X`~+z?zzO$dkyRfz8Xz=kKvi@afbdTFlP{37Hji zApR3uxhs$UG#p{^tERy`M}5}tvXRAo38o|0n_1{5b7pd}O%Pei@$dTikjtzKF#UTX zZJ#!TyvE-~rJr-~+wip*_9p-)l&{A}->1OGxg~hv2w=F|J`9k^0h`q4)Gu@ybN@RD z>lR7kJH=d7oTG%N=FOnXy2s&zCKM?|UigQZ?n;M{6;WaoZwY55XJ+Bju3b>9tw95R*ke({7|N=ufTq@13Mei`pQQcN`74aIT}i=!RYlY> zYYOW1=9AtFcj%ZsiuVXxjm8JF1hLBFSZ=uozv=B#ocvG?qYBp0;r`aN|E(>?4m?Ot zwC)mj{yd62q#bgW$)S{U6083*O8`5PP?ev86KN&xat&pheKrbqmRGP1z1w-`0S#Di zK8>7Pj*!QgwYWaF9`!3tu`^Q!V`mP=qrHxlciRrlRmWrCw-(WhAT=_-n~X||XIOHE zBt~5wMYkvMT&tQE>L%sT(5c#N(Ju{TVJ)2TOfNF(s0Ca9e01_i=Jm>5C^*gyjfeTu zD(MQ5WbFg^Vib;F^OD&Q8F9NYy9HNRX5iIdf7l|$>n!H=NkN)tJ_a9M%W3bKPp(_b z$RqhF3?4X&a^Fs25s9Z*n0y6IDZPZLzT&x}$H=3o2ESDlFtaI3XwuuC>`>cj*1B~) 
z@(WM0;V*`umQw-6f2?Whu6jfU-@Dm^-WY5jbCA07^x*Wasoabs65wRMgj~!T1tFfV zpl4AHo*8F{aUcA7w?P?H9{a(5L5Kmxb(W#&vJ>o~@--%Bs^~D!-H#F%p5lIVmQwJ} zE!>Mq`kYVnOqkJflFgXW!9D%vh}99NS=-=hdfdIAg>RDPp18_Uzem$BaMdd&{B(&8 zlW4+*t$M7MU1bX$v)IT(LOj-)&4l5qm=WlNH{yf1l-F`7Kj001sJw_8i=-XyXX)aX zGrzzp!yAIO4;MH~)pCu7KhZ!%7aCpLxV}Y-wAiF*f^feAJkV~S;)%(q9M;KAU6jNI zchvFnsY7Abg$9Z!Dx}iUa}a&YVAe$oQk*J@{P>|*y;T5iKAZ8WoC>i^2jRxO$84nO z3N~Mc!_VcX*!l;#lpVDR^ws`CLO7#ID>8YdmzJ1W^$f;+n1fJgk0wV0&@9~>m5Z-Z zzM>Al9#nDb^gYZZr9j-u1LB$EWH{OhEpOJsxDueKtGht_mFV;7B&OJY0IB=7;yKVC zjWq|*!Z8VA30?}b$4(*W9qD9Hxpg zA?i>T$MD9nyIj~WHI%Xp!k(!cp!cXR{@SyS30HjJRtFts`&0(P9K)$tHf8|KJ>r9s z6L#|!`*ZR5?}Ip`Sp>VU{Nj?L{h9aeHe3$_=}z(jrd@NIqHNpX<$_XL+Lp$a`WjPv zs2uj3=zxqyCGdG{OrJG!s9UFs%vGG=+$T%!$Z7@H^f8k?JpP1POc{phdQ!Oj!XsMN zF9Kz{WhpJ(88Z$#@D2Vd*!ZMc40knHgtH?qsaK~&(`@#0oC&A(ek5Ht-b7xNO(K=Y zd*JdxIpX+a{Ccht44qzrs?sy2K3;}y*wr$_b8Ref?N`|HsF#bc8iS|oe#4+;`pumq ze8`t&kn4eySU#vK zgdWMPYUNx07LeAGG;sX%90e*8&4Jd|WR!6p&5~*%;oc2MDjWsDPn&S;(X+VzT#+F8 z&3p9IUB~5apGb0xjiB&lD{Bkw0bA2J@{2T~OS3k>cJ;+{R?dvo*$hA<>8&t1?ItLz zoq>F>5peBmHVP|4@k?qxUwQX5{+cO|m&$UPZpJQHTGz~$d9<;ZRq^O9af9hBZv$D` zsW5z18P+OvUv$YaO(YOg3J@C$^)g_rPI3u4%L&XRIl1+tb99wY_oP>@0G! zsU%NF8@e=62Jd#7;)a8jbnSa8-igy835OB*`C;g`?| zYH2FuE!rQFm)0$&ajpWD>yN;?SK*NRZ9jSGD$smcDavn|#2wx zflbF@_>IxbHfJ3+E)L>Sl|7;UHlXa=?|fp&R9JrQAz!0?m<89L_U9f+vE;p!Jgqy0r!bbz1O>zkl z+!?+GSDUtwcH}*pzwI`4Jlo1fXqsa~d@>z9#8I#ID^@deJ1W<1XHu`VsMg8SZotwz zm?Txp`uw8tq?9gtnIOCu%w(e@FLSPYWnhuUS)AfOoW^)sz;xGT82x)IsfI2Gd)*Ew zy8as`Z=Ay)KGMcd`>>5(UmQuFO^?91A+69|lLneSGtgB14vwFl2Qw!mA(vXiHRzX- z!P9y+e782sc@c*ngGOVFgahtAu$@U47Q=ylN4W^MNf>M|Ue`8-klr~H>iwF*Z+kQ! 
zvuEd0#~ed)+GCDepSIwgvmw;cctEs;AsmMGignGX_Q?TQGn15c4 z;yY8B|EglJ9-)lynL-y~bMbzpdi@z1SLDsyL?K{yTn0U|% zJ);{)LvuHjr+bkw+7@QlT;vbj%7me63N#{5{NLRb&6i);!sc{1p=xqA4Iiw^Hf4BI zMRPo#@IV3In+MbWn_0~NwFAZOOTp{*z06=>8Y=gha8g!fxa26|DaUP~H#wR0pLPrD zKUuSyzvHp;(R7k*naexd+OdZ7{m?dG1H_lBu!f3!jIRpjPGro3$T>e*f|olP$0%Y` z`Y^mCd;_;CQc-08m;acqjt?7;vf1}{(^LIAn&sy}DvSK+c2LfGM`)p=Hlk!Kc7u{J1}kth&$%%FQi+t`27kd1|a1hQk1j z$&3YNal%ot!qGwhi!(j$KhCtpe~K9MPZ5?ggk%3L;vbKB+`p1oRYKzE2YDuZ=Z)T8 z-B9s!zChdTHgqi61Wmu)>5hdV*=Ww;T9!rQgAJ4L+mQsEIBzc-|F)8C-8Ta6Oz>hI zZ_M!3wOTqdW;#WVe@?XD6}J}c#?d~%*mNmvx@$TD@25Oq7bhNpRSl1L(X$y}4?Rp0|Ip@i!l_rXaDj9H9xN`HRr|9$>e13d~=t z^$+P{)$ik&blE`q@kt-|6>q0$Tb9#;G*{ZWqlD2$S-?I09lY=}n`>!KVGVPkOFkOz z-`B-D&87Gxzlhb1)@EA*mt#Wp2#Wc%fDaeh)9ytvl)pfleYOx_m}3nbv)#`1sh%L? zNjj8TFb8wEJ#2@%8p?e71}QD2{LS1yY}ng6Xqz{j394OSnpO#I@o0g2`JqrXFp|1w zW?*5~GM2o%kXIYL1oPG+S(umdI^xIrf{imNi)Gh%``cjCn~K|93c&T)6=oVNMJ0YI_@Y7sE9?ze?R4a0H2m%7cc0*6Li$li)Cc%0ahSX8H;@L( z>|(2W_R_qfdtB1YAl7+k3tHdZ&fPgSi5axarU6ZRL1OMGbe?7hM`zCj&+K71c-3Wr zac2=)ee}TR3PrRs(~uIqw~*?y8f-D%#gf#zp>gMPc(q~``@Lcxoh_-Ls-tquYLYZo zD%GLnU=!3oTLdn8{g_+p7OwpG7V791N*jmHq3V+on7LvPlX-1R%~~g!Q}iIrUiA)4 zf5yRgA1S(`rc7-whq9^PLGa`T?vEJi%Hr5P;@pK4;k6gAys({h|(l(cs4`bY}cY@SYJ#U*5sLK7R%Zehi?-%PBCn*qQWe=g`l^QLx); zF~6EW!QV>rp!18QX^TY#+q6jzXF8b(IPK|V{uju#MFTx=3SdiZ5$J7or7IG5>E4== zxNB@G4*a7`vm&F#GT#XO_Bo?dkS#8_bQ_{fZg76$R=3=>6~3K5Nj<*~LebGOMA^Zp z8C%NT!ALZFcL-yvI$)x768NV#z+&+;vsYKr-ce};zVFe7%Im49X{JU*Vu(vEcpHb_e%H>3@&CR7c{6% zV>bx9N6_q-H(+%{CU~M+S+xNg+@OXBzPN9R!km?JE%(jJI=FFH|{|CluR@j`hXWM zoQw);R+Q7y15N$hvBq>7Cymh>%C@wXQ2n$T zmfU)oS$d7epKgnplgA`{lc+|AC-gD9SuQNy>^HpoQ_0VE51_XT%*E-1BFr0H1B;7f zF+E40q*at)%{UdN;~2_&tyoTfI{VN_zKF$cKFWOTw!xyb7z}Yf&9-bcAPJ36{CW!~ zW)*Y-o(0KKv-3$fvT_oo|9r`t*9T(TmN}pkexJM9UsQeJ=k_dWT|SjM9YNFKCj^pvHo^XN1E~Fa5?B6u z17{A6r@Ni6jr+)zN@$Uc>uFHh?}#RM9k6Ob1G#0Xa^4RONq+QYXz|(y zzfEKKsd@*1ciO~WR<`k3FQRFAi33f|;4tCUJ2oJ10NeB_2aWy0SaiTd%)5R=^y$?m zluZA|G@b0xL#2UpkBY>tWn(GjtUA?;v53UcM-X7*M>@whV3V&Bt|+<>eJAHod$1i9 
zEOaE{`YdWxvBB8MF5F_{2JC@fV5HNZ{@7>Ikl_*Z@IVBL2d2xYO^N5qIJ3F?Nv_A3 zC5NeEfnEi+9vXr>C#7?Sx6iOZK?v&%&Sg5}!XC=k@;`@8W?g4v*m7xW%65IqMS47i zM9BhFoWC6heVzdGE}EdD*#Jy#Zs*m`X7JT@E77Z}oqL_RhpCUSW|`j8Fs$hloZFd& zD^LXGF&pVoi3=%yx1$+@1JE@_%ybAJ`42X6*(e$qrcjz>IGD z9^l4xTjSffe5w49V37O{6{hl8WaNp5=X zMf3+OUbZgu?4D#F!W3P@S{qey#Q%#S_^c+EF=_(5L zvBz7cVx;h~fof~iiq?0Usz*sgGndHq$#TQw8$DP7{$W|`q0lOY&3>yK!U`7AV# zThBghoaLFXzA2OPp*k#Hun*u{A?x6FKw*hBO<4Jy6G>bW)oj;bHzIXd!<*Y|u7?)p zZjQwAKlAbY&r~?)`;8?$y8uSx+qkR=4e&`z4<<}&}_BJepG7bgLiFZx1DT6zb}puf!}LyBgSD;5OLO?9xZtYHEmUJsc{6_zf?q}`PyvA`0IRP zOga<< zUS5Kq0&nBfF_Fyij2n01q$OU`sKG&{`t+e(0bG^*$ob|6KIv`?O8A|?Y>5U~D(qzw zm7Mrl%Udw+t5WmPB5gK#>s6L6sfeGhq%yzzo|GEC1x~Fkf{wor@Y3u6bl2%Z!Ea4? zvw9fSfBC|EmQTYn`E^XGL!3nN(t-^){rU1;_t~}CrLd(;1^4TFkn@FDv9+`d#<)4N zg+&=m6tIwrH1@(w!!&qaW|KIBh5RfK=k-)@ zH&$a|ekaSnHV&UDl+h<~GUd^v7m&PV5xmr%3h`b$u&1+|*E_3#pVGduqyCntq#Dhp zD1_p{(=#c+w?D2)^uW+_x7dU^#<+UV6BJnQ#L`KT#91!{J2x-EPVXDg{UVpf@7_em z+wL>H<=eooK6zRa%3rl8f>A~rm;fEh6@oUp(G9$XZ#yAvhpOH2tC^-t%zH8t3U zW8rXDW;sS}8^%hO4uIocGB|wH8PN^r`E)Yn6CCjD$DJ*SrpK$Np;5+mJn(xd*f0Lg z!ZN%-X-^nAeLcxuIb=}dCrMl>w&l781=5L+^RV-!19rC?fSH^T4U_l83HF6}nHuP# zuN2$=DubWqqrx_3T@vdCY4+8;9ZJpav#<8)oN#=s(CU9n35*-_FATB%4@a>1ha=d| z5Kj1S7-IJ?3|Uv5ExPEE($qZLN<>-}f)6wA*=q*-3XZ9Gi$13AYdV{kE*dvAyeVMK zOwmQHO3|TJyPEDEI(H&ww7zKa(NvMoh$;3RLRG=<4u3&wvbN~#(wlK%_e?xD_$>RXp++XP4+U3!O{lKYh%z$n!Fl&w(X!~BROu>YQE!F3 z$v$oTu?1W%l)JsgzHyv+fN12 z0v!*!ChyBOs5P>6*$*IR_6dHn_IFm&#F>>$7Ocg(2F|X9HQeF9y7HClJqd|1&o|s$d4=Z#Jw&h zV4>2Fe5_ZJPiqksSDN6OyZYeptsHhJJK{Nw9B~WmiUT$T)0nBVY3lbJlAALe`;||o z0SV?bzG5y@pM#;-zHhcP>z*OTPr8X#3s8^!-UDJy89?K71ZR zt24(DZX8JC9|j`VP>CbYo?)xs&ca$@9JzOiU3Bj=xMs{y8aqM)&n&3Nnc|Q=>z1SY zpu$fL;_Tgr+Y%yI>Z*kQ8`I;?cri9ajF zu9=ASR&D}MDJ`ZOTEnhPQzr9>S6r0aTedkVm(~nVUBpq9q^0 znXjE}+x3-Twx|}C6gZP0-~(JqG^VM4CvtUbTZx^D!=wp11lty{GY57s$v1_V;JO9^ zj$MJ1eWzLF%_XE$TZ|>%96i_k14I8FgRGSq6j(aQ;r{kJm~h_+BLoF3{+caWrd?x# zDc3}q?=^9gr#I$LIS&w;0~`I-&@OE;-Zu#cnWG0-p4ukn8g7ImOAP70=Rv&CAP!(w 
z4u-uIF?c|&zr(IoI~ig*%{M2JRNq`w(N&?|`5t)ZP&BS9y+~=ppEC)~Vv(~@jot=D z(V+CJVB#Ewm*+hI(W;Rcv-AUVY*VDCvM)seW#5?Z?(<+=FM}sQ+J3_z9^$p^C`|q> zOTTZ8F70EeSvL%)Pxr^y-O=pT^EceY&^qPC?NBDQoOJ2RvlqK(30M9q<0MlS` zn{?tmj-E4*`U3v2++oY9cby9+uaqF8^b?SoFCd-sIp~mOL3+s=wBcz0&X(38#kH>N z@0J1dv@Q|e&mRa?Yu>P>5*y&}luP_vtfh{jO_URF3!99!$?;o193YBhHd-5~wQoDi z8{`9}&B0Xr?l$!8dd06Dn!~oZE8(Nwrw}HP!?~SCM5||Dp^*S*t}H|A(U)o1u}|IpNt6cB?9nw_Hhb$!{5dfe8+-%s^PxA?5-}G~E%hY#w_TiJ-$d$;v7yqj z+i_#Ub(Xb5n)!t6(X*K54jHX*!MF zq>bZv8B*T8gHQEDS`xYj`}cX`X~P?A{!0bgplwedhOg$%H$3HL z)jMI(vx^X6kxt(SDq(c=SYl>k%x1lV-ROTCCRQI~{enKT?gP!Nr1T*lI!VNz)-IrZ z?uc2UA;b@>#jvDU`Z`g2Q)EuE4|XvY^EJp={i*bS7-eM#JV%C>B-0$#-A5m5_iPcjfVur7~wKFMBh;-hcO!3h)Q(TZ<9Y*5R}Cab>&XfoUZ3%k_ecXAbX=F>3Lo2`YV($|6A zd&nLft`Iy^-c7+@Ch;1MxolYWS#~5vg?Hk-xVEM9ILYJFsL{HW{al=aakC#lz_|X+ zYW4$h^^u#Dnb04XdsNW!%+q9iZZz(gY)Ze?%s|@c9#{v}!uqG_oW14(7~|;!mbv5U zoYHJICFeJA_N8wV1~)G`?u;h;?r|lvMpLzDDtV~5P_oqt^!PXvTHaZ) zKYA&w;HV}>ZSZDu6c^Eutv>XAZ49hccQbQ8`Ha=5NYcV~b?P^)na>mivA;~nEuS~u?6y#{&BS72e;$?P^BVa;M&;KU_gsJ6=@t8v?bw$EzR&8J`$8l1Kh}bO zBSZKR;xBuPQx+Dc{s-5KJr={SW~$(_qWjwGklkFP8TI$nz^nhH*YH#*(k8b z->h()K?%NihR`4*K=bbh5R+0_U2Z5vq&i|~;9$ymI0|fX=8__3O&dC=@*?*ge42s* z&0JfK{k@~{&b?m9vbrM@PQJ~psn_J^-tnR6_EM~94(FdNtYT>*Dag%Krs_gBJheN4 zEzk9#_K$!OHCy3A${%iw4wCap1<;8IU`vwTK*e<)E~rlh|2O?9HCRaB?#{rQ8g<;Z zn2T(_-Z@w~;39R)6+##L4O^Zj(Zgk_xVa|^w^}vQ%JkLn{6zrH8)87it5;H{^%Hm) zq5~Up>zHnz2RAeF2An&fj_tNXXxySjR3WvPGkNm}4@U;mmDYo-Y=RawEWM4k>uueH&Bc#vU=Eon-fUpmJC<)XCe+y z;`uUh8t6#1*h(DWNmZF|*n}AkXpmqn_*y+0r{1@N{;zFdzj%Y<*N-+7WEx8QHyr1u zs>9{uwg_IeeW9JbC!lt%1!w-~x+u^wlXl&lO+Fvu1%?a#Kw_0Ib%=DR zut%Rl>&o#*mLXOD*^f)x*WkKqN|aV+ikmd$(0uq7*f=YRt8>VMfv#H6asE3=JX?T+ zKObR%FFZxP10%UgjU5=QzMH$U=Q$s+{31N){kII=j2w{V?j zAIo;IH$pX5K1i48+FRqb(pwbS@(@j(+h~qzqu7GhX0`k!rrhR${MnO~*L;}mk58qr z8!9N@sYY@}Jf8SggX;UjC`n~Bg@-dijLQ>9uF`1=9~z3+TI{Lm-6Iy%?S`TE2k{3l zj^f@-lw`Rx^l5v|S(vN39_nMCLu=+I_Qt}S9WmbkS9_w_1*?r{QY?kp=H2k3bmg;de82B*bD;kO4nnX17rRwkE3EtTQy@f>@cyzvOAOPP~|aUwlNH|$N@0LAmy 
z;11`F5Vy$)N5K~Ikhsf?ThqZbxsjDAL{dM?Ec~rMh$5#m_Huncylj1$bv|%mhs0w{ zSfT=>Zf#_9weFGJOMe=({s}E`J_a~j9&5#Yk{r}xM28CwF|GyW)gx*8?L+)DQ8`^0 z{DBWL)Tij-E@-1PhOV!f%Q7lQ!TI~`th~q#*V(NTb={keNjBfe3ezxfZU%Z8Y+}NR zu|lW+J0MLX{oz7!C0;;@zPMHn6kUo!ytn6O9P%H^LR_|;dDN(PTT^x;jHQ#>`1*iAOE0@eeeDWYA5s9iyx`tKoERzX_{8ltG=Z#QVbjjA8N4L)lI&=2!pGqtAjh6yZ4)_hgR3NssO_ z1)a&L%&u~$;~uk5&2>z9>KDjSte_dityEM|hEx9LvupSAnb{5xrnxv5+3R6=ZPRJ) zRDKq^IV5rOFNk-)@zq?$b02y*D;>L3a&VUQ5kcF#J){ynlBUher1MI5*qdBkZm{D4 zSgfu97nF~nTiw3@r>`@Q%CYVKzGya;iqcFaLuE**>pXVIl!Qt&2n~ixaZ!c{jhY7y z5*3vUk)cps=iZ4(rc4o~LMYTNM7Ip@<$0d_x8Akh_xJpLt#z!mkMq3NxsUJue7^^u zR&Iogxhys<;S?+{A4R|Bg;0uHC%5aqHaO4E1E&Mc*mkcDzv&KPb4};4Z7W4w)A*~b zq>Bv<$>+S`)o&vG5;(r6b?yjfNk3rk@bQwW}QzJ@}r|z zm3=y!j3?>z9U(5XZA7URWlXBBm-X%n1^JnBaAjyFxJIodAE^-W@n|31O-{ynkDb74 z?HY0qPU7cv6+q=WJyv{q5ucLNiJKpCv~_nayDnu3VPDq>cHUe78gnElp?E38x-SC< zg)y{g*a;ZfYk_Bu)`Lpo2R2+FB*ktE{_+nI)K7c~mhnT#%r2h}ZSq9V9hq?Uh#c!4 zxsNxwmdQr=WpD>%%xG&sCI2#SAp5J|Ew&**iTb83Vc+_ek)D(x{Vw*U=|djC7nx8B z5}Lu}=K&PAHx&bdgINC^t7%Sh1%6V!#@WfFvXd%z1dodEvrS@{zT{pKJ6F4bf?R(x zjS*+a`Sxs(?XeIm@RfX``BxSqw;nFMEQjO?u5@6*4%&3B3{$>^2^ipbB?lpF23Me9e{ps#?kCj~}lg5P~z=!qI0e7DhyA=u{ z)WVdG&zb;_*RNw~0$D1#VFmSNj?}JP$d-MI$1cTiIKH9@%O@F9g0DHWsj1R(Z+kB0 zYag14r#DH?MQH5XBzXMj6UyAG=bqI>bBzHvA?@NyPXE+Jn(yES3*0Urj{DCQ@_?!`Hu?@sMQESauHQpQ&^F2GrP3l7#*CaCO&RQV@Iy@+A2b(K`%ec!Mg!|O=gF)h zT;T4=Nl-g$DCO%PU>BaGP(gor2&#Sw)#C1q6l&1uqImFTxP=VXo@GW2E8y?91NqdA zMYw2mCi{qs`LT&(XlAf6J8gc0cZ#b*kH(L<$X^`<*ArRz!Exk}aTAOsD@3x%auBnm z6Mwi~uP~i5mptZsK;rHNTvSIGEL+*11+*LR#qTq?r6c#V(=Wc@j@4&SdUY0@Q&Aur zwe2)!!f7lYH;y_#E7B~xotT^W38X(XaZa0Vvf&-dRAG4r*giG7^w5N`$DQA1bCmym zaW93sEr*Gc8>w4%FYB{71c&{cVD+Kv7<0}7?0Vkds4x?-2;T>J!4g#A=f$L^J?3Ly zJ;L4J)__SvEOhI>5q&B;h6^3T(L{R&r7Bp#t93V+>G~3okoSkkGC9^^J%+RLzsUOQ zzrnH1`*F0h3wxoe!Jg!8!NSU3F>iR9oAK~G)3PaL-?cWtgTNN+SSQqZ@5)5dX&(rfHlVV(Z(zse@@zi zQfGjQj`{G)J76%f9Sq z=|N*jt6c`P6%0w>W{<^1v)RA{`{-fAI5hby4a>8qz}N}(P$cuZqJCN=f6i(mnaY*Y 
zzBiWy8s2tTwKkX9GIYp1PZQ*Z?}Wsk>Tv#X44*F?8rZ2DlCXk2eX+f+}7*9(A0kj_7R5{tjRZ^wr^jR0m38 zK2vKp1M4&^QGUxfJd~6NBHAL_bQ($+sVQgb@9X~KmrvD!?AJVcGzgphf;oq zw0zZ7wl%gA58c>6z4oaXo^z1NU6@bcvl=C>nmK0!Q<8l%2)yp7)ep<^m=~P?P?bIpq|Cec+2;5G$i+_u5h8>99nohq2koDI?!GJ4W5io zfxF-cp-ICrN!gog-FFi{XBEPZmLhB(ypL6?Mxv*`IlH{|JFEJ2i79C9;|iueLQAvL z_&`Ge(+62_>%`eICZF=~`LrfBA-j~~8U}#;S23=1vyiWmFvSykx?;NF3<}+A*y&5H zxa@9_xPNgF`?@u0b#x0WRbC_RS?u9+nhXurk^;T#@u2hd8Es}3tWkRpJ-MAsbq}^u zk-ZI;RnHOi>clWmAH#CC>a%C^GSKKdj#)1Zh1olzS$me)81=fHwTOn(_kziEafLQ( zT@ym-M-<_4MjZd+*Cx8Utc%$X+Qw>&t!U=$@zA-^2d&D4*xT|REAGgFN78pnUtvn~ z*L#a&T?WJ2*i~q>Q-ZB3(xMxhduY}7V!Et$3{IGeO#qVxq&)f-zv-zPrG8@E_V}YX z{h~MRkbcSjF1*Aq@JJym{W|7u0u;0I9_RW!gT2#fL~YqZ2)enPg0}@&ZvxUXg6<(@Pn!>ULv7M3 z>e7Ekn%d>e*;Wdcy|;l2g^Os?Bo93Bss`&bLW!PPQU0%GRJYZJcJKqp)<2862jAz^ zX*}gAuO`P)uLzwN&>V*omQo{=@lfSZ+<9a*`Rf9yJrBdjDjjUL(lDs^ie>lWvgl`@ zJsWUvFog`52=$ihY2)cMS}{@qY>yW((;iKX)i-7p6a6u1v?Jd8=#8I}b*LgH3nJ4x z(Yy2>o)?^@`|}WY?X;oD>H#o$<$T(dTaTJbN7$IL>TpHV04_Y*%3VGaThVAXg1Sy> z!{|FzsFcyme)pQwRW3(xsW^_i{AnNtt3`pz3|%T3cm~_vg_7*zr7-z@CyUR204+T- zpta!|>!@u(Q%6Oze%dHDK3(B=uKR&&Yx>h7^R+PIdpmnxh@vp@*rg?WJuhWA6esG> zBw6*-tUas(9Og=a&BZOaJuHC=FTP<}=b^TKM)XN)Efj-Rx);jPDvtmcEF=#>#A8>K=GBLDjPN*8!TWD+3?P2T^6G zAN~qF%FBOOA}yB$d{ZKpr%ufQh1`KOc7i-@`z3(f^qw&sRH}*X&#LER)DN)hBX`i!yk~rE!XAwDzESb~Kr#2%)3Gp>SEWx9 zKgl(HAnvKq+edLTuXayl?&fH)sx^d8=SxEyUuJVT4Q(tX z;OWSER?Lj3$|R9~s>HBEiJ^?7O6gg&Hhr+1K(~Abu;EJtRCQWx{#JIT^R1uwN|j{z zFlZQ9Tnxl>zc*8ASb@NGVm^EG@h#SVP=(%~>rnr-H#R!F5cKVrg-v&qS##oXvT@1e z0){XYUoGWQ{W@9R<)a^#-qm$4RGw78UvZWg1|f+;lfiV@4VT?q-> zss(PltwHuxKl~)lve;dJjeU{d1YM4+VR33GlTV!l>kEf6vlZ{*>&Gs1&3r-U6trOK z2Y>V{7=SO_tl>pyI~!w}ND-@Iz*Au!HtL**`7$jyAuomhknF`mS2gg^9nO+u&*M?m zMI3Awfkvn^6D|rAF8<#rktzSCMC||35gh*L2#yZICI1maoc<+-IGbmeJq}-5vBCUk z#o;I)QPvb4QO(0GqOoba1)<4lqE`Xlg4IP#w0CxbAX;XvU}J7f#j^dTqWBM!MBSfV zMTNnN6>0BJh@vuYiW*a`M6)N277dLp7AY_4Ez7f9gCMMbat`0)C|B*zN|09V!>(ql6HFL?y z_7*-l{g&SD(c&!T{9;-?*I1<5-zeKw$I67`>6c77cAu?433IUx$Ylxh71It&Z{LF# 
zGeeo*=1ejw-9RU=#4z>p11!kWA7`s*QhDYj%8|a#a^K}q#9t*LF=acrFyYx}38EwC2kcF{yO^pR@t&E>8iPem1HOJA4H;3h$-zsLekbptp$*D(qX`$xLQcVm zt@mKP`bXSyxP{&|HL>set0;GOu1J$zfc_=tq3cRKy~`O#d8V5|d$k2kkrih;WWMLz zbFx8Z&2k7j8AlJuZ%-bAi4xF_5j|4o`w4;lAAk3`uXWC_48X$`!9_E-|j7jlnFjevxQ%jjnr;a~oe><0wgV8ux z00&wAGFAS@##{_t*dNoc9i*sVv2?g!C0q5*3nYiWWq*yhj2Etc!TBSlaZ>IVxSG~p z;pn2veUortRFws;12RCA{sK-u5R%nPLt1hE2EQw78ICpZ1AqI2l-u$b3!88Q>eUTM zv^bZd!so;9kzwq0X)5$|`vJ2pVd)KBVv%t>dv1G>t_2&=nS!gJs5%ZBp1i_A*Nyl? z)!F<2jYl|m-aY1eK?70-O<`etG%Fhw4U5Jr!s>uJ+%fqYDRo)$BlXMZYx{KZq+cJx z#o*Y4?2nin5(Kf$VyeR69rGU*PRm^{p>6qgDD({AB699CYrzQ%munGFBbCPITuh{M ziC$PXcr>Y5B=W~2cJSleZ(y6J433;@%_bel!GRf__{^k5^k%0GKJp$Rp0K#{k8_e} zL3tM6Q@WGw-nNIWezhO1oYP6Bbfv&{&;(}fssdX%Z*ZxZOrm51kmIlNnt}bGE=(Hi z%jJ?|#1I2g)2TVruD zjwQj%LJSLVV@lJuL*Mn6Y;s6G3{BF7a=J}b<_qbPjWqa+t|H~jf>-o97_6^PH>a%U zzgoMIVSzD4s})k_nhY9oEgC0eJYeer*5lpBr$BY?ck-5e!A~E4in&Lc@o9&%@ZQiE zba(7uc_N+IQE{xA=J37nJjRFgEe_+IzGhs%mtbqW34GXe7Y1=2Fe+>v6X}>FWT~`d?+5JCk|E-duKJ$QgDsEtP~LWGQC;3Gz#nqu7O`P?`V4s-t4bWBOCR zBUX}5O^$>{-7&PP=qQxLAEZZWLNNE4fNAv~aa{RVHY28PsxBAYoe4##VE!oNL%=hCy zE{+3Et%qr!-$-|+{>591yxB+Z_Z6;2CvldVAMH%6pyN}N*sPrEV0!5Tye?b8veu;I zXxV3ik@f&bY%k&XtHWq|QZu{I63UiH9Ky17&R{=l55)D(VFQ-!#UuOr!3;6oBe6|^ z246YP6@A&o%Rjb*U^^x9IULH=jpxwQw+=8~uoPyEa)gCDi|LG3ojAiM99GGRSZml} zG~AvD{oiOpKk+-^C$p8l8fsIlx(VC$+>2hfnp5pSVl&4Eh+|HVatCX*p=IM2-p)jk zerr~u?hAEN^!m*ti$8OEpEp3#;QbIYErFsMRmdgnFmv35xaI3ZOYV*x7v(jmGU;}< zSfbRUyQgg-XwP)$)JmsAQqO2i`!w{k+{wFMeoivi4^!uoLG;qGQcNDiVx;O<)?{eN zTxvR5YeF8xyy-{!#!|R`uovd(i{o_Pet^4!_v6UP$9XrwbxyU-n5Aok({Z5#cv#-& zB;)U(^oOV5y4xP+Zo5npud6UCr5S$uR52Oxl=+TDCK>PU#>DVrFh6z&)vp?b<_DVD z?#F9s!w+#prD`FV_V#g-C4;CYZwJ**Q>90;BPttQTDbkvp3GrEKP&%<=LAP59On`Z zZ-GfyHXD*w$pUljVc)J9P&;A^KWy7xmiW2?X2&~Ipq30IT`a=D-SfdRwGzFi8^CIh zFo+#F6;9jPLcpgU>}-nz`GAqENqkhdzZFd17e&*HYqs=FX*HW8e8XQIQosl89>q@O zL_&OVG=|OeCDG6@koEVZ)1za#OYN&*(0X?^=HNW`xo8qK4V%gB513Db+gDRtlPufN zIRhpvp9a&!(xYl?D|VX8P<7q~KJ?gNI$Wv@>$BZ3CEU(~q{k 
zE1{K#8wq!yva4zGB_dV&_x$#KU7r#s~#JMJXg9x+iIBRYaFGb+IvjU)W1#Zr~IjT0#9 zl@0bRGvV({*@G@7q3~+6xaIGTg&&rZAXT;$DkORKxt}GjcpplNzuVc8)A?XAy@yYC zI)pL4a?m}iko&sC7qo@CFlN;w40v)8x}zkl%uWe_+wMih)=TK{EHOEgvIbt($#PyY z`q(l>2J#inp|Us~9)x$ZHJ!sjJtjp=^NpgM`3`XN`#bpj(Te+a-v;hvJYqIYrfi6H z6SFP8N||{A?oF!+tvJz1i%03wtBu3xNM|J$gaKSNr^WC1F+|wZW_ru}so{`khp}`hC90msOQ;2T- zO+OtD!I!23Z1UNqU_B`T*Dg$9Wl{x@|LGa)w5(y~(oBmVgI!%U0X7V{ERLRah2%03n;mw6mFi{V_hr$59_5f*_!9Z0%kWov z9d`Cb;L4svw%ya8MT!l4HcES$Qu}0Fa_ktVSe3`J#N(d513n1)G_>LS*f?0c{xCSL z9SiMwg;Y1m4*IG5X0Zi4GrStdWPcL0g*7r`gX>(9ZY|Ti{ z#DcY*A-W}n(jy9RzP2toi*qfOM=plA9~Dfse~75?s1Iv3iKjmXhbs@-EA$jZR8rX)9yAmOF?}|G3H0HBSf@&Nzd6CK+IoVmQ`~u4S@!heCJQF3>$?1Xr62Xv&L0 zqD*F6`1h$XhR-0du<%Z2UAFUM;$%W(3laWG-hLR8nCg1Me){0Ai$ zvRUK}J~uq5|Bz?Q#J3(ll?-PijkB5aDG~GN-iL)B2f=F+faf=260(nRNlRwAq2x;H zNSw|xzi_Plvl7kxbsx{}98bQ_N7GwDD(~ymA1tjS*qA7N7-qsj$u$*xFTsFp3(;M@ z5g(_^F{#ZO*m{2x{djN5rLC-E<)-Dpy)~fN%jIZz>m;p{+)r&zr(l8CS<8;m{prn* zE;i9}Bbm3Hr`qB;61ncBh^Xnv*(&k(`%L)@cL&pr{3iMxvWOb;L@dNC4%$tHkdXEU zBP#E4gZ6BKFO$RsU$8lOh^0e^=?W}3atW+d@dCMz6QRLrF8sAClolM(rm^#mV!nnI z*6A!}n}u#*HYJi?Pd*Lpx{|oG?$6eu+ILjMH zIjn|&SIOWO7Y8l-rZBgLBvf)TV-7E6sQ;-rW`BMQ7uM_rbH()8;t6Naz<4u?0#*6c z?EN(Hj2ZBE?ARn(TiB)J0)F{#S#8Z?EI1U8b+_X%y*VGcThw9m!l_L8`at}7VmZ5% zRSMffPGf1(F$!!nCXWupCjTV*(CSHtd1F%6(u4D#&+*kyoJ9H&OW=gqOn$}Kmh;g6 z%|Da3qLL*OScgXyE>|~$JH2<<_akz&CS?OGdOnq=8vI~^ClV=oWPf`^Q|AvD6f14Skh5ZA_|dr>a+wkgmS+0E=Y{Gf6+`tfmmlUD>GZR|$Ssm7txRB62P`Kj1QzHKnMppi# zCan6SCU`gqJ^v$&c>PNl*`YlRIll%HsqBT3mi7HzIEKd!eV-b@q z5~$2!g6rF|1XF)Bh{BYc1+i?H;GohrQOl7c0T-Ann0D|%`6a)4k&5avUf^6{d8nmL z@at!-DE!O-fo)uss4>M%~6E*)|k7M4zr`msS?E)_fK8 zth^@JX7J7O%122-z{dc~sVkpYDsQnC1sAUtN#{!Q7d#~R3tuuU)%40l4`dZZ(OCoe z0TxD_Ma_PJmA0{H9ctHS1nN-WrP27eE|#gu|Bvd0-uP}6{LEMCK1gKwjHY$+-^ zRN@}r)}tvUCS2F6k+tv0@mJmk68zv z$Mby`*@mLaDBK<>4EA)^{BO1VYj1Y^Q_0Rhm4rA5cm1hEB2u{fPaTqHXT>t%og%`K?p%1cQsDoU*6D&+KLXf5P&W=tyNZZG5s zD&*DhX7sjeE#!kLOev`>NaZTzk6>b8U`R756!2#7W^6AM3@Q}T@Mex+1d7@e3j6u_ zdHn|hFyYNmQYezt>HOpP69N0J%f&Xd@$!CtR!f2@PW{!%5+{&&`n_S&Ix 
zb`N4J_U>Ie(RP3MHrw?w?E4K>TK1mVT(SSVy2AcfCm?QbqxaA5wEOmb z?+sSj9?M-}yEd<7uU?_EO^@XAy_N-!ta+L`_PVAWv@UPWv8|3Uwe~+$wDeVoAlNdWB38oIrklK}lwAW>xBB?KDogSH20aQX_bQ Z!X-tCnR%&2@x>*HC8>q1wS{b@dH^{&vL65d diff --git a/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata b/tests/data/rllib_data/single_agent/checkpoint_1/checkpoint-1.tune_metadata index e83b72aeaf2291e2f177e78504c94fde7e5a3392..55b72be28978f4b959b001b57aad8683ce0f562a 100644 GIT binary patch delta 105 zcmdnOc!_a>yn>;LiIGWil3_}kiMdH~YHCudrJ;dks!3v6iiufrs?kJyZ7GA&2rdQ& zhLX(O)cBJ8lEj=sR(FTQ4ck^VOl;9ph~Ngw6(v^2m!%dJXXfV>azwBKMGW*T^b86) GOZ5Pp1Rp;D delta 75 zcmcb_xP@_oyh2K{ak8bMiII7tiLptlxw)lqD0&z+vtKr&yE|cKHf@4J z!HdOE8YW&u@vH|At``s9yoeX?>QBf)@CP`XL|Ut}u*|;iy*E4G_x4v_U6|WVJuA$P z4c4RH*-kc$0<=xp17N(jsmrH@W%;$RV2vlZ*N7B2);TShLqb$_1g;}a+R(dEin`yb z>Efn(WJT+nc^q*;1LR;qXoy_Gg*>NMtnca0Nlt45)hQ!w8us=&lOaVe4FNd~@u*Bqb2F}-k@2MNokn245Q zt(4>)W6=;SlDkG(FJF|gu_pV*?A4l2w`~ZU+-4;7YzM1j?e*ln5jL%%u?0H~Bpgo} zSzNJva6AYFsu3?@fPi>)5noRET8=p`i8xBEl7E*U(qGm>5J{&K@!+u0H@A<$kSLOYfGGRxmZ*Q-cjsk-_Tn3NY2YhnNb+pQ!E0kLLCGXQ8jj#X|jFCV+sl rRuVRoR__BANX1FD^bnsKZczi+$R%OV{fGTGCH^t}P}59#cjoFnXm$xn~ZNi9pwnbO1J>F4G@Sy*J# z#7POv%-+lszrUUQi_unK2X_xoaei7!d`5mzW>tRPX&nzxnl9-g4lUW6_ zu_Plk9%S`qS(Y3|ZixLr*BO~jp22!rqBuD^yA1I>alb>G@4CPOr%_h%RmYR{7 zoReA%lq@MKoqUo_d$T0_EXK{3IVLbNo9LNN-pD0Bxt1$r@&+!6$*;LqF&mk3O`aei zz4?}a2os~xrMVD;wi(OSDG7N zo?n!mT2ws6o2iEb#7{3M1#%cBYl&x0l-Cm#C@D(J%!^M-EJ@CYFV3t=o#LDDYNC(Q VW;@ZhjGNz!S2Hq7PR^Fx0st4Wrak}w diff --git a/tests/fast_tests/test_controllers.py b/tests/fast_tests/test_controllers.py index 58967cef8..bef765396 100644 --- a/tests/fast_tests/test_controllers.py +++ b/tests/fast_tests/test_controllers.py @@ -405,6 +405,175 @@ def test_no_crash_LinearOVM(self): self.tearDown_failsafe() +class TestFeasibleAccelFailsafe(TestInstantaneousFailsafe): + """ + Tests that the feasible accel failsafe of the base acceleration controller + does not fail under extreme conditions. 
+ """ + + def test_no_crash_OVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "feasible_accel" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + def test_no_crash_LinearOVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": "feasible_accel" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + +class TestObeySpeedLimitFailsafe(TestInstantaneousFailsafe): + """ + Tests that the obey speed limit failsafe of the base acceleration controller + does not fail under extreme conditions. + """ + + def test_no_crash_OVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "obey_speed_limit" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + def test_no_crash_LinearOVM(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": "obey_speed_limit" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + self.setUp_failsafe(vehicles=vehicles) + + # run the experiment, see if it fails + self.exp.run(1) + + self.tearDown_failsafe() + + +class TestBrokenFailsafe(TestInstantaneousFailsafe): + """ + Tests that the failsafe logic triggers exceptions when instantiated + incorrectly. 
+ """ + + def test_invalid_failsafe_string(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(OVMController, { + "fail_safe": "default" + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + additional_env_params = { + "target_velocity": 8, + "max_accel": 3, + "max_decel": 3, + "sort_vehicles": False + } + env_params = EnvParams(additional_params=additional_env_params) + + additional_net_params = { + "length": 100, + "lanes": 1, + "speed_limit": 30, + "resolution": 40 + } + net_params = NetParams(additional_params=additional_net_params) + + initial_config = InitialConfig(bunching=10) + + # create the environment and network classes, see that it raises ValueError + with self.assertRaises(ValueError): + ring_road_exp_setup(vehicles=vehicles, + env_params=env_params, + net_params=net_params, + initial_config=initial_config) + + self.tearDown_failsafe() + + def test_invalid_failsafe_type(self): + vehicles = VehicleParams() + vehicles.add( + veh_id="test", + acceleration_controller=(LinearOVM, { + "fail_safe": True + }), + routing_controller=(ContinuousRouter, {}), + num_vehicles=10, + ) + + additional_env_params = { + "target_velocity": 8, + "max_accel": 3, + "max_decel": 3, + "sort_vehicles": False + } + env_params = EnvParams(additional_params=additional_env_params) + + additional_net_params = { + "length": 100, + "lanes": 1, + "speed_limit": 30, + "resolution": 40 + } + net_params = NetParams(additional_params=additional_net_params) + + initial_config = InitialConfig(bunching=10) + + # create the environment and network classes, see that it raises ValueError + with self.assertRaises(ValueError): + ring_road_exp_setup(vehicles=vehicles, + env_params=env_params, + net_params=net_params, + initial_config=initial_config) + + self.tearDown_failsafe() + + class TestStaticLaneChanger(unittest.TestCase): """ Makes sure that vehicles with a static lane-changing controller do not diff --git 
a/tests/fast_tests/test_examples.py b/tests/fast_tests/test_examples.py index 8e871afb4..fbd78294d 100644 --- a/tests/fast_tests/test_examples.py +++ b/tests/fast_tests/test_examples.py @@ -69,8 +69,11 @@ def test_parse_args(self): 'aimsun': False, 'exp_config': 'exp_config', 'gen_emission': False, + 'is_baseline': False, 'no_render': False, - 'num_runs': 1 + 'num_runs': 1, + 'only_query': "['all']", + 'to_aws': None, }) # test the case when optional args are specified @@ -86,8 +89,11 @@ def test_parse_args(self): 'aimsun': True, 'exp_config': 'exp_config', 'gen_emission': True, + 'is_baseline': False, 'no_render': True, - 'num_runs': 2 + 'num_runs': 2, + 'only_query': "['all']", + 'to_aws': None, }) def test_bottleneck(self): @@ -148,9 +154,12 @@ def test_highway_single(self): @staticmethod def run_simulation(flow_params): + flow_params = deepcopy(flow_params) + # make the horizon small and set render to False flow_params['sim'].render = False flow_params['env'].horizon = 5 + flow_params['env'].warmup_steps = 0 # create an experiment object exp = Experiment(flow_params) @@ -167,13 +176,22 @@ def test_parse_args(self): args = parse_train_args(["exp_config"]) self.assertDictEqual(vars(args), { + 'algorithm': 'PPO', + 'checkpoint_freq': 20, 'exp_config': 'exp_config', + 'exp_title': None, + 'grid_search': False, 'local_mode': False, 'rl_trainer': 'rllib', 'num_cpus': 1, + 'num_iterations': 200, + 'num_rollouts': 1, 'num_steps': 5000, + 'render': False, 'rollout_size': 1000, - 'checkpoint_path': None + 'checkpoint_path': None, + 'use_s3': False, + 'multi_node': False, }) # test the case when optional args are specified @@ -187,13 +205,22 @@ def test_parse_args(self): ]) self.assertDictEqual(vars(args), { + 'algorithm': 'PPO', + 'checkpoint_freq': 20, 'checkpoint_path': '5', 'exp_config': 'exp_config', + 'exp_title': None, + 'grid_search': False, 'local_mode': False, 'num_cpus': 1, + 'num_iterations': 200, + 'num_rollouts': 1, 'num_steps': 3, + 'render': False, 
'rl_trainer': 'h-baselines', - 'rollout_size': 4 + 'rollout_size': 4, + 'use_s3': False, + 'multi_node': False, }) @@ -205,6 +232,11 @@ class TestStableBaselineExamples(unittest.TestCase): """ @staticmethod def run_exp(flow_params): + # Reduce the number of warmup steps to speedup tests. + flow_params = deepcopy(flow_params) + flow_params['env'].warmup_steps = 0 + + # Run the example. train_model = run_stable_baselines_model(flow_params, 1, 4, 4) train_model.env.close() @@ -408,7 +440,13 @@ def test_multiagent_i210(self): @staticmethod def run_exp(flow_params, **kwargs): - alg_run, env_name, config = setup_rllib_exps(flow_params, 1, 1, **kwargs) + # Reduce the number of warmup steps to speedup tests. + flow_params = deepcopy(flow_params) + flow_params['env'].warmup_steps = 0 + + # Run the example. + alg_run, env_name, config = setup_rllib_exps( + flow_params, 1, 1, parse_train_args([""]), **kwargs) try: ray.init(num_cpus=1, local_mode=True) diff --git a/tests/fast_tests/test_experiment_base_class.py b/tests/fast_tests/test_experiment_base_class.py index b3863a77c..8a7a9500c 100644 --- a/tests/fast_tests/test_experiment_base_class.py +++ b/tests/fast_tests/test_experiment_base_class.py @@ -1,6 +1,7 @@ import unittest import os import time +import csv from flow.core.experiment import Experiment from flow.core.params import VehicleParams @@ -168,15 +169,44 @@ def test_convert_to_csv(self): time.sleep(1.0) # check that both the csv file exists and the xml file doesn't. 
- self.assertFalse(os.path.isfile(dir_path + "/{}-emission.xml".format( + self.assertFalse(os.path.isfile(dir_path + "/{}-0_emission.xml".format( exp.env.network.name))) - self.assertTrue(os.path.isfile(dir_path + "/{}-emission.csv".format( + self.assertTrue(os.path.isfile(dir_path + "/{}-0_emission.csv".format( exp.env.network.name))) + # check that the keys within the emission file matches its expected + # values + with open(dir_path + "/{}-0_emission.csv".format( + exp.env.network.name), "r") as f: + reader = csv.reader(f) + header = next(reader) + + self.assertListEqual(header, [ + "time", + "id", + "x", + "y", + "speed", + "headway", + "leader_id", + "follower_id", + "leader_rel_speed", + "target_accel_with_noise_with_failsafe", + "target_accel_no_noise_no_failsafe", + "target_accel_with_noise_no_failsafe", + "target_accel_no_noise_with_failsafe", + "realized_accel", + "road_grade", + "edge_id", + "lane_number", + "distance", + "relative_position", + ]) + time.sleep(0.1) # delete the files - os.remove(os.path.expanduser(dir_path + "/{}-emission.csv".format( + os.remove(os.path.expanduser(dir_path + "/{}-0_emission.csv".format( exp.env.network.name))) diff --git a/tests/fast_tests/test_rewards.py b/tests/fast_tests/test_rewards.py index 3f2e08cde..ac406b545 100644 --- a/tests/fast_tests/test_rewards.py +++ b/tests/fast_tests/test_rewards.py @@ -7,7 +7,6 @@ from flow.core.rewards import average_velocity, min_delay from flow.core.rewards import desired_velocity, boolean_action_penalty from flow.core.rewards import penalize_near_standstill, penalize_standstill -from flow.core.rewards import energy_consumption os.environ["TEST_FLAG"] = "True" @@ -152,31 +151,6 @@ def test_penalize_near_standstill(self): self.assertEqual(penalize_near_standstill(env, thresh=2), -10) self.assertEqual(penalize_near_standstill(env, thresh=0.5), -9) - def test_energy_consumption(self): - """Test the energy consumption method.""" - vehicles = VehicleParams() - vehicles.add("test", 
num_vehicles=10) - - env_params = EnvParams(additional_params={ - "target_velocity": 10, "max_accel": 1, "max_decel": 1, - "sort_vehicles": False}) - - env, _, _ = ring_road_exp_setup(vehicles=vehicles, - env_params=env_params) - - # check the penalty is zero at speed zero - self.assertEqual(energy_consumption(env, gain=1), 0) - - # change the speed of one vehicle - env.k.vehicle.test_set_speed("test_0", 1) - self.assertEqual(energy_consumption(env), -12.059337750000001) - - # check that stepping change the previous speeds and increases the energy consumption - env.step(rl_actions=None) - env.step(rl_actions=None) - self.assertGreater(env.k.vehicle.get_previous_speed("test_0"), 0.0) - self.assertLess(energy_consumption(env), -12.059337750000001) - def test_boolean_action_penalty(self): """Test the boolean_action_penalty method.""" actions = [False, False, False, False, False] From bb94c27518182a4dd8e069d0af17bc2ee7496ce5 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Fri, 10 Jul 2020 22:19:21 -0700 Subject: [PATCH 327/335] remove line from testing --- flow/core/experiment.py | 1 - 1 file changed, 1 deletion(-) diff --git a/flow/core/experiment.py b/flow/core/experiment.py index b9ce3ac0e..ca1b54409 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -253,7 +253,6 @@ def rl_actions(*_): max_speed=10, start=self.env.env_params.warmup_steps ) - exit() upload_to_s3( 'circles.data.pipeline', 'metadata_table/date={0}/partition_name={1}_METADATA/' From d373965b7325b338dfaa7456b05d14cc8dbcfbe9 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 00:18:08 -0700 Subject: [PATCH 328/335] fix toyota temp file removal --- flow/energy_models/toyota_energy.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index b65b7a0c1..b40146d80 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -20,15 +20,14 @@ def 
__init__(self, filename): with open('temp.pkl', 'rb') as file: try: self.toyota_energy = pickle.load(file) + # delete pickle file + os.remove('temp.pkl') except TypeError: print('Must use Python version 3.6.8 to unpickle') # delete pickle file - os.remove(file) + os.remove('temp.pkl') raise - # delete pickle file - os.remove(file) - @abstractmethod def get_instantaneous_power(self, accel, speed, grade): """See parent class.""" From ab6732e1164da2fcf700d119728cad3a8dfa97e2 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 01:10:47 -0700 Subject: [PATCH 329/335] fix fc <> power unit conversion --- flow/energy_models/base_energy.py | 2 +- flow/energy_models/toyota_energy.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flow/energy_models/base_energy.py b/flow/energy_models/base_energy.py index ed75efd09..ba5da5080 100644 --- a/flow/energy_models/base_energy.py +++ b/flow/energy_models/base_energy.py @@ -59,4 +59,4 @@ def get_instantaneous_fuel_consumption(self, accel, speed, grade): ------- float """ - return self.get_instantaneous_power(accel, speed, grade) * self.conversion + return self.get_instantaneous_power(accel, speed, grade) / self.conversion diff --git a/flow/energy_models/toyota_energy.py b/flow/energy_models/toyota_energy.py index b40146d80..397610089 100644 --- a/flow/energy_models/toyota_energy.py +++ b/flow/energy_models/toyota_energy.py @@ -58,7 +58,7 @@ def __init__(self): def get_instantaneous_power(self, accel, speed, grade): """See parent class.""" - return self.get_instantaneous_fuel_consumption(accel, speed, grade) / self.conversion + return self.get_instantaneous_fuel_consumption(accel, speed, grade) * self.conversion def get_instantaneous_fuel_consumption(self, accel, speed, grade): """See parent class.""" From c0de59b994c84c56ebb2f7c9e6d09b8303b983f6 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 01:11:46 -0700 Subject: [PATCH 330/335] make default highway single penetration rate 0 
--- examples/exp_configs/non_rl/highway_single.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/highway_single.py b/examples/exp_configs/non_rl/highway_single.py index ff486b3f5..fcd2f2da4 100644 --- a/examples/exp_configs/non_rl/highway_single.py +++ b/examples/exp_configs/non_rl/highway_single.py @@ -28,7 +28,7 @@ # whether to include noise in the car-following models INCLUDE_NOISE = True # penetration rate of the follower-stopper vehicles -PENETRATION_RATE = 10.0 +PENETRATION_RATE = 0.0 additional_net_params = ADDITIONAL_NET_PARAMS.copy() additional_net_params.update({ From 5f6acc2045a7ea0ceaff16f2dce7975ae43014f4 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 01:24:09 -0700 Subject: [PATCH 331/335] use 1609.34 meters per mile --- flow/core/rewards.py | 2 +- flow/data_pipeline/query.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/flow/core/rewards.py b/flow/core/rewards.py index 33960f8cd..20ed1c6a7 100755 --- a/flow/core/rewards.py +++ b/flow/core/rewards.py @@ -393,7 +393,7 @@ def instantaneous_mpg(env, veh_ids=None, gain=.001): cumulative_distance += speed cumulative_gallons /= 3600.0 - cumulative_distance /= 1609.0 + cumulative_distance /= 1609.34 # miles / gallon is (distance_dot * \delta t) / (gallons_dot * \delta t) mpg = cumulative_distance / (cumulative_gallons + 1e-6) diff --git a/flow/data_pipeline/query.py b/flow/data_pipeline/query.py index 558488d8e..f68dfa321 100644 --- a/flow/data_pipeline/query.py +++ b/flow/data_pipeline/query.py @@ -360,7 +360,7 @@ class QueryStrings(Enum): distance_meters, power_watts * time_step_size_seconds AS energy_joules, distance_meters / (power_watts * time_step_size_seconds) AS efficiency_meters_per_joules, - 33561 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon + 33554.13 * distance_meters / (power_watts * time_step_size_seconds) AS efficiency_miles_per_gallon FROM 
sub_fact_vehicle_trace WHERE 1 = 1 AND power_watts * time_step_size_seconds != 0 @@ -403,7 +403,7 @@ class QueryStrings(Enum): SUM(distance_meters) AS distance_meters, SUM(energy_joules) AS energy_joules, SUM(distance_meters) / SUM(energy_joules) AS efficiency_meters_per_joules, - 33561 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon + 33554.13 * SUM(distance_meters) / SUM(energy_joules) AS efficiency_miles_per_gallon FROM fact_vehicle_fuel_efficiency_agg WHERE 1 = 1 AND date = \'{date}\' @@ -419,7 +419,7 @@ class QueryStrings(Enum): t.source_id, e.energy_model_id, e.efficiency_meters_per_joules, - 33561 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, + 33554.13 * e.efficiency_meters_per_joules AS efficiency_miles_per_gallon, t.throughput_per_hour, s.safety_rate, s.safety_value_max From 7a773e343f6ddbd5e1836be689e873469c836731 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 13:18:23 -0700 Subject: [PATCH 332/335] fix av routing controller if no on-ramp --- examples/exp_configs/non_rl/i210_subnetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 9e415fc65..399ef7f9f 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -95,7 +95,7 @@ "v_des": V_DES, "no_control_edges": ["ghost0", "119257908#3"] }), - routing_controller=(I210Router, {}) + routing_controller=(I210Router, {}) if ON_RAMP else None, ) inflow = InFlows() From 0e8be957940039d4b199a7dfdf5670cd011c0136 Mon Sep 17 00:00:00 2001 From: liljonnystyle Date: Sat, 11 Jul 2020 22:15:21 -0700 Subject: [PATCH 333/335] Time-Space Diagram offset axes (#999) * refactor tsd to allow for axes offsets * update time-space plotter unit tests --- .../exp_configs/non_rl/i210_subnetwork.py | 14 +- flow/core/experiment.py | 9 +- flow/visualize/time_space_diagram.py | 233 
++++++++-------- .../test_files/ring_230_emission.csv | 142 ++-------- tests/fast_tests/test_visualizers.py | 256 ++++++------------ 5 files changed, 237 insertions(+), 417 deletions(-) diff --git a/examples/exp_configs/non_rl/i210_subnetwork.py b/examples/exp_configs/non_rl/i210_subnetwork.py index 399ef7f9f..0c66f42e7 100644 --- a/examples/exp_configs/non_rl/i210_subnetwork.py +++ b/examples/exp_configs/non_rl/i210_subnetwork.py @@ -108,16 +108,16 @@ veh_type="human", edge=highway_start_edge, vehs_per_hour=INFLOW_RATE * (1 - PENETRATION_RATE), - departLane=lane, - departSpeed=INFLOW_SPEED) + depart_lane=lane, + depart_speed=INFLOW_SPEED) if PENETRATION_RATE > 0.0: inflow.add( veh_type="av", edge=highway_start_edge, vehs_per_hour=INFLOW_RATE * PENETRATION_RATE, - departLane=lane, - departSpeed=INFLOW_SPEED) + depart_lane=lane, + depart_speed=INFLOW_SPEED) # on ramp if ON_RAMP: @@ -125,7 +125,7 @@ veh_type="human", edge="27414345", vehs_per_hour=int(ON_RAMP_INFLOW_RATE * (1 - PENETRATION_RATE)), - departSpeed=10, + depart_speed=10, ) if PENETRATION_RATE > 0.0: @@ -133,8 +133,8 @@ veh_type="av", edge="27414345", vehs_per_hour=int(ON_RAMP_INFLOW_RATE * PENETRATION_RATE), - departLane="random", - departSpeed=10) + depart_lane="random", + depart_speed=10) # =========================================================================== # # Generate the flow_params dict with all relevant simulation information. 
# diff --git a/flow/core/experiment.py b/flow/core/experiment.py index ca1b54409..38599b002 100755 --- a/flow/core/experiment.py +++ b/flow/core/experiment.py @@ -248,10 +248,13 @@ def rl_actions(*_): write_dict_to_csv(metadata_table_path, metadata, True) tsd_main( trajectory_table_path, - {'network': self.env.network.__class__}, + { + 'network': self.env.network.__class__, + 'env': self.env.env_params, + 'sim': self.env.sim_params + }, min_speed=0, - max_speed=10, - start=self.env.env_params.warmup_steps + max_speed=10 ) upload_to_s3( 'circles.data.pipeline', diff --git a/flow/visualize/time_space_diagram.py b/flow/visualize/time_space_diagram.py index a9742e249..955043691 100644 --- a/flow/visualize/time_space_diagram.py +++ b/flow/visualize/time_space_diagram.py @@ -43,6 +43,17 @@ HighwayNetwork ] +# networks that use edgestarts +USE_EDGESTARTS = set([ + RingNetwork, + FigureEightNetwork, + MergeNetwork +]) + +GHOST_DICT = defaultdict(dict) +GHOST_DICT[I210SubNetwork] = {'ghost_edges': {'ghost0', '119257908#3'}} +GHOST_DICT[HighwayNetwork] = {'ghost_bounds': (500, 2300)} + def import_data_from_trajectory(fp, params=dict()): r"""Import and preprocess data from the Flow trajectory (.csv) file. 
@@ -62,8 +73,10 @@ def import_data_from_trajectory(fp, params=dict()): Returns ------- - pd.DataFrame + pd.DataFrame, float, float """ + network = params['network'] + # Read trajectory csv into pandas dataframe df = pd.read_csv(fp) @@ -73,33 +86,47 @@ def import_data_from_trajectory(fp, params=dict()): 'lane_number': 'lane_id', } df = df.rename(columns=column_conversions) - if 'distance' not in df.columns: + if network in USE_EDGESTARTS: df['distance'] = _get_abs_pos(df, params) + start = params['env'].warmup_steps * params['env'].sims_per_step * params['sim'].sim_step + # produce upper and lower bounds for the non-greyed-out domain + ghost_edges = GHOST_DICT[network].get('ghost_edges') + ghost_bounds = GHOST_DICT[network].get('ghost_bounds') + if ghost_edges: + domain_lb = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() + domain_ub = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() + elif ghost_bounds: + domain_lb = ghost_bounds[0] + domain_ub = ghost_bounds[1] + else: + domain_lb = df['distance'].min() + domain_ub = df['distance'].max() + + df.loc[:, 'time_step'] = df['time_step'].apply(lambda x: x - start) + df.loc[:, 'distance'] = df['distance'].apply(lambda x: x - domain_lb) + domain_ub -= domain_lb + # Compute line segment ends by shifting dataframe by 1 row df[['next_pos', 'next_time']] = df.groupby('id')[['distance', 'time_step']].shift(-1) # Remove nans from data df = df[df['next_time'].notna()] - return df + return df, domain_lb, domain_ub, start -def get_time_space_data(data, params): +def get_time_space_data(data, network): r"""Compute the unique inflows and subsequent outflow statistics. Parameters ---------- data : pd.DataFrame cleaned dataframe of the trajectory data - params : dict - flow-specific parameters, including: - * "network" (str): name of the network that was used when generating - the emission file. 
Must be one of the network names mentioned in - ACCEPTABLE_NETWORKS, - * "net_params" (flow.core.params.NetParams): network-specific - parameters. This is used to collect the lengths of various network - links. + network : child class of Network() + network that was used when generating the emission file. + Must be one of the network names mentioned in + ACCEPTABLE_NETWORKS Returns ------- @@ -117,8 +144,8 @@ def get_time_space_data(data, params): if the specified network is not supported by this method """ # check that the network is appropriate - assert params['network'] in ACCEPTABLE_NETWORKS, \ - 'Network must be one of: ' + ', '.join([network.__name__ for network in ACCEPTABLE_NETWORKS]) + assert network in ACCEPTABLE_NETWORKS, \ + 'Network must be one of: ' + ', '.join([network_.__name__ for network_ in ACCEPTABLE_NETWORKS]) # switcher used to compute the positions based on the type of network switcher = { @@ -130,7 +157,7 @@ def get_time_space_data(data, params): } # Get the function from switcher dictionary - func = switcher[params['network']] + func = switcher[network] # Execute the function segs, data = func(data) @@ -238,7 +265,7 @@ def _i210_subnetwork(data): """ # Reset lane numbers that are offset by ramp lanes offset_edges = set(data[data['lane_id'] == 5]['edge_id'].unique()) - data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] -= 1 + data.loc[data['edge_id'].isin(offset_edges), 'lane_id'] = data[data['edge_id'].isin(offset_edges)]['lane_id'] - 1 segs = dict() for lane, df in data.groupby('lane_id'): @@ -382,16 +409,7 @@ def _get_abs_pos(df, params): return ret -def plot_tsd(ax, - df, - segs, - cmap, - min_speed=0, - max_speed=10, - start=0, - lane=None, - ghost_edges=None, - ghost_bounds=None): +def plot_tsd(df, network, cmap, min_speed=0, max_speed=10, start=0, domain_bounds=None): """Plot the time-space diagram. 
Take the pre-processed segments and other meta-data, then plot all the line @@ -399,24 +417,21 @@ def plot_tsd(ax, Parameters ---------- - ax : matplotlib.axes.Axes - figure axes that will be plotted on df : pd.DataFrame data used for axes bounds and speed coloring - segs : list of list of lists - line segments to be plotted, where each segment is a list of two [x,y] - pairs + network : child class of Network() + network that was used when generating the emission file. + Must be one of the network names mentioned in + ACCEPTABLE_NETWORKS + cmap : colors.LinearSegmentedColormap + colormap for plotting speed min_speed : int or float minimum speed in colorbar max_speed : int or float maximum speed in colorbar start : int or float starting time_step not greyed out - lane : int, optional - lane number to be shown in plot title - ghost_edges : list or set of str - ghost edge names to be greyed out, default None - ghost_bounds : tuple + domain_bounds : tuple lower and upper bounds of domain, excluding ghost edges, default None """ norm = plt.Normalize(min_speed, max_speed) @@ -426,49 +441,57 @@ def plot_tsd(ax, ymin, ymax = df['distance'].min(), df['distance'].max() ybuffer = (ymax - ymin) * 0.025 # 2.5% of range - ax.set_xlim(xmin - xbuffer, xmax + xbuffer) - ax.set_ylim(ymin - ybuffer, ymax + ybuffer) + # Convert df data into segments for plotting + segs, df = get_time_space_data(df, network) - lc = LineCollection(segs, cmap=cmap, norm=norm) - lc.set_array(df['speed'].values) - lc.set_linewidth(1) - ax.add_collection(lc) - ax.autoscale() + nlanes = df['lane_id'].nunique() + plt.figure(figsize=(16, 9*nlanes)) + if nlanes == 1: + segs = [segs] - rects = [] - if ghost_edges: - y_domain_min = df[~df['edge_id'].isin(ghost_edges)]['distance'].min() - y_domain_max = df[~df['edge_id'].isin(ghost_edges)]['distance'].max() - rects.append(Rectangle((xmin, y_domain_min), start - xmin, y_domain_max - y_domain_min)) - rects.append(Rectangle((xmin, ymin), xmax - xmin, y_domain_min - 
ymin)) - rects.append(Rectangle((xmin, y_domain_max), xmax - xmin, ymax - y_domain_max)) - elif ghost_bounds: - rects.append(Rectangle((xmin, ghost_bounds[0]), start - xmin, ghost_bounds[1] - ghost_bounds[0])) - rects.append(Rectangle((xmin, ymin), xmax - xmin, ghost_bounds[0] - ymin)) - rects.append(Rectangle((xmin, ghost_bounds[1]), xmax - xmin, ymax - ghost_bounds[1])) - else: - rects.append(Rectangle((xmin, ymin), start - xmin, ymax - ymin)) + for lane, lane_df in df.groupby('lane_id'): + ax = plt.subplot(nlanes, 1, lane+1) + + ax.set_xlim(xmin - xbuffer, xmax + xbuffer) + ax.set_ylim(ymin - ybuffer, ymax + ybuffer) + + lc = LineCollection(segs[lane], cmap=cmap, norm=norm) + lc.set_array(lane_df['speed'].values) + lc.set_linewidth(1) + ax.add_collection(lc) + ax.autoscale() + + rects = [] + # rectangle for warmup period, but not ghost edges + rects.append(Rectangle((xmin, 0), start, domain_bounds[1])) + # rectangle for lower ghost edge (including warmup period) + rects.append(Rectangle((xmin, ymin), xmax - xmin, domain_bounds[0])) + # rectangle for upper ghost edge (including warmup period) + rects.append(Rectangle((xmin, domain_bounds[1]), xmax - xmin, ymax - domain_bounds[1])) - if rects: pc = PatchCollection(rects, facecolor='grey', alpha=0.5, edgecolor=None) pc.set_zorder(20) ax.add_collection(pc) - if lane: - ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) - else: - ax.set_title('Time-Space Diagram', fontsize=25) - ax.set_ylabel('Position (m)', fontsize=20) - ax.set_xlabel('Time (s)', fontsize=20) - plt.xticks(fontsize=18) - plt.yticks(fontsize=18) + if nlanes > 1: + ax.set_title('Time-Space Diagram: Lane {}'.format(lane), fontsize=25) + else: + ax.set_title('Time-Space Diagram', fontsize=25) - cbar = plt.colorbar(lc, ax=ax, norm=norm) - cbar.set_label('Velocity (m/s)', fontsize=20) - cbar.ax.tick_params(labelsize=18) + ax.set_ylabel('Position (m)', fontsize=20) + if lane == nlanes - 1: + ax.set_xlabel('Time (s)', fontsize=20) + 
plt.xticks(fontsize=18) + plt.yticks(fontsize=18) + cbar = plt.colorbar(lc, ax=ax, norm=norm) + cbar.set_label('Velocity (m/s)', fontsize=20) + cbar.ax.tick_params(labelsize=18) -def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): + plt.tight_layout() + + +def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10): """Prepare and plot the time-space diagram. Parameters @@ -487,9 +510,9 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): minimum speed in colorbar max_speed : int or float maximum speed in colorbar - start : int or float - starting time_step not greyed out """ + network = flow_params['network'] + # some plotting parameters cdict = { 'red': ((0, 0, 0), (0.2, 1, 1), (0.6, 1, 1), (1, 0, 0)), @@ -499,58 +522,23 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): my_cmap = colors.LinearSegmentedColormap('my_colormap', cdict, 1024) # Read trajectory csv into pandas dataframe - traj_df = import_data_from_trajectory(trajectory_path, flow_params) + traj_df, domain_lb, domain_ub, start = import_data_from_trajectory(trajectory_path, flow_params) - # Convert df data into segments for plotting - segs, traj_df = get_time_space_data(traj_df, flow_params) - - if flow_params['network'] == I210SubNetwork: - nlanes = traj_df['lane_id'].nunique() - plt.figure(figsize=(16, 9*nlanes)) - - for lane, df in traj_df.groupby('lane_id'): - ax = plt.subplot(nlanes, 1, lane+1) - - plot_tsd(ax=ax, - df=df, - segs=segs[lane], - cmap=my_cmap, - min_speed=min_speed, - max_speed=max_speed, - start=start, - lane=int(lane+1), - ghost_edges={'ghost0', '119257908#3'}) - plt.tight_layout() - else: - # perform plotting operation - plt.figure(figsize=(16, 9)) - ax = plt.axes() - - if flow_params['network'] == HighwayNetwork: - plot_tsd(ax=ax, - df=traj_df, - segs=segs, - cmap=my_cmap, - min_speed=min_speed, - max_speed=max_speed, - start=start, - ghost_bounds=(500, 2300)) - else: - 
plot_tsd(ax=ax, - df=traj_df, - segs=segs, - cmap=my_cmap, - min_speed=min_speed, - max_speed=max_speed, - start=start) + plot_tsd(df=traj_df, + network=network, + cmap=my_cmap, + min_speed=min_speed, + max_speed=max_speed, + start=start, + domain_bounds=(domain_lb, domain_ub)) ########################################################################### # Note: For MergeNetwork only # - if flow_params['network'] == 'MergeNetwork': # - plt.plot([df['time_step'].min(), df['time_step'].max()], - [0, 0], linewidth=3, color="white") # - plt.plot([df['time_step'].min(), df['time_step'].max()], - [-0.1, -0.1], linewidth=3, color="white") # + if network == MergeNetwork: # + plt.plot([traj_df['time_step'].min(), traj_df['time_step'].max()], + [0, 0], linewidth=3, color="white") # + plt.plot([traj_df['time_step'].min(), traj_df['time_step'].max()], + [-0.1, -0.1], linewidth=3, color="white") # ########################################################################### outfile = trajectory_path.replace('csv', 'png') @@ -575,13 +563,11 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): parser.add_argument('--steps', type=int, default=1, help='rate at which steps are plotted.') parser.add_argument('--title', type=str, default='Time Space Diagram', - help='rate at which steps are plotted.') + help='Title for the time-space diagrams.') parser.add_argument('--max_speed', type=int, default=8, help='The maximum speed in the color range.') parser.add_argument('--min_speed', type=int, default=0, help='The minimum speed in the color range.') - parser.add_argument('--start', type=float, default=0, - help='initial time (in sec) in the plot.') args = parser.parse_args() @@ -596,6 +582,5 @@ def tsd_main(trajectory_path, flow_params, min_speed=0, max_speed=10, start=0): args.trajectory_path, flow_params, min_speed=args.min_speed, - max_speed=args.max_speed, - start=args.start + max_speed=args.max_speed ) diff --git 
a/tests/fast_tests/test_files/ring_230_emission.csv b/tests/fast_tests/test_files/ring_230_emission.csv index 9051074c8..342c5c7f3 100644 --- a/tests/fast_tests/test_files/ring_230_emission.csv +++ b/tests/fast_tests/test_files/ring_230_emission.csv @@ -1,117 +1,25 @@ -speed,CO,electricity,x,NOx,id,fuel,angle,time,edge_id,eclass,route,waiting,CO2,lane_number,PMx,type,noise,relative_position,HC,y -0.0,164.78,0.0,36.64,1.2,idm_0,1.13,94.02,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,0.0,0.81,-1.65 -0.08,163.5,0.0,36.65,1.21,idm_0,1.13,94.01,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,0.01,0.81,-1.65 -0.16,162.24,0.0,36.66,1.21,idm_0,1.13,93.98,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,0.02,0.8,-1.65 -0.23,161.0,0.0,36.69,1.21,idm_0,1.14,93.94,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,0.05,0.79,-1.65 -0.31,159.78,0.0,36.72,1.21,idm_0,1.14,93.88,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,0.08,0.79,-1.65 -0.41,158.73,0.0,36.76,1.22,idm_0,1.15,93.8,0.6,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2679.14,0,0.07,idm,60.47,0.12,0.79,-1.65 -0.0,164.78,0.0,46.49,1.2,idm_1,1.13,78.81,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,9.55,0.81,-0.34 -0.08,163.5,0.0,46.5,1.21,idm_1,1.13,78.8,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,9.55,0.81,-0.33 -0.16,162.24,0.0,46.51,1.21,idm_1,1.13,78.78,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,9.57,0.8,-0.33 -0.23,161.0,0.0,46.54,1.21,idm_1,1.14,78.74,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,9.59,0.79,-0.32 -0.31,159.78,0.0,46.57,1.21,idm_1,1.14,78.7,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,9.62,0.79,-0.31 -0.41,158.73,0.0,46.61,1.22,idm_1,1.15,78.64,0.6,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2679.14,0,0.07,idm,60.47,9.66,0.79,-0.3 
-0.0,164.78,0.0,56.08,1.2,idm_10,1.13,304.55,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,37.95,0.81,69.53 -0.08,163.5,0.0,56.08,1.21,idm_10,1.13,304.54,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,37.96,0.81,69.53 -0.16,162.24,0.0,56.06,1.21,idm_10,1.13,304.52,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,37.98,0.8,69.54 -0.23,161.0,0.0,56.04,1.21,idm_10,1.14,304.48,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,38.0,0.79,69.55 -0.31,159.78,0.0,56.01,1.21,idm_10,1.14,304.44,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,38.03,0.79,69.57 -0.41,158.73,0.0,55.98,1.22,idm_10,1.15,304.38,0.6,right,HBEFA3/PC_G_EU4,routeright,0.0,2679.14,0,0.07,idm,60.47,38.07,0.79,69.59 -0.0,164.78,0.0,46.95,1.2,idm_11,1.13,289.47,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,47.5,0.81,73.43 -0.08,163.5,0.0,46.94,1.21,idm_11,1.13,289.45,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,47.51,0.81,73.43 -0.16,162.24,0.0,46.92,1.21,idm_11,1.13,289.42,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,47.52,0.8,73.44 -0.23,161.0,0.0,46.9,1.21,idm_11,1.14,289.38,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,47.55,0.79,73.44 -0.31,159.78,0.0,46.87,1.21,idm_11,1.14,289.32,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,47.58,0.79,73.45 -0.41,158.73,0.0,46.83,1.22,idm_11,1.15,289.24,0.6,right,HBEFA3/PC_G_EU4,routeright,0.0,2679.14,0,0.07,idm,60.47,47.62,0.79,73.46 -0.0,164.78,0.0,37.11,1.2,idm_12,1.13,274.71,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,57.05,0.81,74.86 -0.08,163.5,0.0,37.11,1.21,idm_12,1.13,274.7,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,57.05,0.81,74.86 -0.16,162.24,0.0,37.09,1.21,idm_12,1.13,274.68,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,57.07,0.8,74.86 
-0.23,161.0,0.0,37.07,1.21,idm_12,1.14,274.65,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,57.09,0.79,74.86 -0.31,159.78,0.0,37.03,1.21,idm_12,1.14,274.6,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,57.12,0.79,74.86 -0.41,158.73,0.0,36.99,1.22,idm_12,1.15,274.55,0.6,right,HBEFA3/PC_G_EU4,routeright,0.0,2679.14,0,0.07,idm,60.47,57.16,0.79,74.86 -0.0,164.78,0.0,27.19,1.2,idm_13,1.13,259.6,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,9.09,0.81,73.68 -0.08,163.5,0.0,27.18,1.21,idm_13,1.13,259.58,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,9.1,0.81,73.68 -0.16,162.24,0.0,27.17,1.21,idm_13,1.13,259.55,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,9.11,0.8,73.67 -0.23,161.0,0.0,27.14,1.21,idm_13,1.14,259.51,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,9.14,0.79,73.67 -0.31,159.78,0.0,27.11,1.21,idm_13,1.14,259.45,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,9.17,0.79,73.66 -0.41,158.73,0.0,27.07,1.22,idm_13,1.15,259.37,0.6,top,HBEFA3/PC_G_EU4,routetop,0.0,2679.14,0,0.07,idm,60.47,9.21,0.79,73.65 -0.0,164.78,0.0,17.96,1.2,idm_14,1.13,244.67,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,18.64,0.81,70.0 -0.08,163.5,0.0,17.95,1.21,idm_14,1.13,244.66,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,18.64,0.81,70.0 -0.16,162.24,0.0,17.94,1.21,idm_14,1.13,244.63,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,18.66,0.8,69.99 -0.23,161.0,0.0,17.92,1.21,idm_14,1.14,244.6,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,18.68,0.79,69.98 -0.31,159.78,0.0,17.89,1.21,idm_14,1.14,244.55,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,18.71,0.79,69.96 -0.0,164.78,0.0,9.98,1.2,idm_15,1.13,229.84,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,28.18,0.81,64.07 -0.08,163.5,0.0,9.98,1.21,idm_15,1.13,229.83,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,28.19,0.81,64.07 
-0.16,162.24,0.0,9.97,1.21,idm_15,1.13,229.8,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,28.21,0.8,64.06 -0.23,161.0,0.0,9.95,1.21,idm_15,1.14,229.76,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,28.23,0.79,64.04 -0.31,159.78,0.0,9.93,1.21,idm_15,1.14,229.7,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,28.26,0.79,64.02 -0.0,164.78,0.0,3.81,1.2,idm_16,1.13,214.88,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,37.73,0.81,56.29 -0.08,163.5,0.0,3.81,1.21,idm_16,1.13,214.87,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,37.74,0.81,56.28 -0.16,162.24,0.0,3.8,1.21,idm_16,1.13,214.85,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,37.75,0.8,56.27 -0.23,161.0,0.0,3.79,1.21,idm_16,1.14,214.81,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,37.77,0.79,56.24 -0.31,159.78,0.0,3.77,1.21,idm_16,1.14,214.77,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,37.81,0.79,56.22 -0.0,164.78,0.0,-0.15,1.2,idm_17,1.13,199.9,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,47.27,0.81,47.18 -0.08,163.5,0.0,-0.15,1.21,idm_17,1.13,199.88,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,47.28,0.81,47.17 -0.16,162.24,0.0,-0.16,1.21,idm_17,1.13,199.85,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,47.3,0.8,47.15 -0.23,161.0,0.0,-0.16,1.21,idm_17,1.14,199.81,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,47.32,0.79,47.13 -0.31,159.78,0.0,-0.17,1.21,idm_17,1.14,199.75,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,47.35,0.79,47.1 -0.0,164.78,0.0,-1.64,1.2,idm_18,1.13,185.04,0.1,top,HBEFA3/PC_G_EU4,routetop,0.0,2624.72,0,0.07,idm,55.94,56.82,0.81,37.35 -0.08,163.5,0.0,-1.64,1.21,idm_18,1.13,185.03,0.2,top,HBEFA3/PC_G_EU4,routetop,0.1,2631.03,0,0.07,idm,59.48,56.83,0.81,37.34 -0.16,162.24,0.0,-1.64,1.21,idm_18,1.13,185.0,0.3,top,HBEFA3/PC_G_EU4,routetop,0.0,2636.67,0,0.07,idm,59.44,56.84,0.8,37.33 
-0.23,161.0,0.0,-1.64,1.21,idm_18,1.14,184.97,0.4,top,HBEFA3/PC_G_EU4,routetop,0.0,2641.63,0,0.07,idm,59.4,56.87,0.79,37.3 -0.31,159.78,0.0,-1.64,1.21,idm_18,1.14,184.93,0.5,top,HBEFA3/PC_G_EU4,routetop,0.0,2645.91,0,0.06,idm,59.36,56.9,0.79,37.27 -0.0,164.78,0.0,-0.52,1.2,idm_19,1.13,170.03,0.1,left,HBEFA3/PC_G_EU4,routeleft,0.0,2624.72,0,0.07,idm,55.94,8.86,0.81,27.42 -0.08,163.5,0.0,-0.52,1.21,idm_19,1.13,170.01,0.2,left,HBEFA3/PC_G_EU4,routeleft,0.1,2631.03,0,0.07,idm,59.48,8.87,0.81,27.41 -0.16,162.24,0.0,-0.51,1.21,idm_19,1.13,169.98,0.3,left,HBEFA3/PC_G_EU4,routeleft,0.0,2636.67,0,0.07,idm,59.44,8.89,0.8,27.39 -0.23,161.0,0.0,-0.51,1.21,idm_19,1.14,169.94,0.4,left,HBEFA3/PC_G_EU4,routeleft,0.0,2641.63,0,0.07,idm,59.4,8.91,0.79,27.37 -0.31,159.78,0.0,-0.5,1.21,idm_19,1.14,169.88,0.5,left,HBEFA3/PC_G_EU4,routeleft,0.0,2645.91,0,0.06,idm,59.36,8.94,0.79,27.34 -0.0,164.78,0.0,55.68,1.2,idm_2,1.13,64.0,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,19.09,0.81,3.45 -0.08,163.5,0.0,55.68,1.21,idm_2,1.13,63.99,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,19.1,0.81,3.45 -0.16,162.24,0.0,55.7,1.21,idm_2,1.13,63.97,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,19.11,0.8,3.46 -0.23,161.0,0.0,55.72,1.21,idm_2,1.14,63.93,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,19.14,0.79,3.47 -0.31,159.78,0.0,55.75,1.21,idm_2,1.14,63.88,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,19.17,0.79,3.49 -0.0,164.78,0.0,3.11,1.2,idm_20,1.13,155.0,0.1,left,HBEFA3/PC_G_EU4,routeleft,0.0,2624.72,0,0.07,idm,55.94,18.41,0.81,18.17 -0.08,163.5,0.0,3.11,1.21,idm_20,1.13,154.99,0.2,left,HBEFA3/PC_G_EU4,routeleft,0.1,2631.03,0,0.07,idm,59.48,18.42,0.81,18.16 -0.16,162.24,0.0,3.12,1.21,idm_20,1.13,154.96,0.3,left,HBEFA3/PC_G_EU4,routeleft,0.0,2636.68,0,0.07,idm,59.44,18.43,0.8,18.15 
-0.23,161.0,0.0,3.13,1.21,idm_20,1.14,154.93,0.4,left,HBEFA3/PC_G_EU4,routeleft,0.0,2641.7,0,0.07,idm,59.41,18.46,0.79,18.12 -0.31,159.77,0.0,3.15,1.21,idm_20,1.14,154.89,0.5,left,HBEFA3/PC_G_EU4,routeleft,0.0,2646.14,0,0.06,idm,59.37,18.49,0.79,18.1 -0.0,164.78,0.0,8.98,1.2,idm_21,1.13,140.22,0.1,left,HBEFA3/PC_G_EU4,routeleft,0.0,2624.72,0,0.07,idm,55.94,27.95,0.81,10.15 -0.1,163.3,0.0,8.99,1.21,idm_21,1.13,140.21,0.2,left,HBEFA3/PC_G_EU4,routeleft,0.1,2637.25,0,0.07,idm,60.3,27.96,0.81,10.15 -0.2,161.84,0.0,9.0,1.21,idm_21,1.14,140.18,0.3,left,HBEFA3/PC_G_EU4,routeleft,0.0,2649.89,0,0.07,idm,60.34,27.98,0.8,10.13 -0.29,160.38,0.0,9.02,1.21,idm_21,1.14,140.14,0.4,left,HBEFA3/PC_G_EU4,routeleft,0.0,2662.63,0,0.07,idm,60.37,28.01,0.79,10.11 -0.39,158.94,0.0,9.05,1.22,idm_21,1.15,140.07,0.5,left,HBEFA3/PC_G_EU4,routeleft,0.0,2675.48,0,0.07,idm,60.41,28.05,0.79,10.08 -0.0,164.78,0.0,63.57,1.2,idm_3,1.13,49.05,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,28.64,0.81,9.48 -0.08,163.5,0.0,63.58,1.21,idm_3,1.13,49.04,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,28.64,0.81,9.49 -0.16,162.24,0.0,63.59,1.21,idm_3,1.13,49.02,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,28.66,0.8,9.5 -0.23,161.0,0.0,63.61,1.21,idm_3,1.14,48.99,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,28.68,0.79,9.52 -0.31,159.78,0.0,63.63,1.21,idm_3,1.14,48.94,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,28.71,0.79,9.54 -0.0,164.78,0.0,69.65,1.2,idm_4,1.13,34.22,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,38.18,0.81,17.34 -0.08,163.5,0.0,69.65,1.21,idm_4,1.13,34.21,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,38.19,0.81,17.35 -0.16,162.24,0.0,69.66,1.21,idm_4,1.13,34.19,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,38.21,0.8,17.36 
-0.23,161.0,0.0,69.68,1.21,idm_4,1.14,34.15,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,38.23,0.79,17.38 -0.31,159.78,0.0,69.69,1.21,idm_4,1.14,34.11,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,38.26,0.79,17.41 -0.0,164.78,0.0,73.49,1.2,idm_5,1.13,19.04,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,47.73,0.81,26.5 -0.08,163.5,0.0,73.5,1.21,idm_5,1.13,19.02,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,47.74,0.81,26.51 -0.16,162.24,0.0,73.5,1.21,idm_5,1.13,18.99,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,47.75,0.8,26.53 -0.23,161.0,0.0,73.51,1.21,idm_5,1.14,18.95,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,47.77,0.79,26.55 -0.31,159.78,0.0,73.52,1.21,idm_5,1.14,18.91,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,47.81,0.79,26.58 -0.0,164.78,0.0,74.87,1.2,idm_6,1.13,4.39,0.1,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2624.72,0,0.07,idm,55.94,57.27,0.81,36.34 -0.08,163.5,0.0,74.87,1.21,idm_6,1.13,4.38,0.2,bottom,HBEFA3/PC_G_EU4,routebottom,0.1,2631.03,0,0.07,idm,59.48,57.28,0.81,36.35 -0.16,162.24,0.0,74.87,1.21,idm_6,1.13,4.36,0.3,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2636.67,0,0.07,idm,59.44,57.3,0.8,36.37 -0.23,161.0,0.0,74.87,1.21,idm_6,1.14,4.32,0.4,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2641.63,0,0.07,idm,59.4,57.32,0.79,36.39 -0.31,159.78,0.0,74.87,1.21,idm_6,1.14,4.28,0.5,bottom,HBEFA3/PC_G_EU4,routebottom,0.0,2645.91,0,0.06,idm,59.36,57.35,0.79,36.42 -0.0,164.78,0.0,73.62,1.2,idm_7,1.13,349.16,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,9.32,0.81,46.26 -0.08,163.5,0.0,73.62,1.21,idm_7,1.13,349.15,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,9.33,0.81,46.27 -0.16,162.24,0.0,73.61,1.21,idm_7,1.13,349.12,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,9.34,0.8,46.28 
-0.23,161.0,0.0,73.6,1.21,idm_7,1.14,349.07,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,9.37,0.79,46.31 -0.31,159.78,0.0,73.6,1.21,idm_7,1.14,349.01,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,9.4,0.79,46.34 -0.0,164.78,0.0,69.89,1.2,idm_8,1.13,334.33,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,18.86,0.81,55.47 -0.08,163.5,0.0,69.88,1.21,idm_8,1.13,334.32,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,18.87,0.81,55.47 -0.16,162.24,0.0,69.87,1.21,idm_8,1.13,334.3,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,18.89,0.8,55.49 -0.23,161.0,0.0,69.86,1.21,idm_8,1.14,334.27,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,18.91,0.79,55.51 -0.31,159.78,0.0,69.85,1.21,idm_8,1.14,334.22,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,18.94,0.79,55.54 -0.0,164.78,0.0,63.91,1.2,idm_9,1.13,319.44,0.1,right,HBEFA3/PC_G_EU4,routeright,0.0,2624.72,0,0.07,idm,55.94,28.41,0.81,63.4 -0.08,163.5,0.0,63.9,1.21,idm_9,1.13,319.42,0.2,right,HBEFA3/PC_G_EU4,routeright,0.1,2631.03,0,0.07,idm,59.48,28.42,0.81,63.41 -0.16,162.24,0.0,63.89,1.21,idm_9,1.13,319.39,0.3,right,HBEFA3/PC_G_EU4,routeright,0.0,2636.67,0,0.07,idm,59.44,28.43,0.8,63.42 -0.23,161.0,0.0,63.87,1.21,idm_9,1.14,319.35,0.4,right,HBEFA3/PC_G_EU4,routeright,0.0,2641.63,0,0.07,idm,59.4,28.46,0.79,63.44 -0.31,159.78,0.0,63.85,1.21,idm_9,1.14,319.3,0.5,right,HBEFA3/PC_G_EU4,routeright,0.0,2645.91,0,0.06,idm,59.36,28.49,0.79,63.46 +time,id,x,y,speed,headway,leader_id,follower_id,leader_rel_speed,target_accel_with_noise_with_failsafe,target_accel_no_noise_no_failsafe,target_accel_with_noise_no_failsafe,target_accel_no_noise_with_failsafe,realized_accel,road_grade,edge_id,lane_number,distance,relative_position +0.0,idm_0,36.64,-1.6,0.0,4.545454545454547,idm_1,idm_21,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,0.0 
+0.1,idm_0,36.648322761506634,-1.599834647122385,0.07984158415841586,4.545454545454546,idm_1,idm_21,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415841587,0.007984158415841587 +0.2,idm_0,36.66480556684144,-1.599507174168713,0.15812219156578355,4.545454545454545,idm_1,idm_21,0.0,0.7906341348144134,0.7906341348144134,0.7906341348144134,0.7906341348144134,0.7828060740736771,0,bottom,0,0.023796377572419945,0.023796377572419945 +0.3,idm_0,36.68928269645688,-1.599020873580327,0.23481302481051264,4.545454545454546,idm_1,idm_21,5.551115123125783e-17,0.7745774157717638,0.7745774157717638,0.7745774157717638,0.7745774157717638,0.7669083324472908,0,bottom,0,0.04727768005347121,0.04727768005347121 +0.0,idm_1,46.477059895666216,-0.2910450274933619,0.0,4.545454545454547,idm_2,idm_0,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,9.545454545454547 +0.1,idm_1,46.48510950976829,-0.2889238453988948,0.07984158415841586,4.545454545454547,idm_2,idm_0,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415840879,9.553438703870388 +0.2,idm_1,46.5010513605782,-0.2847229522800698,0.15812219156578355,4.5454545454545485,idm_2,idm_0,0.0,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.023796377572418592,9.569250923026964 +0.3,idm_1,46.524725167351825,-0.2784845842789108,0.2348130248105127,4.5454545454545485,idm_2,idm_0,-5.551115123125783e-17,0.7745774157717642,0.7745774157717642,0.7745774157717642,0.7745774157717642,0.7669083324472914,0,bottom,0,0.04727768005347066,9.592732225508016 +0.0,idm_2,55.65270828548022,3.488595652781747,0.0,4.545454545454547,idm_3,idm_1,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,19.090909090909093 
+0.1,idm_2,55.66000796138611,3.4925969566116453,0.07984158415841586,4.545454545454547,idm_3,idm_1,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415840879,19.098893249324934 +0.2,idm_2,55.67446459778839,3.5005213350840068,0.15812219156578355,4.545454545454547,idm_3,idm_1,0.0,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.02379637757242037,19.114705468481514 +0.3,idm_2,55.69593284641682,3.5122891158136613,0.23481302481051264,4.545454545454547,idm_3,idm_1,5.551115123125783e-17,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7669083324472908,0,bottom,0,0.04727768005347244,19.138186770962566 +0.0,idm_3,63.54122270574333,9.511222705743334,0.0,4.545454545454547,idm_4,idm_2,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,28.63636363636364 +0.1,idm_3,63.54710894820649,9.517108948206497,0.07984158415841586,4.545454545454549,idm_4,idm_2,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415840879,28.64434779477948 +0.2,idm_3,63.558766351653254,9.528766351653257,0.15812219156578355,4.545454545454549,idm_4,idm_2,0.0,0.7906341348144138,0.7906341348144138,0.7906341348144138,0.7906341348144138,0.7828060740736771,0,bottom,0,0.02379637757242037,28.660160013936057 +0.3,idm_3,63.57607771154312,9.546077711543122,0.2348130248105127,4.545454545454549,idm_4,idm_2,-5.551115123125783e-17,0.7745774157717643,0.7745774157717643,0.7745774157717643,0.7745774157717643,0.7669083324472914,0,bottom,0,0.04727768005347244,28.683641316417113 +0.0,idm_4,69.61055207686752,17.363529025870548,0.0,4.545454545454547,idm_5,idm_3,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,38.18181818181819 
+0.1,idm_4,69.61489064350748,17.370633428743492,0.07984158415841586,4.545454545454547,idm_5,idm_3,0.0,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415844433,38.18980234023403 +0.2,idm_4,69.62348295380036,17.384703336848084,0.15812219156578355,4.545454545454547,idm_5,idm_3,0.0,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.023796377572423918,38.20561455939061 +0.3,idm_4,69.63624261991681,17.40559729011379,0.23481302481051264,4.545454545454547,idm_5,idm_3,0.0,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7669083324472908,0,bottom,0,0.04727768005347599,38.229095861871656 +0.0,idm_5,73.45066460734851,26.51380415096358,0.0,4.545454545454547,idm_6,idm_4,0.0,0.0,0.0,0.0,0.0,0.0,0,bottom,0,0.0,47.72727272727274 +0.1,idm_5,73.45278578944298,26.521853765065657,0.07984158415841586,4.545454545454547,idm_6,idm_4,-1.3877787807814454e-17,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.8064000000000001,0.7984158415841586,0,bottom,0,0.007984158415844433,47.73525688568858 +0.2,idm_5,73.4569866825618,26.53779561587557,0.15812219156578355,4.545454545454547,idm_6,idm_4,-5.551115123125783e-17,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7906341348144135,0.7828060740736771,0,bottom,0,0.023796377572423918,47.75106910484515 +0.3,idm_5,73.46322505056297,26.561469422649196,0.23481302481051264,4.545454545454547,idm_6,idm_4,-1.1102230246251563e-16,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7745774157717641,0.7669083324472908,0,bottom,0,0.04727768005347599,47.77455040732621 diff --git a/tests/fast_tests/test_visualizers.py b/tests/fast_tests/test_visualizers.py index d2f4a20a4..47aa9d968 100644 --- a/tests/fast_tests/test_visualizers.py +++ b/tests/fast_tests/test_visualizers.py @@ -94,54 +94,54 @@ def test_time_space_diagram_figure_eight(self): dir_path = 
os.path.dirname(os.path.realpath(__file__)) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/fig8.json')) - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/fig8_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = np.array([ - [[1., 60.], [2., 59.]], - [[2., 59.], [3., 57.02]], - [[3., 57.02], [4., 54.05]], - [[1., 23.8], [2., 22.81]], - [[2., 22.81], [3., 20.83]], - [[3., 20.83], [4., 17.89]], - [[1., 182.84166941], [2., 181.85166941]], - [[2., 181.85166941], [3., 179.87166941]], - [[3., 179.87166941], [4., 176.92166941]], - [[1., 154.07166941], [2., 153.08166941]], - [[2., 153.08166941], [3., 151.10166941]], - [[3., 151.10166941], [4., 148.16166941]], - [[1., 125.30166941], [2., 124.31166941]], - [[2., 124.31166941], [3., 122.34166941]], - [[3., 122.34166941], [4., 119.39166941]], - [[1., 96.54166941], [2., 95.54166941]], - [[2., 95.54166941], [3., 93.56166941]], - [[3., 93.56166941], [4., 90.59166941]], - [[1., -203.16166941], [2., -202.17166941]], - [[2., -202.17166941], [3., -200.02166941]], - [[3., -200.02166941], [4., -197.07166941]], - [[1., -174.40166941], [2., -173.40166941]], - [[2., -173.40166941], [3., -171.43166941]], - [[3., -171.43166941], [4., -168.48166941]], - [[1., -145.63166941], [2., -144.64166941]], - [[2., -144.64166941], [3., -142.66166941]], - [[3., -142.66166941], [4., -139.72166941]], - [[1., -116.86166941], [2., -115.87166941]], - [[2., -115.87166941], [3., -113.89166941]], - [[3., -113.89166941], [4., -110.95166941]], - [[1., -88.09166941], [2., -87.10166941]], - [[2., -87.10166941], [3., -85.13166941]], - [[3., -85.13166941], [4., -82.18166941]], - [[1., -59.33], [2., -58.34]], - [[2., -58.34], [3., -56.36]], - [[3., -56.36], [4., -53.42]], - [[1., -30.56], [2., -29.72]], - [[2., -29.72], 
[3., -27.97]], - [[3., -27.97], [4., -25.22]], - [[1., -1.79], [2., -0.8]], - [[2., -0.8], [3., 208.64166941]], - [[3., 208.64166941], [4., 205.69166941]]] + [[1., 263.16166941], [2., 262.16166941]], + [[2., 262.16166941], [3., 260.18166941]], + [[3., 260.18166941], [4., 257.21166941]], + [[1., 226.96166941], [2., 225.97166941]], + [[2., 225.97166941], [3., 223.99166941]], + [[3., 223.99166941], [4., 221.05166941]], + [[1., 386.00333882], [2., 385.01333882]], + [[2., 385.01333882], [3., 383.03333882]], + [[3., 383.03333882], [4., 380.08333882]], + [[1., 357.23333882], [2., 356.24333882]], + [[2., 356.24333882], [3., 354.26333882]], + [[3., 354.26333882], [4., 351.32333882]], + [[1., 328.46333882], [2., 327.47333882]], + [[2., 327.47333882], [3., 325.50333882]], + [[3., 325.50333882], [4., 322.55333882]], + [[1., 299.70333882], [2., 298.70333882]], + [[2., 298.70333882], [3., 296.72333882]], + [[3., 296.72333882], [4., 293.75333882]], + [[1., 0.], [2., 0.99]], + [[2., 0.99], [3., 3.14]], + [[3., 3.14], [4., 6.09]], + [[1., 28.76], [2., 29.76]], + [[2., 29.76], [3., 31.73]], + [[3., 31.73], [4., 34.68]], + [[1., 57.53], [2., 58.52]], + [[2., 58.52], [3., 60.5]], + [[3., 60.5], [4., 63.44]], + [[1., 86.3], [2., 87.29]], + [[2., 87.29], [3., 89.27]], + [[3., 89.27], [4., 92.21]], + [[1., 115.07], [2., 116.06]], + [[2., 116.06], [3., 118.03]], + [[3., 118.03], [4., 120.98]], + [[1., 143.83166941], [2., 144.82166941]], + [[2., 144.82166941], [3., 146.80166941]], + [[3., 146.80166941], [4., 149.74166941]], + [[1., 172.60166941], [2., 173.44166941]], + [[2., 173.44166941], [3., 175.19166941]], + [[3., 175.19166941], [4., 177.94166941]], + [[1., 201.37166941], [2., 202.36166941]], + [[2., 202.36166941], [3., 411.80333882]], + [[3., 411.80333882], [4., 408.85333882]]] ) np.testing.assert_array_almost_equal(segs, expected_segs) @@ -150,16 +150,16 @@ def test_time_space_diagram_merge(self): dir_path = os.path.dirname(os.path.realpath(__file__)) flow_params = 
tsd.get_flow_params( os.path.join(dir_path, 'test_files/merge.json')) - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/merge_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = np.array([ - [[2.0000e-01, 7.2949e+02], [4.0000e-01, 7.2953e+02]], - [[4.0000e-01, 7.2953e+02], [6.0000e-01, 7.2961e+02]], - [[6.0000e-01, 7.2961e+02], [8.0000e-01, 7.2973e+02]], - [[8.0000e-01, 7.2973e+02], [1.0000e+00, 7.2988e+02]]] + [[2.0000e-01, 7.2463e+02], [4.0000e-01, 7.2467e+02]], + [[4.0000e-01, 7.2467e+02], [6.0000e-01, 7.2475e+02]], + [[6.0000e-01, 7.2475e+02], [8.0000e-01, 7.2487e+02]], + [[8.0000e-01, 7.2487e+02], [1.0000e+00, 7.2502e+02]]] ) np.testing.assert_array_almost_equal(segs, expected_segs) @@ -168,37 +168,37 @@ def test_time_space_diagram_I210(self): dir_path = os.path.dirname(os.path.realpath(__file__)) module = __import__("examples.exp_configs.non_rl", fromlist=["i210_subnetwork"]) flow_params = getattr(module, "i210_subnetwork").flow_params - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/i210_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = { 1: np.array([ - [[0.8, 5.1], [1.6, 23.37]], - [[1.6, 23.37], [2.4, 42.02]], - [[2.4, 42.02], [3.2, 61.21]], - [[3.2, 61.21], [4., 18.87]], - [[4., 18.87], [4.8, 39.93]], - [[2.4, 5.1], [3.2, 22.97]], - [[3.2, 22.97], [4., 40.73]]] + [[-719.2, 3.77], [-718.4, 22.04]], + [[-718.4, 22.04], [-717.6, 40.69]], + [[-717.6, 40.69], [-716.8, 59.88]], + [[-716.8, 59.88], [-716., 17.54]], + [[-716., 17.54], [-715.2, 38.6]], + [[-717.6, 3.77], [-716.8, 21.64]], + [[-716.8, 21.64], 
[-716., 39.4]]] ), 2: np.array([ - [[2.4, 5.1], [3.2, 23.98]], - [[3.2, 23.98], [4., 43.18]]] + [[-717.6, 3.77], [-716.8, 22.65]], + [[-716.8, 22.65], [-716., 41.85]]] ), 3: np.array([ - [[0.8, 5.1], [1.6, 23.72]], - [[1.6, 23.72], [2.4, 43.06]], - [[2.4, 43.06], [3.2, 1.33]], - [[3.2, 1.33], [4., 21.65]], - [[4., 21.65], [4.8, 43.46]], - [[2.4, 5.1], [3.2, 23.74]], - [[3.2, 23.74], [4., 42.38]]] + [[-719.2, 3.77], [-718.4, 22.39]], + [[-718.4, 22.39], [-717.6, 41.73]], + [[-717.6, 41.73], [-716.8, 0.]], + [[-716.8, 0.], [-716., 20.32]], + [[-716., 20.32], [-715.2, 42.13]], + [[-717.6, 3.77], [-716.8, 22.41]], + [[-716.8, 22.41], [-716., 41.05]]] ), 4: np.array([ - [[2.4, 5.1], [3.2, 23.6]], - [[3.2, 23.6], [4., 42.46]]] + [[-717.6, 3.77], [-716.8, 22.27]], + [[-716.8, 22.27], [-716., 41.13]]] )} for lane, expected_seg in expected_segs.items(): @@ -208,106 +208,30 @@ def test_time_space_diagram_ring_road(self): dir_path = os.path.dirname(os.path.realpath(__file__)) flow_params = tsd.get_flow_params( os.path.join(dir_path, 'test_files/ring_230.json')) - emission_data = tsd.import_data_from_trajectory( + emission_data, _, _, _ = tsd.import_data_from_trajectory( os.path.join(dir_path, 'test_files/ring_230_emission.csv'), flow_params) - segs, _ = tsd.get_time_space_data(emission_data, flow_params) + segs, _ = tsd.get_time_space_data(emission_data, flow_params['network']) expected_segs = np.array([ - [[1.0000e-01, 0.0000e+00], [2.0000e-01, 1.0000e-02]], - [[2.0000e-01, 1.0000e-02], [3.0000e-01, 2.0000e-02]], - [[3.0000e-01, 2.0000e-02], [4.0000e-01, 5.0000e-02]], - [[4.0000e-01, 5.0000e-02], [5.0000e-01, 8.0000e-02]], - [[5.0000e-01, 8.0000e-02], [6.0000e-01, 1.2000e-01]], - [[1.0000e-01, 9.5500e+00], [2.0000e-01, 9.5500e+00]], - [[2.0000e-01, 9.5500e+00], [3.0000e-01, 9.5700e+00]], - [[3.0000e-01, 9.5700e+00], [4.0000e-01, 9.5900e+00]], - [[4.0000e-01, 9.5900e+00], [5.0000e-01, 9.6200e+00]], - [[5.0000e-01, 9.6200e+00], [6.0000e-01, 9.6600e+00]], - [[1.0000e-01, 
9.5550e+01], [2.0000e-01, 9.5560e+01]], - [[2.0000e-01, 9.5560e+01], [3.0000e-01, 9.5580e+01]], - [[3.0000e-01, 9.5580e+01], [4.0000e-01, 9.5600e+01]], - [[4.0000e-01, 9.5600e+01], [5.0000e-01, 9.5630e+01]], - [[5.0000e-01, 9.5630e+01], [6.0000e-01, 9.5670e+01]], - [[1.0000e-01, 1.0510e+02], [2.0000e-01, 1.0511e+02]], - [[2.0000e-01, 1.0511e+02], [3.0000e-01, 1.0512e+02]], - [[3.0000e-01, 1.0512e+02], [4.0000e-01, 1.0515e+02]], - [[4.0000e-01, 1.0515e+02], [5.0000e-01, 1.0518e+02]], - [[5.0000e-01, 1.0518e+02], [6.0000e-01, 1.0522e+02]], - [[1.0000e-01, 1.1465e+02], [2.0000e-01, 1.1465e+02]], - [[2.0000e-01, 1.1465e+02], [3.0000e-01, 1.1467e+02]], - [[3.0000e-01, 1.1467e+02], [4.0000e-01, 1.1469e+02]], - [[4.0000e-01, 1.1469e+02], [5.0000e-01, 1.1472e+02]], - [[5.0000e-01, 1.1472e+02], [6.0000e-01, 1.1476e+02]], - [[1.0000e-01, 1.2429e+02], [2.0000e-01, 1.2430e+02]], - [[2.0000e-01, 1.2430e+02], [3.0000e-01, 1.2431e+02]], - [[3.0000e-01, 1.2431e+02], [4.0000e-01, 1.2434e+02]], - [[4.0000e-01, 1.2434e+02], [5.0000e-01, 1.2437e+02]], - [[5.0000e-01, 1.2437e+02], [6.0000e-01, 1.2441e+02]], - [[1.0000e-01, 1.3384e+02], [2.0000e-01, 1.3384e+02]], - [[2.0000e-01, 1.3384e+02], [3.0000e-01, 1.3386e+02]], - [[3.0000e-01, 1.3386e+02], [4.0000e-01, 1.3388e+02]], - [[4.0000e-01, 1.3388e+02], [5.0000e-01, 1.3391e+02]], - [[1.0000e-01, 1.4338e+02], [2.0000e-01, 1.4339e+02]], - [[2.0000e-01, 1.4339e+02], [3.0000e-01, 1.4341e+02]], - [[3.0000e-01, 1.4341e+02], [4.0000e-01, 1.4343e+02]], - [[4.0000e-01, 1.4343e+02], [5.0000e-01, 1.4346e+02]], - [[1.0000e-01, 1.5293e+02], [2.0000e-01, 1.5294e+02]], - [[2.0000e-01, 1.5294e+02], [3.0000e-01, 1.5295e+02]], - [[3.0000e-01, 1.5295e+02], [4.0000e-01, 1.5297e+02]], - [[4.0000e-01, 1.5297e+02], [5.0000e-01, 1.5301e+02]], - [[1.0000e-01, 1.6247e+02], [2.0000e-01, 1.6248e+02]], - [[2.0000e-01, 1.6248e+02], [3.0000e-01, 1.6250e+02]], - [[3.0000e-01, 1.6250e+02], [4.0000e-01, 1.6252e+02]], - [[4.0000e-01, 1.6252e+02], [5.0000e-01, 1.6255e+02]], 
- [[1.0000e-01, 1.7202e+02], [2.0000e-01, 1.7203e+02]], - [[2.0000e-01, 1.7203e+02], [3.0000e-01, 1.7204e+02]], - [[3.0000e-01, 1.7204e+02], [4.0000e-01, 1.7207e+02]], - [[4.0000e-01, 1.7207e+02], [5.0000e-01, 1.7210e+02]], - [[1.0000e-01, 1.8166e+02], [2.0000e-01, 1.8167e+02]], - [[2.0000e-01, 1.8167e+02], [3.0000e-01, 1.8169e+02]], - [[3.0000e-01, 1.8169e+02], [4.0000e-01, 1.8171e+02]], - [[4.0000e-01, 1.8171e+02], [5.0000e-01, 1.8174e+02]], - [[1.0000e-01, 1.9090e+01], [2.0000e-01, 1.9100e+01]], - [[2.0000e-01, 1.9100e+01], [3.0000e-01, 1.9110e+01]], - [[3.0000e-01, 1.9110e+01], [4.0000e-01, 1.9140e+01]], - [[4.0000e-01, 1.9140e+01], [5.0000e-01, 1.9170e+01]], - [[1.0000e-01, 1.9121e+02], [2.0000e-01, 1.9122e+02]], - [[2.0000e-01, 1.9122e+02], [3.0000e-01, 1.9123e+02]], - [[3.0000e-01, 1.9123e+02], [4.0000e-01, 1.9126e+02]], - [[4.0000e-01, 1.9126e+02], [5.0000e-01, 1.9129e+02]], - [[1.0000e-01, 2.0075e+02], [2.0000e-01, 2.0076e+02]], - [[2.0000e-01, 2.0076e+02], [3.0000e-01, 2.0078e+02]], - [[3.0000e-01, 2.0078e+02], [4.0000e-01, 2.0081e+02]], - [[4.0000e-01, 2.0081e+02], [5.0000e-01, 2.0085e+02]], - [[1.0000e-01, 2.8640e+01], [2.0000e-01, 2.8640e+01]], - [[2.0000e-01, 2.8640e+01], [3.0000e-01, 2.8660e+01]], - [[3.0000e-01, 2.8660e+01], [4.0000e-01, 2.8680e+01]], - [[4.0000e-01, 2.8680e+01], [5.0000e-01, 2.8710e+01]], - [[1.0000e-01, 3.8180e+01], [2.0000e-01, 3.8190e+01]], - [[2.0000e-01, 3.8190e+01], [3.0000e-01, 3.8210e+01]], - [[3.0000e-01, 3.8210e+01], [4.0000e-01, 3.8230e+01]], - [[4.0000e-01, 3.8230e+01], [5.0000e-01, 3.8260e+01]], - [[1.0000e-01, 4.7730e+01], [2.0000e-01, 4.7740e+01]], - [[2.0000e-01, 4.7740e+01], [3.0000e-01, 4.7750e+01]], - [[3.0000e-01, 4.7750e+01], [4.0000e-01, 4.7770e+01]], - [[4.0000e-01, 4.7770e+01], [5.0000e-01, 4.7810e+01]], - [[1.0000e-01, 5.7270e+01], [2.0000e-01, 5.7280e+01]], - [[2.0000e-01, 5.7280e+01], [3.0000e-01, 5.7300e+01]], - [[3.0000e-01, 5.7300e+01], [4.0000e-01, 5.7320e+01]], - [[4.0000e-01, 5.7320e+01], 
[5.0000e-01, 5.7350e+01]], - [[1.0000e-01, 6.6920e+01], [2.0000e-01, 6.6930e+01]], - [[2.0000e-01, 6.6930e+01], [3.0000e-01, 6.6940e+01]], - [[3.0000e-01, 6.6940e+01], [4.0000e-01, 6.6970e+01]], - [[4.0000e-01, 6.6970e+01], [5.0000e-01, 6.7000e+01]], - [[1.0000e-01, 7.6460e+01], [2.0000e-01, 7.6470e+01]], - [[2.0000e-01, 7.6470e+01], [3.0000e-01, 7.6490e+01]], - [[3.0000e-01, 7.6490e+01], [4.0000e-01, 7.6510e+01]], - [[4.0000e-01, 7.6510e+01], [5.0000e-01, 7.6540e+01]], - [[1.0000e-01, 8.6010e+01], [2.0000e-01, 8.6020e+01]], - [[2.0000e-01, 8.6020e+01], [3.0000e-01, 8.6030e+01]], - [[3.0000e-01, 8.6030e+01], [4.0000e-01, 8.6060e+01]], - [[4.0000e-01, 8.6060e+01], [5.0000e-01, 8.6090e+01]]] + [[-7.50000000e+01, 0.00000000e+00], [-7.49000000e+01, 7.98415842e-03]], + [[-7.49000000e+01, 7.98415842e-03], [-7.48000000e+01, 2.37963776e-02]], + [[-7.48000000e+01, 2.37963776e-02], [-7.47000000e+01, 4.72776801e-02]], + [[-7.50000000e+01, 9.54545455e+00], [-7.49000000e+01, 9.55343870e+00]], + [[-7.49000000e+01, 9.55343870e+00], [-7.48000000e+01, 9.56925092e+00]], + [[-7.48000000e+01, 9.56925092e+00], [-7.47000000e+01, 9.59273223e+00]], + [[-7.50000000e+01, 1.90909091e+01], [-7.49000000e+01, 1.90988932e+01]], + [[-7.49000000e+01, 1.90988932e+01], [-7.48000000e+01, 1.91147055e+01]], + [[-7.48000000e+01, 1.91147055e+01], [-7.47000000e+01, 1.91381868e+01]], + [[-7.50000000e+01, 2.86363636e+01], [-7.49000000e+01, 2.86443478e+01]], + [[-7.49000000e+01, 2.86443478e+01], [-7.48000000e+01, 2.86601600e+01]], + [[-7.48000000e+01, 2.86601600e+01], [-7.47000000e+01, 2.86836413e+01]], + [[-7.50000000e+01, 3.81818182e+01], [-7.49000000e+01, 3.81898023e+01]], + [[-7.49000000e+01, 3.81898023e+01], [-7.48000000e+01, 3.82056146e+01]], + [[-7.48000000e+01, 3.82056146e+01], [-7.47000000e+01, 3.82290959e+01]], + [[-7.50000000e+01, 4.77272727e+01], [-7.49000000e+01, 4.77352569e+01]], + [[-7.49000000e+01, 4.77352569e+01], [-7.48000000e+01, 4.77510691e+01]], + [[-7.48000000e+01, 4.77510691e+01], 
[-7.47000000e+01, 4.77745504e+01]]] ) np.testing.assert_array_almost_equal(segs, expected_segs) From 6c6880096c874779efec8932b649239cdeb5ba1e Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 13 Jul 2020 10:25:32 -0700 Subject: [PATCH 334/335] Move imitation to algorithms folder --- examples/train.py | 6 +++--- flow/algorithms/__init__.py | 0 .../algorithms/imitation_learning/__init__.py | 0 .../imitation_learning/custom_ppo.py | 6 ++++-- .../custom_ppo_tf_policy.py | 0 .../imitation_learning/custom_trainable.py | 2 +- .../imitating_controller.py | 5 +---- .../imitation_learning/imitating_network.py | 4 ++-- .../imitation_learning/keras_utils.py | 0 .../imitation_learning/ppo_model.py | 2 +- .../imitation_learning/replay_buffer.py | 0 .../imitation_learning/run.py | 3 +-- .../train_with_imitation.py | 5 +++-- .../imitation_learning/trainer.py | 19 +++++------------- .../imitation_learning/utils.py | 4 ++-- .../imitation_learning/utils_tensorflow.py | 0 .../model_files/follower_stopper1.h5 | Bin 35456 -> 0 bytes .../model_files/ppo_model_i210.h5 | Bin 53208 -> 0 bytes 18 files changed, 23 insertions(+), 33 deletions(-) create mode 100644 flow/algorithms/__init__.py create mode 100644 flow/algorithms/imitation_learning/__init__.py rename flow/{controllers => algorithms}/imitation_learning/custom_ppo.py (98%) rename flow/{controllers => algorithms}/imitation_learning/custom_ppo_tf_policy.py (100%) rename flow/{controllers => algorithms}/imitation_learning/custom_trainable.py (96%) rename flow/{controllers => algorithms}/imitation_learning/imitating_controller.py (94%) rename flow/{controllers => algorithms}/imitation_learning/imitating_network.py (97%) rename flow/{controllers => algorithms}/imitation_learning/keras_utils.py (100%) rename flow/{controllers => algorithms}/imitation_learning/ppo_model.py (98%) rename flow/{controllers => algorithms}/imitation_learning/replay_buffer.py (100%) rename flow/{controllers => algorithms}/imitation_learning/run.py (98%) 
rename flow/{controllers => algorithms}/imitation_learning/train_with_imitation.py (97%) rename flow/{controllers => algorithms}/imitation_learning/trainer.py (96%) rename flow/{controllers => algorithms}/imitation_learning/utils.py (98%) rename flow/{controllers => algorithms}/imitation_learning/utils_tensorflow.py (100%) delete mode 100644 flow/controllers/imitation_learning/model_files/follower_stopper1.h5 delete mode 100644 flow/controllers/imitation_learning/model_files/ppo_model_i210.h5 diff --git a/examples/train.py b/examples/train.py index 9445e81e0..20b4b373a 100644 --- a/examples/train.py +++ b/examples/train.py @@ -179,7 +179,7 @@ def setup_exps_rllib(flow_params, alg_run = flags.algorithm.upper() if alg_run == "PPO": - from flow.controllers.imitation_learning.custom_ppo import CustomPPOTrainer + from flow.algorithms.imitation_learning.custom_ppo import CustomPPOTrainer from ray.rllib.agents.ppo import DEFAULT_CONFIG config = deepcopy(DEFAULT_CONFIG) @@ -202,8 +202,8 @@ def setup_exps_rllib(flow_params, config["lr"] = tune.grid_search([5e-4, 5e-5]) if flags.load_weights_path: - from flow.controllers.imitation_learning.ppo_model import PPONetwork - from flow.controllers.imitation_learning.custom_trainable import Imitation_PPO_Trainable + from flow.algorithms.imitation_learning.ppo_model import PPONetwork + from flow.algorithms.imitation_learning.custom_trainable import Imitation_PPO_Trainable from ray.rllib.models import ModelCatalog # Register custom model diff --git a/flow/algorithms/__init__.py b/flow/algorithms/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/flow/algorithms/imitation_learning/__init__.py b/flow/algorithms/imitation_learning/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/flow/controllers/imitation_learning/custom_ppo.py b/flow/algorithms/imitation_learning/custom_ppo.py similarity index 98% rename from flow/controllers/imitation_learning/custom_ppo.py rename to 
flow/algorithms/imitation_learning/custom_ppo.py index ed6fa032b..c7e81e13c 100644 --- a/flow/controllers/imitation_learning/custom_ppo.py +++ b/flow/algorithms/imitation_learning/custom_ppo.py @@ -3,9 +3,12 @@ """ import logging +import os from ray.rllib.agents import with_common_config -from flow.controllers.imitation_learning.custom_ppo_tf_policy import CustomPPOTFPolicy +from ray.tune.trial import ExportFormat + +from flow.algorithms.imitation_learning.custom_ppo_tf_policy import CustomPPOTFPolicy from ray.rllib.agents.trainer_template import build_trainer from ray.rllib.optimizers import SyncSamplesOptimizer, LocalMultiGPUOptimizer from ray.rllib.utils import try_import_tf @@ -224,5 +227,4 @@ def import_model(self, import_file, policy_id=DEFAULT_POLICY_ID): return self.import_policy_model_from_h5(import_file, policy_id=policy_id) from ray.rllib.agents import Trainer -print('Overriding import model') setattr(Trainer, 'import_model', import_model) \ No newline at end of file diff --git a/flow/controllers/imitation_learning/custom_ppo_tf_policy.py b/flow/algorithms/imitation_learning/custom_ppo_tf_policy.py similarity index 100% rename from flow/controllers/imitation_learning/custom_ppo_tf_policy.py rename to flow/algorithms/imitation_learning/custom_ppo_tf_policy.py diff --git a/flow/controllers/imitation_learning/custom_trainable.py b/flow/algorithms/imitation_learning/custom_trainable.py similarity index 96% rename from flow/controllers/imitation_learning/custom_trainable.py rename to flow/algorithms/imitation_learning/custom_trainable.py index 66785d905..993113607 100644 --- a/flow/controllers/imitation_learning/custom_trainable.py +++ b/flow/algorithms/imitation_learning/custom_trainable.py @@ -3,7 +3,7 @@ from ray.rllib.agents.agent import get_agent_class except ImportError: from ray.rllib.agents.registry import get_agent_class -import flow.controllers.imitation_learning.custom_ppo as custom_ppo +import flow.algorithms.imitation_learning.custom_ppo as 
custom_ppo class Imitation_PPO_Trainable(tune.Trainable): """ diff --git a/flow/controllers/imitation_learning/imitating_controller.py b/flow/algorithms/imitation_learning/imitating_controller.py similarity index 94% rename from flow/controllers/imitation_learning/imitating_controller.py rename to flow/algorithms/imitation_learning/imitating_controller.py index 4fdd4ebd7..115930744 100644 --- a/flow/controllers/imitation_learning/imitating_controller.py +++ b/flow/algorithms/imitation_learning/imitating_controller.py @@ -1,8 +1,5 @@ -import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp from flow.controllers.base_controller import BaseController -from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer + class ImitatingController(BaseController): """ diff --git a/flow/controllers/imitation_learning/imitating_network.py b/flow/algorithms/imitation_learning/imitating_network.py similarity index 97% rename from flow/controllers/imitation_learning/imitating_network.py rename to flow/algorithms/imitation_learning/imitating_network.py index a95222855..6e9e9c3c7 100644 --- a/flow/controllers/imitation_learning/imitating_network.py +++ b/flow/algorithms/imitation_learning/imitating_network.py @@ -1,7 +1,7 @@ import numpy as np import tensorflow as tf -from flow.controllers.imitation_learning.keras_utils import build_neural_net_deterministic, build_neural_net_stochastic, get_loss, negative_log_likelihood_loss -from flow.controllers.imitation_learning.replay_buffer import ReplayBuffer +from flow.algorithms.imitation_learning.keras_utils import build_neural_net_deterministic, build_neural_net_stochastic, get_loss, negative_log_likelihood_loss +from flow.algorithms.imitation_learning.replay_buffer import ReplayBuffer class ImitatingNetwork(): diff --git a/flow/controllers/imitation_learning/keras_utils.py b/flow/algorithms/imitation_learning/keras_utils.py similarity index 100% rename from 
flow/controllers/imitation_learning/keras_utils.py rename to flow/algorithms/imitation_learning/keras_utils.py diff --git a/flow/controllers/imitation_learning/ppo_model.py b/flow/algorithms/imitation_learning/ppo_model.py similarity index 98% rename from flow/controllers/imitation_learning/ppo_model.py rename to flow/algorithms/imitation_learning/ppo_model.py index 85a7c841e..47ae61f77 100644 --- a/flow/controllers/imitation_learning/ppo_model.py +++ b/flow/algorithms/imitation_learning/ppo_model.py @@ -1,6 +1,6 @@ from ray.rllib.models.tf.tf_modelv2 import TFModelV2 -from flow.controllers.imitation_learning.keras_utils import * +from flow.algorithms.imitation_learning.keras_utils import * class PPONetwork(TFModelV2): diff --git a/flow/controllers/imitation_learning/replay_buffer.py b/flow/algorithms/imitation_learning/replay_buffer.py similarity index 100% rename from flow/controllers/imitation_learning/replay_buffer.py rename to flow/algorithms/imitation_learning/replay_buffer.py diff --git a/flow/controllers/imitation_learning/run.py b/flow/algorithms/imitation_learning/run.py similarity index 98% rename from flow/controllers/imitation_learning/run.py rename to flow/algorithms/imitation_learning/run.py index 41ceb82a6..ed8717a5a 100644 --- a/flow/controllers/imitation_learning/run.py +++ b/flow/algorithms/imitation_learning/run.py @@ -5,7 +5,7 @@ Usage: python run.py EXP_CONFIG """ -from flow.controllers.imitation_learning.trainer import Trainer +from flow.algorithms.imitation_learning.trainer import Trainer class Runner(object): @@ -121,7 +121,6 @@ def main(): if params['num_eval_episodes'] > 0: runner.evaluate() - print('done') if __name__ == "__main__": main() diff --git a/flow/controllers/imitation_learning/train_with_imitation.py b/flow/algorithms/imitation_learning/train_with_imitation.py similarity index 97% rename from flow/controllers/imitation_learning/train_with_imitation.py rename to flow/algorithms/imitation_learning/train_with_imitation.py index 
057c62835..2aae7c2e8 100644 --- a/flow/controllers/imitation_learning/train_with_imitation.py +++ b/flow/algorithms/imitation_learning/train_with_imitation.py @@ -1,4 +1,4 @@ -from flow.controllers.imitation_learning.run import * +from flow.algorithms.imitation_learning.run import * from examples.train import * def parse_args(args): @@ -17,7 +17,7 @@ def parse_args(args): parser = argparse.ArgumentParser( formatter_class=argparse.RawDescriptionHelpFormatter, description="Parse argument used when running a Flow simulation.", - epilog="python train.py EXP_CONFIG") + epilog="python train.py EXP_CONFIG EXP_TITLE") # required input parameters parser.add_argument( @@ -89,6 +89,7 @@ def parse_args(args): parser.add_argument('--variance_regularizer', type=float, default=0.5, help='Regularization hyperparameter to penalize variance in imitation learning negative log-likelihood loss, for stochastic policies.') parser.add_argument('--stochastic', type=bool, default=True, help='If true, learn a stochastic policy (MV Gaussian). 
Must be true to continue with PPO training.') parser.add_argument('--replay_buffer_size', type=int, default=1000000, help='Max size of replay buffer') + parser.add_argument('--lr', type=float, default=0.001, help='Learning rate for imitation learning and value function learning') # loading and saving params: parser.add_argument('--load_imitation_model', type=bool, default=False, help='Whether to load an existing imitation neural network.') diff --git a/flow/controllers/imitation_learning/trainer.py b/flow/algorithms/imitation_learning/trainer.py similarity index 96% rename from flow/controllers/imitation_learning/trainer.py rename to flow/algorithms/imitation_learning/trainer.py index c027368ae..203eee0b1 100644 --- a/flow/controllers/imitation_learning/trainer.py +++ b/flow/algorithms/imitation_learning/trainer.py @@ -1,17 +1,9 @@ -import time -from collections import OrderedDict -import pickle -import numpy as np -import gym -import os -import tensorflow as tf -from utils import * +from flow.algorithms.imitation_learning.utils import sample_n_trajectories, sample_trajectories from flow.utils.registry import make_create_env -from flow.controllers.imitation_learning.imitating_controller import ImitatingController -from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork -from flow.controllers.imitation_learning.utils_tensorflow import * -from flow.controllers.imitation_learning.keras_utils import * -from flow.controllers.car_following_models import IDMController +from flow.algorithms.imitation_learning.imitating_controller import ImitatingController +from flow.algorithms.imitation_learning.imitating_network import ImitatingNetwork +from flow.algorithms.imitation_learning.utils_tensorflow import * +from flow.algorithms.imitation_learning.keras_utils import * from flow.controllers.velocity_controllers import FollowerStopper from flow.core.params import SumoCarFollowingParams @@ -255,7 +247,6 @@ def learn_value_function(self, num_samples, 
num_iterations, num_grad_steps): # iterate over data multiple times (labels change every iteration) for i in range(num_iterations): - print("Iteration: ", i) # form labels next_state_value_preds = vf_net.predict(next_observations).flatten() next_state_value_preds[np.isnan(next_state_value_preds)] = 0 diff --git a/flow/controllers/imitation_learning/utils.py b/flow/algorithms/imitation_learning/utils.py similarity index 98% rename from flow/controllers/imitation_learning/utils.py rename to flow/algorithms/imitation_learning/utils.py index 36f7844e9..cb75ccc19 100644 --- a/flow/controllers/imitation_learning/utils.py +++ b/flow/algorithms/imitation_learning/utils.py @@ -3,8 +3,8 @@ import numpy as np import math from flow.core.params import SumoCarFollowingParams -from flow.controllers.imitation_learning.imitating_controller import ImitatingController -from flow.controllers.imitation_learning.imitating_network import ImitatingNetwork +from flow.algorithms.imitation_learning.imitating_controller import ImitatingController +from flow.algorithms.imitation_learning.imitating_network import ImitatingNetwork from flow.controllers.car_following_models import IDMController from flow.controllers.velocity_controllers import FollowerStopper from flow.core.rewards import * diff --git a/flow/controllers/imitation_learning/utils_tensorflow.py b/flow/algorithms/imitation_learning/utils_tensorflow.py similarity index 100% rename from flow/controllers/imitation_learning/utils_tensorflow.py rename to flow/algorithms/imitation_learning/utils_tensorflow.py diff --git a/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 b/flow/controllers/imitation_learning/model_files/follower_stopper1.h5 deleted file mode 100644 index 45b46d582cb089fed0c240f42bbf47cf14e28397..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 35456 zcmeHP30xD$_YVk46%a4P8!1IZg>Y2}$?oRh&?@4MT8;n#QbV95D0otp3iZOHcvPy^ 
zs#vR{SOiISQ?=ED6*fZV|bCidZUhJXeX$zT)lSWMbUeO8ONE|mDcg8 zifp+iVVWXaqs&y(^*!hbn4QG@2Xi^l-_Un@t1$&o#kM>02soYtJ6@7Jd8$I4VsRXH zrH*V#fh^BxkBqm1)eBmgGuo*C-dWMbUnZiC}v3c@i8`=VfE0E99 zzK-e*7~v9NFDq#+2ypkcH(dBes7D9~r8+A|o8Zq&QK&Tvx{{@+oDcx_7>|eS0_tau zqJ5pnY-0M859f2FEezP#jKVVgl zO%pG`H^4V&h&C0OSr91jcv|{8WM-$TGN*%n(4F&G`$1RD^hj38H5wK<4}rVKNX9OE z40iVbriiYZ>A|4Nv}I+gl*xGsYK3-sX7*HuiA7yQw@qo}g2=LC7!5s)QH>GIb>fVl zB-bXVCxCvMba@st*%Wn-N;TL$Fpi!jMVrUgdC<#{YXbwA?wTxlwuWI$m8&!gx&jG_ zrk_pMDb-1tIqH-IHN7ZS$~d~VQBwADo60F;RF@+n8vSt>ZMIygmM5tg5wzJkj08=I znWI)}nVJ0qXa;$*Ryj?sr5z7lU{IrJUh{1JOQ8O*=;=-B&4BF;08ik*7-Pd|Sryn$e^3=n&4Nm)R>)ZkX*CQj zY#3NwV0ZA7+7L$@M_@7i;-byS=p=eh#qxKNXdRxY(totBU-*D}6~@5|@=^T8)*Xi(_T zpkV(%dT}X=WO*KFXPkvXE2n{C*h50u&(K%>gG2m-0{l^H0NYBpF=P1$gbWROl^@8A zq{z}JRjkYNR}A3~rEBCFnzU>=%OK2`c4Og-V&8 znVCWt>2|trhC-XIOa{)JE`kH3A|;`*9=aWkJR?h`fTzg>=6FR958If*&^Z&<&W6_k zku~T7pb9IsB49}}i0=w4 z^M*Z#MZ5r0Hs)Rbd_EY&ZZILkSsTcw^32~S;qY6FtqTEeJ{9HF>w6jFfc9;Sadopx zz>YNDJ#^^{hC=g%z;diRs7H3B7A9;FUjr<=1p+?E&MOWBWyDtn%XK19kJeeoBxmUl z;wy*w(OS;>0oRN77v-h7ayQx+=Dv;_4;)Y$6VsjCacIK{UQh>K&~Gpyi}KW_D{b1o zn9Cd2x`Ppl;Xsz|>+oLzjOe<+ZdTGt5a8M&q;0wG#sILH4Y~^L*C^xWKU_llhih*d z@9Q|R_jS1Yhq!AA`h_Z6}f!wix4@ykBSOD!6xYKJYt_`m(SQ}iR4bPip zvatmf=)Q>~>}pzeWBYei^{YTepjR%;q$`epa_9fQ~@LBB(z@fGM<=Pi_ z2tXawAA|?mv8aspEZBWKUI=RVofO15%+n9xlLhq_jmOpF-#KinV?*aoWCsj>Plo{E zo!I~Be66i@`!`Fmv-&T|y=%*;)N)U1`uNwW%j5$5(kJgyx7D+$p8gZ5;Ukw*{7XLg z-1>RcM>igkdCTpobEmqH9k;K+6?@;qqIca8R`_lu1^dpD-xb`Tew=%QJmlhr-BuT3 z?UHv2Ucrwl)^Eqyt8 z|JM7$_>g$q*?l`zkk~@tpT7v&)f))>5_zv}B zfP(6HbqbXeT|dr$`7G5F@GcGUaB#^4{Xn@43k=VR|p8i2ptK|_i3pAx#R zUex)C1hL9C0e?gu)@OuVCu{ObsjtsD;?)#InAee8 zWOQg(VqAQ%A=Y=EesSeetZkhS_HEjF?43=^sk;)5;r77g*x<-T)cT$Uc>N+LvTI%> zaaH&gl{RH8#sBacwr#(c{#M(ESjj|htYE*rL1OnU>FQ^1_<7C{YW5dAyk8qn{FlnJ zjU9FxXUOxPXsEk0gX&#$hn&~W1D`(p09i@AO+55YH&8t~ zQ@hF)*yc?oslyZVv6>4r4Zp6Rjla7@juRaM zsOn#);o0RneayZB!{vH!+*S1fMd~(S>xuWtHXr6-mjvs{jM^TCy!yNNgf%~lH1X^7 z8F#vosu{17{rMNkjq9AShIPHMk@bDCzP8R}rFb$fmOmivl0PH`L*AqM9ZSYDlP_R* 
zv1012%un!ky9Q9(y*e2d?#ZKy9$djM*lg2}-{(mctz3rxwB#4;;47yo@3xz+Oa$t$7&Md}lPrrrT zpJt#6OFJ4|oX1iB?BGO5*XA1T#=T?+{`WNU^S(O`hxEhn-X}?e;iNwv6zGH5e*Oj( z?^H#l2HH~NwOjClY9~W`+YhK)wpDumu?WNcYxl5^CMgZISTXK;>mw}6tDIUD+|_XU zbDZ+~>=FL;gh_^nqZ_EnFLxu){Ib^2&o>M&esibc!^n9=&EXI6sf#}_w7u)_>E&EE zLsa4w;>g*^Pq$ap8SK{HAXBTCd^#Y-h3NbJ#!n+QJtX@av%?mAya4-h;Y4cMVO3Z= z%^`hJUbHB8ODT5MZ4>sdJAG@0t~^2YIUk9isTx9gR`kL)KB&T??BAmt<5z?!g1*-K zye$>w%>51%tiOj{DIXm=Yd~j%s(cwfMN>(x_x2Ob9))9*2a2hS0d<11JDqh}VvVk@ zOA)sI(l%_*8f6CPgRfs z-}WVsh3^bo>a_$9x$i)g=y#BBXUB`an(vRTIqOB$-trZ=+^o{sy^~&Z(k2D_^5-QO zZ&&vaSKE!$fp^?+`KA@3dwZ|x3P+UH2qz?A7e`bG8nqCBN{Nrsm8W+q3FB5di}EZ?DZKx@7B91>}%9R zys_J(Khsw&9u&5BN4RKM?hO5ZDk}AE`|NAl?j0#AqxHQ~{*AEnT)zHl+Z#2d(R0c6 zx3-D|X9rR0cY9*{_wK=@-L_Eo$9x{PYVvM%f$w)Q1S@d*X5kxoo+o1>)Wrz zT|e>Cc@3E_bWr_QR~X%qJn?eYff?H)M7#N=dUZ;YDA)UaVfRIq2Rc-&By;CDVvCm^ z33FdCQh(*tSkW1*NL0MlIqb}l-g->CR#bO=ly2}6k*F}*PPlJrJa(ez>-tB&o%Dk5 zH|fsax+AQ|`jEfgf91dt;cDS+)k9tHRcg`n)T5&4A7_O<$SxuGMRdfzd{}@R4&Bz@ z+}NLrm)yj^9MoCYJ24E?tqLMt^{-Gf>*J|1nNatjcsz09>Q|KPfd_WP(V09@G!o;r z?}$}=CB+MpMq^I>x{#iuDn+XYg;9~+mbfKrFmkXPsPsG}##bR**5 z6?$I0p>x{P4tFg)N**TqU|G_3Ls=JTHB&%yNSy#UF=_UdSFta`?u0aa>+x+7 zr<2}{tV&)Q5w)?Gbj8(sk@jEr3b!eBje4kFDShSgal-U`2_h3K32usx^0=JHp?FrSD{!PmSdgvIU&G3U%Eg! 
zLZBWYvA~l#{_LJ&>Y`9XGbzf4aP#25Cx6Ari1t0oTOkVNG|pfp`1_&raC7rl$gY^9 z&9^uC(B2Se2`r+f_9nfVNH?#Wx%^dZFc>cv+6_zgB_tH^TkT8puESrw7uUYH!v(E@ z`h(Wj()lYs)LWFl;_5My!%B@2;O4LL1;*{%658kPLEM(sU4C})tHB2#{nw0NV=y57 z?~Px}C4e94UF+7e*0*}De?jqU1AJZrAp+vU#`ouP7%DL!jNT6u&k|0l(G07+v00Q{dVccewIcs6PmZ zmX2TRaWE5#U%7gW1i0}l5pLYhEunpW1;!!V_SRaq6u%-rf@t&oNz5RCF}f0< zWa>|zZ~nFMIYk%t_b1r*7Mb_Lz{_P8e}95~FAXjcyefqv7h?@Gm*>re&!2CE2Emda zyB^&Xs>X9!OMhPkxPA<2n>*({2If6S1_k72qW&Z#C{E_SH{r-jw_#qnu;H11PO&@F z4xdwWf^s0r$myrlDzY0NOk_CNYVgz|@W_4{48s%EOd=RmK3Pp?h#UDExnbg%J4gbG z&zXiNxIviCu%z}2*lD*}}0Dgeki?v3p zG%b$+*WP-=DT|^0APJBOV{H~)3GZJ)+L*7Pjo9z{aG%@shI)(kGgps)*PyMC3E1G5l~C|8#4*;x%|QTt?Yjf1kCAgIn*DtO7{3h z7tl%O&+0GA2O_;In*wG+!JCe7xP=q7GCwKJISB~&@pY?RU z*4Ano9|22_@*e0u9iq+8tJcGKh;Ib3X*`r`)c&js=KKqHUt01n>GT_)%m?WmC|Uou z76i=s7Z<1(2#1#PFUT&gnT%#?nj-z=bALdsY4%oAYibw0*|no{BeI9)X!Gr)`(OaF z`R8$8q;n%TzhKEO5~*NyNIzSRLRlBeB4Exgdc*t_g6UbE8_VFlM-^aZOZr=w2KbQO zMDki`>p;Mq{$7XrgHUKGzmD|oiW1C(&VvqbfU>s=P-I{8p?)JiuD&CATMOl$8_N@( zzH)>&G3V)gt*zBGK5D6RW1=s>mTv^{U*+7$-Itd9%SO0=k=_|C+FJPABVf+IL_xhk z0JM~UL4Kn)9nI7JT5D<-y;&%@=SK9Lq9X(fm79NVEQWS6(Fl4|J89v< z8ukA31#|f{?)_#oLx+W&nuv{Jgal#6*Mo@OH2A|$N>@} zyNTqr($;~1IsJ8q`h!qtDZh^N&JEsPLFYjNmJ??H36OoQg8Gg4xcZLdZ7r00ZoJMu zrEGrn2=Bx~^E|Dw#WX%LJ~yH`2+`)pL-jBoLK+RwHjRhi26#>v!(4pC-Itd9OC8+5 zNbjE0a;(!nCj`v-mwc!fNUWCfFKCxJOh+>{O_6?P6*b-7T5C=1qBjc#Iybg~z@YP@ zX}P(-H`V~{WU&$Srgk#jsJ*2Nrg#|p34{Zp&G!?9@E39UMoThX|MT_}Pyc;$go8QS ze0*ZyPoTe8_oEXA?U8&tOe~(&yFz?^=Qe4>YC;gSvYeRwu|HOF+gEaDjlJZxRpnt5 z56qF-c}$nKAM7i2=}L+nzxR{$&hnIf;p`Wda&L(E^5G-cgvmQ3KiSWhWbyNaXHNYh z`~27)>D1A)B^7lqiL-p>iqBT6rSd^{1Qq^y#DYWr5OqJ(AU^R(FHZfzUiibTU|D&H zpLFopK*QxYDW+8X;Ohfn)oop6j&{ZTgvc@Cw&E2HBYi~D^JWTI)Ue}n~9y+eH@bwuO{OC&xg(t+!k%o z923q8kl_-ZEAH8?LNLQ^iFoaU1;k#@YH3-&$&z`MI!X4)9kSlNa)qk%m&DW44iQuQ z0;MAb?WJDFCy9Q_n-|{a+DzG&iM8UsE4E-S9sE$dW8OggrwswpUSI!)i&l*n9~xCc zByC(FjURrT_;1M|Y3-Zu;pM`WQnfgfa1C~n$fD;H>)X#Kq?6C#2l^LE?bWAtO!^Uup(eZz$^mKEly~^NV$UK@T 
z=2>Y@sc2T3|HOOm&8d6tz2E=d@7(Y7{?4(Vz1MoyUVA-juV?SI)_#gHH!*7`t1K(l z_-oTfOj1n#+okBw*XG}~vrR3cx(GhE`9i$8jBYNoS~XpWi?tSOgmr6fmv5%8F45G` zt@*~OlTA&<8ZbrlMP>7CVq*Q8uG;*i;n&5VNdo4kMw1%HJEM8fB7HjEtY;!wxO>iX zbhBOP=<4j^=iAUI(o4Pw;h!uOn)S7%UdxG@H_Vkr`t8L!i?tD3;OJxLYa8I`FG}37N{cK*F_iut9ZyspVyl(Z=d95?3 z3E!)!5DPilbaAk`{;T-EEdkNIp4+T1$D4=yqPXcoyg}0%L8AFCnvX7SP4MW($=2`_ zZknJ6e#oxASvwv2D7~*Tv4O zkp~+Oe>XQRb=}zwz0C7l^sP;!VFY%5xo~=j2c)oEtyP@^3oO*e>xjHPeb#a|H&(Xv7+gNOe{FZN%2I)DrU|H1LPf7Us z*tvSx*}FBe=;!0#$l;HM;qT$<*T|Xf&;|rM2S3*UJHLi^QbUV|pPh$GW3T?cj<)u$ zc8yrggc@F79u4nJ*M@st?c7`!Hwd)x34Q+zN4t6Yc=}m+xH@_IxVPZm*U@oa!!1oB z{w-fYjk0U(@^`whcJ%S|Z9)1sgZxIUnPVSEXMZ<4pJtAmyZ_eF4e;v^`lD`4`P$O(?8ni^DusT}m4gW0(nmzk_MG0{FA1TQXZ~vtr&6D~6 zf`a_W416$uYZT>2YS8#X`A6-_KdCY;?aD88X?O+vWs&g5w<|55 z-``V}AF0+4)>!|nh51I}mxcMSwlIy;{vWk4|D?*a)X-n*^51P?{+_BdTNqD&zsBY8 zx82aUwf%Q{xPQRDv?TCPT9@B$gxp*`9PNA>dHJ;!`ZsTde)g96JLaY3s z<<0-3t_=CjrumnyG`kW0{oCdry&L{kPi8kfoTjbyU%f;vH`}utk^WW*zIkbEG=6N0 z8mTwjV59L{&G`nJ9Te2$-)i*rH~xO+|B`9~i(KboBEMO4vsjv(=;~MbO9H32iO9Hy6zol1E zUP?=O$%)lB=XJ?7$hezWQ%XQ%tiLfkO-!s)gP^-Kl;Yw|5|{bbDkdf>nsoN6sTT;mncG6;EE%wiU;rIVXUR$FI{xz@df1YpG zv{#q^3y*($(Uh+y_S1Z|zs3D^{pXWFlf9Jui@mIGwu>TrDL=4DChE=kVInPC-&_~r z)i>vvb!*P&;x~(O?zpB45ngn2J!?W!yH_*b@fQ82g*5?$5zPpH@IquSTgvON^Sm*C zwVPN|Chxa2vTp`cL7gbq2C=XmTVDaXI8se>*Bp}KQRc|Kq z+j_Tew<@TseH)*cn7Wu^8!^S;W_+oJ(Kb?Q)vAeL;~J{*Cm|l#EZUH!j=r^uiFItY z4vmcpqiW}fiMcdG{xikz*%h6UO?>>aEB~42@ki_BXLco_MV_{3W{UV1%|(&2|Fd?b zOd{99w!A94d~Xp!WmZDl-YBMs;LcS6&6hPAY^<8OaWF0hpJf$0{_- z@%2Fovgq|Sczj8P%`8wS!|yJ@@TYq*!|fJ4P*P!sHFrZ_?k;RKH$=sSpTSMqfCO0H zggIL!$@KLXL35)TStq;>w?9v3{q?Q6eODh~ywwR<9#Bg6dCsIc0sCBD zos0bnl5yn6=aAED1z=xu!vdqL(6uxHH3~8yWAa$&rtgX`o~qGIl~(M@VpFgQ=nc9P zHbUruAhahTkUq&89IG;TIiADF<}u4@wnN_R zuYl|c+tK;t2^==37@qGlg-bi~sOj*A>v`CDGhIv*BFP zy`8+;NE>?S*h=)y@?-mQpIT5`T<7LGb4mE ztvi#lg5@Q43VllFJ5Li19BV>+qerkMZufD6axXGu(ICF<0|WNKP}!&={3cuZ^Z*Q6 zzMO<#izF{z29vo`{xtH?K;gXLBC7jw3p;t?1s*W0W;{)Kez&NloT~OgQhy_n9g>TJ 
z%ITf>BkR@p+KbMhyNNa%J4aFYdj26eFO=tp^EQzE>tuvsDIW!N@V`wI2J!{s$l=blMwdqc*(N7_GrB?j9RvuhW6COo>GSlJ^s#$euCnbIu3*AJUe1>d0^et+ zxKq{f)ac6&dL~%KV95DnCF8eDMCYBJfGQpM zyp$o8?7$T#27|51B%g)oy=EWOjJV2_JdCkw+z}XZVH|t6V-N{8>qt6}Il!EB-$SP{ z?TLEFx8S&RAFeQb2ik*Ti_3d2rQ!0^L1KX(h8cKs?n;-ri2JR$%Tfg;#;26ItHSew zgjcsaZU~2_5_6`>)Z!n`OzDqf-`x(K}Rr3uV zII9TA+h|VhwJRLD{h{Ptz#tkiFor(iXY+Cn9^|a{cR-L?0fBvn5yzRg=-5aOheT~; zfqSD#yjv2=+Aks}_r8YfZ!f@BsRhJotpQwLK#5P}5SaPIn~dK$p2azDBL;TiEZ@nE z9qW3Al)4m=4rkkfsrz>LygrA-9@mB!dac34au0UyXO0@yxx{n1DO)psCk|XPf=!rJ zD3~pF1SXs5L9cP@Ec22gd23wFk%rYldH)rwKy63RXCmnZM z<$=1e1V2j1;lx%WFs_?EE_n9>diu^|Pt>KLN@5VCmDItK&)HDAT)-_(+6q>a6`?5K zmpfGxi6?z@G5%^ExSTBFGM=mP4pv+M`J+{^Gc*S5mwv{zJH?n!LLUek-<6%4b&#`j zZcARZt)%wT9l5Cw&ceP9YXnKxo`Y$JOM(~AZSkGOTbO@pFl>|%$JK>rVY>GmIQc?I znWQR-?h;1L-%ZA1*W;X08DVti9`1e7S5Q!BP0s4n(z@i;v}@5aqv40cjjH)RrIMjb zN==3FrJC6*jLh^`l@6T|Yjhzrz)0i53Zu17D~upD+z2997%93$8Y>P9ZVu7WDr>`gtny|{~ zP3Vf!L1%qRseD9fLF7uK|BZFWZ}*K&`#RD7RlS)hQ7O@|;`+9y`=|Gb_4-YuLYgTy z6~Eml27U+sPgj0+-qYNqiIc2mhJJ5Y6ZL<~x>kK+6We>6x&OWQ zOWKqsM3LToH|SrL|ML>~+56>qb6$i<06&UkHoD$L?}L9fZX#OOw#>u$cf@0d{$*|Z z!%+sDXPotd!)zVhC1xselSd9ft6*;2$^R9a0f zt`L0J-yJeLw4!Iz=CPCww>Ysj{i$nOGR|3X2x_-!@Pf`6z{Tg23{%cT!tzraAuj6- zt{>k9RPOrH_Y%I)&fXIXY)`@S>=%Ml(dq&w@sWG_A|8rn4#4R`15lQapcgxI!Q?p( zII+4t)|h6a{k1Z9DeZ*2(l+3+jah=u;cK`F6W!_MH{IaX<;NhS89{YYyYiH_E#rb# zZ|C)KyC_Im*b`&jpBP^AzebC!U%_ZDjs}hFfS1*Wg7pC>+N;ZC93Y5;E!k~Zbzlt` z4eCKEDq5FZS9;1l6(0=iJaoZLO#qLxdt$F1b-W>gPwBn%J@^d8d43Vekfy4~4tDMg zD%(|nFA;@F6@9osxntCM#(dOUz8e<@Dv|cpmYntDhj>}@6I@@CgQMH$!lQ1V=pkis za^5Tw+K4~IB{6gGYmeh#JVISC>ZJ=zh<(N5tx4eSS#3go-XUdduq)DlNO)EJ~?+_Cpd8f)TEjAu_RFKmyDb5t>DF{?#8V8yEvlG2D^0`fT?^% z9Hr0)E*)--Cx)itPJ>u(myH)EoU#UN%i9sdqC`$Vtrf2Go&=v>C(^uF7o5H1FdCU? zksA-@-~&M{aEx5FuwlBqnR)*`r2q>E$wR;mV!3 zZqrt9y?UDV9OF+PjmUtI9BtS<@ha$)x`4QqEWE08#jz)sW9`Iiy!rM=>BsssIMg-? 
zrmVY-`8HytrfvcbuDgnxBvR<$C^tcISrL`YUx!;&Gcme%C(hpF758+_NI}A#uK3lh zGs3f5oUpAnDX|*>MQ5&X`svz`J+vLXJQj>M4u8hu-o-HH{1fzWYmG-j*TQtN2Sc~N zgZ$2WuZYiZV4PsIC`gxuT-I3a=77p=DwqCTA5wo3RPBkGBT+-13AG6`|nyY9|D3d<~VX8={fL(@>m@+B##o!xn-P(~8@iUxzke9Mz69){BShd1vW}@(%d+dLb&B z+(Vf;FKL{Z0c+Lz5Zcz{G}!DO)MV3bL6;7nX)llG;Aqy0y_Rw0&L6WNb;0kkd|xmq za{J*yvKrFXo4{#yIp*1$B7462G~L;$H6))iAl273S=Fi|a7spx z>GxKLl&|Yy-?buKvRfau>jy#c(yq9XAB*MI(O|ly3OI*4E?DLuc#N>epf0^hjK>TJ zzc`gd-dHQ>e&Y+Bb}fc&_|%`d%dViU_2bcduLdmKWDTI+gZIJ04F;7ZgR%cj!JRL6 zP-o3lTpuwE=n{E$X8js)yFLIOs?A3Q^&HUMFpXT#PkJ#AO%d81ab0)Qqpx zu1h(V_nC(p$-D5}z@uE-qDR;z8DYwO8MylJ0TiD-25J|?V8++!+{Je4U{W`gh6lgn zl=mORi{s8hh;ADw+A)u-N{XdbqbDQZsXb5G%nT{%l59jl22Wzaei)!U z8ss1A<7j0LGW&#crrz=#-8cuQY(Bxs9Fd3aq37|%5lien+n86a*OQeU>qU2s9)oVX zv+Zv)Dd2gj3i%pp9=x)R%{Un+jNt0zp@pOKN zPE0p9kqaqqPmWL658*ct@@90jf&RG@*^twA4A!oOHa$Y{5Y2$aBjeC}Wm|T*T!ZDV zv*VumO{b1u(_#C4X<~Qi9Xe|b6f7T-%)N7MkK)s|qp!0ns;fnS)wmUcd&S~JZE-G6 zcdCRqJSR9ft~EIs*Awm!9*1;mCwOGE2i|mg1@q2j!ItD5bke#?se;1Y_K zhWEktVkfNMpM~ewe#OI;ZE=KIHRcD|LZ2XC*yJ-5AM+QXV`?$Cc*Q!*pUfA?TIcfY zUFXuVBhKKdE;(S_ZZI6t><4$V_re1WIXbkJ1<$sFfM@cd0@L~|1GNtA;lS-SFmAyk z2-toF9CXwmRcZuOosWRKv&Gx8=xb--hVF+7c{g=lE_nVvtQ01wT> zv6t~xY-jTY#P2PPp+1i$QJx%}z!!i`p*azAv(rUR{^U$Z7?Z-ynco95411A@UHXw$8pVP!r#z|qXG^r1n-8f) z_86LEOqRJM@aA7E!GjycNT(g0X(vB3*dVS>-*9a)=$tCcTWrG0Ld@Zbn;*WYiJ}+j zK|E)>0o7j2!cuW>+JPrY&JUkReBV5VYkNH4jPh#yOowCX-W&MDlX3g*xw5fx;%xh| z5L)k43_F%4La0F@CV$o-2|4|7%ezD@Sf3B(tc)gBc7^@&fs}lR1&@!*;99~%kWtwM z674Bnt5Xfm;XUA0PIp)llK^#3&cHyQLAYk-G4xdFhy6zT;q*I+pw)dQjH)Vz-3Ei< z=!-f!IVqA$x==%lzI=rv)6#L}D<8U9?II@#uBY;}idN+BgX)hZoL0Z_m{Bqk;`gq^ z$~$>FYpf&HJC}&Z*La|qUR&%q-hi{Lk_YUiLuV!UVZ3A~n15dq z^9BvX%?n>~q3=sz5Y|Uk zg0(>w#@;i;wD1CSg;rR;;|3mdd&8}EYmaM$FK}D=bPTl0;ubvV2ErXzXnRjXj2NCq zvo$0kpQiwqj+nsV!zPe^WLZOwSRZ(?dq3BubPO$cmWXo=5(H!9uYg_~d4YHOeH?Y_ zK7F?$7c%oNbDDMTFbgi@>Dxt6EvLkW9#J5-*W`n-;5e^rjW`<>Bu=Ua1;dCN;SfkC z(jgW*aChN7oY5}pTbG{Kd(euhC#*b^aj6>R%Wjt!f-uB7E<-Kx%=Pgb`ZBJq6 zSHDBSq=rN zJy0P<0mJs>z|FooTvXpxc<NHm9V@jKk( 
zeX?ljy%%8GI3x}YqcLzk{N6ju^K=#sZ6&v83f4bnf+IYC%DGrt?O0Ypa zYB|wsnvqmqCT? z)JVjc5++3G^NCs%MsND9UmP$eaX zr^6J9y-yfDe?J`poF&PyZZc%S_ARJfH5w!{&(rjeg}nb&eif;fASwQ zZ)@-TCbqMhx&J-?vY2NRqR8*`-JpL}{?AL`Xa40Z*YAhkGV4intVHAw{#WB$GY&TO z6Ik3-{v&x?-5XGUmbcX$!TQ?|@wJR@eFyt%Z0o!5(f5OIsEOinF3wG$=w|u+BmDj| zd1il&#r;O&fABb5f_Jmfz7ync?WW6bWc%-I_&xh)?$y-$FZ=iJd`!`GJSTNcxk7 zBxsNZe~emJ67#G(9=lXQy!7>0;tXy6t7pSl_XGA&Kja3?bJ$Lf`R`~ruNDO%Pli#O z1v6O`Ye%?{eMC4g7G9D*nXQe>*?J&CjD8I-!MLI=*y;&+(hD3n?@o_tn$ftSf5 zrdl-6sQaTt+(UJi~(V%k=rrFGm0tTVlm@N#XYUq5S&S>MUqSD>PrTgSUzs z1igf7g}rtCNS8Y^IYU`**0b;o%>b7y5kJN=(RpR`B!*ly_BNZMh+&QbnJgOY9;H4JhVY ze{$eYm|9a3oxh#9_i-V%c|<6mCScyBBZyV_YjU8IJk#hO0T%5Bk~8Nuh=TlCa$HMC zm^eR#&D`jKNh37a7fF=|{2wbMHWZ?Il8M=Q8qW(+5`MhD5g3 zioIF3g;+aI6y`fB3mxlv@&hKR^A(;O3tw!OC0Rj3h-ZIOw#7|Hc+IE{IaN4-TzFZ= zhW0wd+A1pXy-zV{Um8H-MrDAh(wG zgY8H{oGgWNl(WE1QjdsT*hW@wIZK1J&-2%m_JF4Y&x7jXtB|l&7AyB(UuI@{+O%?c&q(9%&ijeD%Y}mQpZODaT>C8R;EYpkjWv`=p z3g~W(Qhf3Kq!A%RipxTwcRPro)V=hHhuMg)hUNsYw%QwRd zs|UJN=l0l4%pi(ws_-Rq z1i5%OLzk#lMhbgg+|CXyU&O$ru47T{IER$KIG-olHM%VD%0$*hW&;iz>x`C;iFAOw z1W~eX1L>hhI5&;*hP{6_{j9i|&0QhOl!SY@$8|HoO?E8lyty-(M^}{G{!-6fO}j$^ zI*ed8+8nF9@*2D7og;ypZgAC>DRlY;QDSeN+C4znC@X0{Ss5o{jQk@B9YeA=W7&)wP58k+^un7(t{Pdz7a)twYoa4??hD76( z3tEO(NT1Su{;4>iUnU+tc%0o&2_{(@BlwkN>#2Q}BuTO}BN5hnNmMdlc+yQ$xZ6sV zt%^Fpj%myj&fN8ot=WAOEbNR}2f1U+ctbf^+-neDtb;ZGs!D5;n(|ptt>H;-uU8`z z49$7ljW%(d-AH1#`WD<+B994geOTKpO?FJ;3KyWXhxCZo&KpCe5u359 zC<<&c+p(RAO2WCP_u|%33qh*%7W49n7J9uO!t8g*u#0*b%xiiHTl09oFnL-gF`4t3 zTE-t?o%F-8-o6jJBb5ljx#!^Q(50kydpLPK|1Qc*P~(4eAI-x02ap=)Cp2_O4cmPw zm(%B0kyE!~$r6>hRTth>rY?-g@L?}r zY-ARFR}#%Ad4B7;XIRHp-oi_IM@atA&j-7I4Pnqa?5HijTjX|uB-InyF5@~wyy4Ed+Yl+;A;o6erOca)4Kr~7bS(gJUeo4 z1HXc@^dVN<<1y3pwZt>h1IQ3#8KKFOR5rP^7uHFskcKD+8LZ>QY-@WPX5LxN&i1GV zu?Qz(GT)etR2V`O#+AYGmjn5-GjiBbXI*CcC6PQmsw`CObsoA&*O8ckK)R1kXS3b< z2xs-PNBwT8tjD;8P*W&m{GKcE4L_gNxR*fw&hBi&@Es&}b2dA;QHxpGrSU%JI}zSj zYmiGu)=lv__-qN~ooF=@_}a^Hg-=i7Dmj`&JL?N$s*TAeCu#1q#~xv1a4~s`>)7^f 
zM?kwW2Z~Kg$Znpgu=PAeE_2*?zND0p*lgX%PpC?Rr!y=`*CQp&Lvt&cke&77QX)4z28_IcxVnYgpDwsm89{*bD6wtVz14j4ma<`&)L5bfnv6o`%g!T?)TZ0$ip&{0+z54&ppJB+TIU@a}1)$=}lU~{xez$oo?9( zRKh-SsrJsolcUUpr!&Lh@(3Y6U9XIoP8q@XGwH*+IeU=Qi3;TO=`SSYX$%u@@Hvlq zV8W^-PZHg2iwJ+dkPx2?rqJF8J`F|UvQe9LKGT-nD!h%x5kugRrvf{+OomO!G~#Ra zdx;N5DU(%GGWaoxec0#(Mzk$e*qVc??BSbr%<@$^`D)*T<&;hlI;%&pfghE4^S16J z1rh^EN7I!C|EA|21Bk&M%-EOhX7vZG@(GTpvp z)|P!FcToy^xVy6v%ANU{>U?o^`nTNATL_Vzi%u2UsTiFIN{PvlAdb@K+>HiPAuzK2=kRtWDu4`c)S zJ%apYV}uS;er(rqd$Q+vI(x6#0j6J7pf^&6kSxO^EbQPkMs_WR+j1w^mGY@<%bOym z;im%}5R(A_#?{A2arx=r$?ZpZ49Y@B5F*bEF#htxd@hx({YF_=2q$UL)_PYOu%Y$1!KOJZojumM@s_ z9A~E62q)yGvEk-v#m~co$u0E(WaX=4Tw2$2ocXY+WS-m#SU0rc`y)C@Y^}92>oaR3 z)XY3V&gg~l4&T-UsW=yKej`o>uHQ)Z^(kY@)g57GxEK=`-$Cw=5dhQ6$Iy-^A;Qgx zkya&47}}3)^^Rl*)Rmb}-;WSgr%3b;XJYP+aZo$54I7f41Lso)um%2NY<7_ZYvXte zd#mN+_|7qKtZ*e0Uo6IYY&eJpdMg>v(T=S)?@6|MajbWx9n%S0NoJ@#;*|S(atZr$ znc0;Z>^#pLRrd~NHD3BmeXBlE;pefjwU$I{Oe8rznq$WHg>-JUDzTDFCSs~d&Pwk zVaz=KoD)&}FFNbNHKd+BF9~B7ejWKI4^k4Rxe48#tYyZD%OQ2XFWWL{70kTXj(PiU zV-E#t{Jc)eq)1X4=gqH1o??5l@{B$9U9f}o+_DQ3_e~(P9Od!aQA_g7#)1W_7IX6D zM_FF%F4*}rfn9BXkAQa-xwfh=gt_^#Fz;xtM8S=%esKjXlcj|tx0#T!EALS8K}Dow z`As~m(v^vQPNHRFK9Z%enegoTL$=T24!Kyb3hiWM$*WymNw$d$QE3%H%+0geBo{GeKa1?3(N|ME_IKe)wR3&zsZ;;FkC1N;#Eeum!1-Bkg z1K&$KP@>IP^o@?=Y+u`_0*Quvj-X3)bsJb!8O;|awA?)3HvT48`Z5`fXZK(uunw&EYop4_q2$Ju6yfN8{rI_?4wVef z?+HuuP1v>@!yw?cI`iu|hX49y4;|B`o z@+NNi&35JY?2p)rCj4Lar}-)W$%>!ZpSjKYBNE5IJ8o4U)zp*7p0(6t5g)(G?vJ3zm0a1HjR)nV*n8K}uy>3es_7L8_|gfy4im1^kc4o< zf@1<$Q0NU?_11D8eZ7D@ZHvRF^#u*1iQw%2p3}N|l3ukD$M@aOf>$dS*nGwee1b!; zEFm6T(#3Jt-$-Sr`H&*8mT84}+8P%kj3_M>S8ys(pC zbfPTF?AigvD+W-vY&p=7%D^tOeQuUSq+a!N{ zU9lFonu>8kc}05E&=|9$6X28S2r8y(j$`Ie0`8$9o+;Xgr}zZw<}IL=Ia+Xfx+hqb zoaDmGO|Wz4Yup6!6z=BpScJO{4d;#%$@2Arq-R9 zi>k69gLQ_$uR&PivJqxQS<~~+{BgNk67)3)0k_lp(5^JDA!oNpu*f@YO+o+E^s9m$;;%9m4yp>x8BJf&TP+l#Yp2>9$~!|YB4o20Zcxxr5z8vpsAV` zFh{!;R-7D#cDKx6*5-~R;chrr;PniRE?@IH0|_-u>Be!X{raz7L5SS^S19^_*2(-LBR=OwMs7zUU1)-vV0 
zvgDHuuuq4N3t~gNllimLG30iYKy$Dqrz*D`9IXejo%Nxxbo4^BbkIceF*Yb|v>X(@ z&(arzwBbR`63p6Dg|5f#(f&*!ZV)WM_71z?=y3}=eSJ4lcEBG3Ixa)oTRq@yow?w3 zt0`>6+g3P3Ya8|`u11M69m31?VmDt`aR$yl#OYxUcktOAOfr;)^%mRF{ZT0EPshQo z>(02!Mvab|norFV{UDbPq`^5!*!@#5*ZI;kw)AxnlsX*X4N6PJ?%P{Kkh&&54eLWV zUSC`=NEeCZbzUE%qtJI!ErZ-oBcv8&Qb0b*rgXV1wQ^ZKH5z8WT!SgehpA?7RTy?j4jp=V@XSefxZ1rJhTT#D*@5*uhj#br<2SLm)OaFw zSf!78GZ(iIGl+`TXI-q@i5O2$oi6xaaUfSGYO~(n2_?6uT9V}IEx2#zF0T4Z z1eh)2@PX`I{M^f$>uk3gllFAss^6$U+}XAGvFkGI-F7vmo>PY1Oomy0Oa*R8IKE!I z85QD}!?T%&n6cn3tUnV9U(GA&D~-vxDsC`zTrr9*GQNqktxgNXhV22x1y9l5`4Vhh z6^A3cOR%aVU#Z!N5pdB-hRaxUk@j4wLY}@JhI(nPINMGIRB#A-Tslcp`?%umV=36% zrz5>n)rTy48VBtcu7h)0{o&|`gIxP0O`dI+J!m>26kfjV040FcWF5<`|M>hK{INSIu3^od8tdj#L!vrdl&H zu>0X4R6MXB?vA~|CVjQTwokfXr#tZ_ugjLfif(6%S_QqPD&x!Hy_+U??8R=*J1rBJ zJuyLk{au$Xt~z7>oUOM+L60bb3!h540nX#KJcKAmHZ zuX)44V&@EQma;VrRMLbUs}i{FzAkj{f)l)#R$H)-HU)<>@=&6>ja(bE4u=JAg&r2} z5ZZS%KGyPv6YF=v-YK0xdi^te*7knG*~Q0P{LviT-cc1tMNH!?wyARVB^m%Rn+$JO zPR7FnZ*vBdZ883R5*<)nMOTfui(Q`f5==C`fqt=fxru`Yf$zH*%zCvOuhkDFQw@`W z@3jIuPDiF{o($*0T+za$6Tb1v;%v?x#{st!v2?Zzl^7cX?k8h0+iwL7=sXQhHsqMk zdi{>>UEg3w=t5|fWyBr68V+wKl8fJPr+M*H*%dOJciNIS0K}*3ny#ehPphk z!-4(Q;63xLxNz1suJo}wJRLCw)a(s-cOGq_bq}2|%tHYNBxR#lQ6O}XdQUTg2LW`c z$EATgVDXDD@Ww9`eIDAvSt6Z6QLC-Ly`zy@yTmkIR9 z@I;X6k_g%lExD5}I#gTE2M-TlhWiI;VaV8>+~&*#y3+PBciNy0CSTX$#79O`x2`4h z$%X*xvuzh@cUHiDS`xI?l=m1aSp*-xUg55-oPZn3%4nN@x2TlrD3~|#zTpG46jZF6 z$bGruOvd^Zf<|cqEgIqorwfnM1M6DBX(w;Y%k`q~pN_!~H(SF@lVZVax}$iNbtt}) zlZVr@;^~t%D@!Cz^EhM0Ffd%T6t#sNc&VEP(C`t9VR9tH{4e`>MlLtGJDbkZS*1Rl zRfsnTHck~NUOvy290&z9lZ8BYsRXboQG;<8l5o<8Ei`Wa2G}-&aVq*ZxskQW5PdhP zWYL+foY5+4s9H1%0}uF6gL@AR)>a$grpY%s`$s}dEZ@l6Ff|?~OX@;y)>Og3;%Iyo zdy59&UB|f(zfI2@rQ)5qTJG-oQ}nL(1{yH@B2Dfp^Kvf-UdUuw2`8D(@z6zgD9yP% ziQ9&IXwaP(G@;i)>{&PfB&OSQN3MEfJ6m7KD}Kpk-VK3AAKp`oy55|R`@RyVrRH$- zYZhG`JDYR7UC!C_ictKVHV%yNpc$MY&bjrHR*j3II_DPio?WtFostW%-MJND)vCc} z%e7;hKO2GTr$m8*Yin-bEYD9KF+y!|(yP&&bHB1?}2mPrloPF6FZcFEvhHE-jX>e4v17ZPmc(O-jSMGmq0qxz6j{M;sFT^FvzGJHcbwtW1hdK69VRA 
zT8$}-u#&`t_ySOken@%Jai}xhoqWA_o^u?+UugtpuwTX_fFta~)&U3(n zL#$BakR_MNKM!&mUC9>D0}#}+VSddy$;nz4TWu+UT~$X z1|*iJH+&bpBSsyRArf0s@p3^i5m#V33I#A}mZv~QeFmJ*E5W@- z?a9!-!E|HW?yy>36^CCLK!o{o(X8k?{k%LH*kEbA7}O40PfDQr=Tty)yAB2?+d$^b z_HeOw6ppU1ecEGTSh>RzH~Tl7w;$6U11smkqzy|sUJXGHosU#?s47v_ z909SWo3U%6D`<|J4crY+Hs!`f=vZ-$rWl`P2QRlpC!1I-sEdW-p*!KC*-5-^FdV9O z214+tICw56pr>kb;X|MCDAQ&-_Hs?8i{_rDBTU}YkT$ynM-@Wwv)AJ54T-&`Hd6PHk()%_c_-K7BRdUS~#F!6ZE+!vf%R@cZ ze`;GA?Dhqu54B_4E`7o0^Cjs-=K$`b_z)KQE*j=c>qoP*BT?<@OJ3aNI_|2b1@|`l zG-bW4gZv%ea`dHT!I<;Trvsn!ZGHZ10K^;RXV&Dx>U zr(@h57ayGW={jxeF`3b4r|>@)w+iH%JS`#LIb42wzTuB~F05Nx6WdwM-2a|`IU%zN zQRH`OzU^1>Cy~I<{L8}B?*-8Elu~1CtMMoD2mh<*!a_DT(MfCW@BjBXtG~v!elvnU zcw9?;N0Vg4{wv~I_32IAc5AjLzh`%@ZEM2+Wp|pN@}I2uncdNA)*q2L{@rn{bIp1y zvSTgvSj5M#@_PxijBBYEeh+V16U}pBT1&{EyT1O*0FYEY2CEmG=giW_;HnBK_R89V zEP7sp(;ssrLaPUiFp1zw?kkf6!Sf*_^fU=e-h%y~YZA|W(^=^0c|=Oti^U{`;}N$6 z(ldKG>0)+-vp*zBrY))>qi=8IBzDAOf-Ax!lVO~)`EF8mH;E0E{lt~Kw_`SbnnK9` zif=Lb~oJQuE&lU4rXgNS(3z$C2Wk!NWSvD+hpuC zL$2R9)lY}9DQ>>J$%!ACp8j68%%Lwi zgezB4eU@@?QksDT#DEi??PvQ}_Ef@Vc=K zo2x8GbVjXZvA6Tdl>`0Bs#)vs&EWwUe9(%_p8Epd_1gi9-U4wH3TfYlQ&|2tV>pX= zJ;7P2G9ZIYiK)CFX`OGz!p+>tsF9W^n`%PJw({7@B|K8O=q0R}Wr$htd$P#IGug?> zQl$2BBw;7*FiT-PoZb8oHp)9=d&{p_v2hxEVYd(F3%Zjm|IzH!VhiRQJdNdc&Bl)N zk73%PDTdQ4rOC9mrfgb6oKwDJ4HSB(LvBN6`?{LY~<0+yw)9h3El4i29MQbhffTJi3P^| ze&v(cjoJRpcjnv@n}=Vp+ZK7|_ofS7?5W3ew^S1S4LwNi|7-8r<7&>fKPh@pB9tB~ zq(o1T3Fo`ksYc2u&q;X{8j3vfi1L^o>YMPSq z9Jz{tuO5HtEC9!i z2k5C5RjOv!pN0h3qM>m8E43+!Sfv#)eeWHm&d>Q&xKV~ajZLNP-x$(GOZKw!y#v@B zvW3Te&+AHWe&w;p6pyDDv&COy=ciVQ$N09@jA-ohyAd2$yd+p}Px%FwF1? 
zT)ryIA=r{ec4Y|n=tY{NM?n~C_F^ht%?g2@#-rh$-*I|+zc7E=41Jov>on?|=Ahra z3-G99FFME-vjsbXP$kNgjGL>C0XYe@Gb9{yCkIj68@e#@RVpfOW6^TeJTi9D6(&8y z58WHvCn!}9Btu?2B}@8cv#E2VFm$sE4V)PS>f;_06%$3;bB_WYTsMsicH=>+5IZy8*9+)oCS2!FO|%~ zo_ciS@K&NLFv@pcky}dF#-?x%x4~oDN%R3}9Kb6)uts+?7N^Sh6Qf1~L44h*w zP#L(Kj@3vatv+>7W>7>uFTZ7FotMJor&F-{U@=ZM9SJJ!>SWl{RkVffqT_15C5DB` z*xY`Ec1#ocmNxORKJWqgdH)D(T(uv6JQq*0glnM+{`=`vcVql6sg}6iNdj)OEN^By zgGsgncxyIn!qx7pDAsmhs_Pku^*9c5XDp*LXB+bLl-*dr6eoNJGSo@mO3?k+DtOuc z3m95%WlwKQfTzZ)_@?3n>HW$a=Qt|SY#2aK8Jl1e8%PEFgXm2!dz7)&!0~}+SugX2 zFex^OTrz57b>z>(dL2)wFWspf5b-$D#D42)NRA85Cmj*eG@pUb5It!qoh+ zm$nSWYfhl4s(>8lF{t!y8tL|MYC)}67!+CGfwAXrGK!HiXy(R5xc%lKY%#mR1}l%G zCPH}_H%gxJSNr4XqDlFcd$OLU(Du_30pe@hK3VM4yBddRmqLWuu)8{TWFkaw~%iS|*>B zrYDVOmvzI+`K9D=`d(5KH3Il8Y0$_k1HZ++A=2FhZ7bDKwvQTaY?LLzVRwjmp)1Pf zo`I^wZJ<_r6%L&=p{vKp(L;hMa9~X|;Z&Rfn3_hunec#7ysk^S&ss5_dvz$?GZ|Um z8^!Q?_IzfFK9|}Ayd>=n%eYRf8`$l)2h+2>6n0^NJ$Ao!jP0rXigbLR4(EfOgZz#W zB(o}qQ5o+}+G4bs!cti%b?eWl+sNW+$4vG{aL|O&d9B=M8Ds+A#~%(2a)3vwk?gig zM~2&N6eidz(?){}#92WbyBixaVPhSc7KwU^-xuxC1M*MRo3 zI+)}P_VLMJwSqygboJj3_JNEd-gH#wK zM1C+BIYH0lKIlKHU`;0_vw^O@bfSeXqvd2p&0F;7t+!F+Sik@f`l2YWT9dxqcL5TAZwyF;|A^XkbbF_4qc#2S_4BMUw0y-rnQUc2iMUa zZr3nykrDT;qV>7i^N^l68AArxxihV4-H_ul2)z#~5gE<9q^?$z9yjr#yviCG<_7X z`Acs^YcjGXcL!Cs}SaAQ!5{*k0{R z>I7RsyVVfG=kLOO_Y)wt!wR?NxHI)v7BQP|UuO<&d`9Xu&8X6n4JfkWI# zy&)w2HVN)~gN-~4Xkgn)v@9+%&*C&NN;@31g}x%*V@*_-^CR~BXY8$g^-!MK!tOS1 zAf-O1Fm_ZnWGePAcrA4e76+;^Qw^SQ`|4X!Kb1yC?a^a6-Z7S1jc#PiXGPQErIyHl zu!Y7pDAC4)xeU4I4pDj!C@-;)=-gP1ezF76-=QxxaEN3TZoPrm{S)AUq7*nfg|e;E zy0k=n4AE>eqX~I2sB7;*TZipnvc5hCP2LCSnx9qZtht_G_w`w}rC}mNM;nulou2Tt zPdBPrw+GHhE3;Sb-s2AOS0qQ%LSav9BF`xF5U(iLNO0{`tiZD@o|Ygru1L|^2^eKSVrOOT@ct1B?8c^K8n9$4T$_!w@uHnq& z>*y`?M-^|wb(S+}c2*d~SO-xx`D7C3JdsW~u!AOy(A=fH3y=GDU&IagmE>StYI8qh9=Z!68Dx=P`l2Dc$Eo&LrK1xfflag zk7PMjYr*c!6^K4E1qVMoMvmOtgu2t#lcL~O;uW@w+-}Zgj|?I!6sc}3vS zza7I)wHi`iUkM^Ga^RX6OK$?qh&IaTy(rxGzZnL-W=_3WLqyP0z90XWlH1u=dgoAn?Mo!WB1 
zu*n`%cGtjRjay6Q)o{$jx6y%sKrDYk0G}4vDX|hR}O0}5%Jx@ZftrT~* z)q>H0L(Ik@y4(nN3y?mw8|Cjx)1iIJNkw}<`uY4?FaBTqwK)G`zC#^B*SeB;-RZBr z%JkRk{ioKKV@3G~#Ut_?c?91ViPwYwRlkx@SZcNZe25`BjQth zzON+y$Il=00a4Xr(Rh!392ejrBKthxQ~S(zvR&#5VKU*uG z(w8M7`4RK`@Ak(n5Xr6hA(ND2F&|&xha({Ak2`VU!*WSX{G3lKkSzIY;~(1>wf6(^ zsXWC02YZ5DBQ2hZF6?7_;`1M!5Gjt#`~L!6qredcqjZI%loWTdl`P`!_w0*Bc^Bb_ zy%pX31^m~KfTVp9%acLThh6{F|4`nCWdCcdOD}fkB#Z7a&Fi`l+ZV~aOVH`kxipAz%^k4zMjzyDtT z_K)>;*Gx*j7l_AS=N)sxj~M^sdfZ>6$Nk^eC;s|ak9&xVJ`4QRdqHw(SM!3hF8F(T z+y6=zo!n>P`b#5!wh@rj+d3i!>qPP+=J((IUf?5=Tk-QMDaT?yzQ7MhK=Qqy28_b8Dp`D^1J+n0A%?Vol{ESF;V$L$G+_rBuyR<-Nu-S@}##OI%d|ADK&1NeE} A?*IS* From 6aca7c5b5d3eeee9f3015682463169522ea230ca Mon Sep 17 00:00:00 2001 From: akashvelu Date: Mon, 13 Jul 2020 10:36:14 -0700 Subject: [PATCH 335/335] Revert model architecture and # rollouts to previous defaults --- examples/train.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/train.py b/examples/train.py index 16cfa5c59..b669dc59f 100644 --- a/examples/train.py +++ b/examples/train.py @@ -71,7 +71,7 @@ def parse_args(args): '--checkpoint_freq', type=int, default=20, help='How often to checkpoint.') parser.add_argument( - '--num_rollouts', type=int, default=20, + '--num_rollouts', type=int, default=1, help='How many rollouts are in a training batch') parser.add_argument( '--rollout_size', type=int, default=1000, @@ -192,7 +192,7 @@ def setup_exps_rllib(flow_params, config["num_workers"] = n_cpus config["horizon"] = horizon - config["model"].update({"fcnet_hiddens": [32, 32, 32]}) + config["model"].update({"fcnet_hiddens": [32, 32]}) config["train_batch_size"] = horizon * n_rollouts config["gamma"] = 0.995 # discount rate config["use_gae"] = True