diff --git a/.github/ISSUE_TEMPLATE/documentation.md b/.github/ISSUE_TEMPLATE/documentation.md index 851b235be..8581b284e 100644 --- a/.github/ISSUE_TEMPLATE/documentation.md +++ b/.github/ISSUE_TEMPLATE/documentation.md @@ -8,17 +8,20 @@ assignees: '' --- ## Documentation issue description -<--A description of what the problem/suggestion is.--> + ## Suggested modifications -<--Be as concise and clear as possible. + + ``` +Please note: + Documentation issues are low priority. Please provide your suggested modifications to increase processing speed. Thanks for your understanding. ``` ## Additional context -Add any other context here. + diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 1417765b9..06a6436a3 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -8,13 +8,34 @@ assignees: '' --- ## Is your feature request related to a problem? Please describe. -A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + + + +Ex. This is what i do: +```python +import grid2op +env_name = ... +env = grid2op.make(env_name, ...) + +... +``` ## Describe the solution you'd like -A clear and concise description of what you want to happen. + + + +Ex. This is how i would like it to be done: +```python +import grid2op +env_name = ... +env = grid2op.make(env_name, ...) + +# give an example on how your awesome new feature would behave +``` ## Describe alternatives you've considered -A clear and concise description of any alternative solutions or features you've considered. + ## Additional context -Add any other context about the feature request here. 
+ diff --git a/.gitignore b/.gitignore index b85cf2dd4..bb3a2bbb8 100644 --- a/.gitignore +++ b/.gitignore @@ -298,6 +298,11 @@ pp_error_non_connected_grid.py test_issue174.py actions2.npy bug_discord_0.py +invert_idto_act.py +test_bug_discord1.py +test_networkx.py +test_issue185.py +test_can_make_opponent.py # profiling files **.prof diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 1df5e50fa..45a1bc0d0 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -22,6 +22,29 @@ Change Log - [???] "asynch" multienv - [???] properly model interconnecting powerlines +[1.5.1] - 2021-xx-yy +----------------------- +- [FIXED]: `Issue #187 `_: improve the computation and the + documentation of the `RedispReward`. This has an impact on the `env.reward_range` of all environments using this + reward, because the old "reward_max" was not correct. +- [FIXED] `Issue #181 `_ : now environment can be created with + a layout and a warning is issued in this case. +- [FIXED] `Issue #180 `_ : it is now possible to set the thermal + limit with a dictionary +- [FIXED] a typo that would cause the attack to be discarded in the runner in some cases (cases for now not used) +- [FIXED] an issue linked to the transformation into gym box space for some environments, + this **might** be linked to `Issue #185 `_ +- [ADDED] a feature to retrieve the voltage angle (theta) in the backend (`backend.get_theta`) and in the observation. +- [ADDED] support for multimix in the GymEnv (lack of support spotted thanks to + `Issue #185 `_ ) +- [ADDED] basic documentation of the environment available. +- [ADDED] `Issue #166 `_ : support for simulate in multi environment + settings. 
+- [IMPROVED] extra layer of security preventing modification of `observation_space` and `action_space` of environment +- [IMPROVED] better handling of dynamically generated classes +- [IMPROVED] the documentation of the opponent + + [1.5.0] - 2021-03-31 ------------------------- - [BREAKING] `backend.check_kirchoff()` method now returns also the discrepancy in the voltage magnitude diff --git a/docs/action.rst b/docs/action.rst index d9f67c1fd..4ee1aab2b 100644 --- a/docs/action.rst +++ b/docs/action.rst @@ -323,7 +323,9 @@ Now to retrieve a "graph like" object, you can : # method 2 obs_add = obs + add -And refer to the section :ref:`observation_module_graph` to retrieve a graph structure from these observations. +And refer to the page :ref:`gridgraph-module` or the section :ref:`observation_module_graph` to retrieve a graph +structure from these observations. + For example: .. code-block:: python @@ -333,6 +335,7 @@ For example: connect_mat = obs_add.connectivity_matrix() # alternatively `sim_obs.connectivity_matrix()` + .. _Illegal-vs-Ambiguous: Illegal vs Ambiguous diff --git a/docs/agent.rst b/docs/agent.rst index 1cef6913a..557603878 100644 --- a/docs/agent.rst +++ b/docs/agent.rst @@ -4,6 +4,12 @@ Agent ============ +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + + Objectives ----------- In this RL framework, an Agent is an entity that acts on the Environment (modeled in grid2op as an object diff --git a/docs/available_envs.rst b/docs/available_envs.rst index c94c3cbf4..c1efbea9a 100644 --- a/docs/available_envs.rst +++ b/docs/available_envs.rst @@ -1,9 +1,631 @@ + +.. |l2rpn_case14_sandbox_layout| image:: ./img/l2rpn_case14_sandbox_layout.png +.. |R2_full_grid| image:: ./img/R2_full_grid.png +.. |l2rpn_neurips_2020_track1_layout| image:: ./img/l2rpn_neurips_2020_track1_layout.png +.. 
|l2rpn_neurips_2020_track2_layout| image:: ./img/l2rpn_neurips_2020_track2_layout.png + + Available environments ------------------------ +=================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + +Content of an environment +--------------------------- + +A grid2op "environment" is represented as a folder on your computer. There is one folder for each environment. + +Inside each folder / environment there are a few files (as of writing): + +- "**grid.json**" (a file): it is the file that describe the powergrid and that can be read by the default backend. + It is today + mandatory, but we could imagine a file in a different format. Note that in this case, + this environment will not be compatible with the default backend. +- "**config.py**" (a file): this file is imported when the environment is loaded. It is used to parametrize the way + the environment is made. It should define a "config" variable. This "config" is dictionary that is used to initialize + the environment. They key should be variable names. See example of such "*config.py*" file in provided environment +- "**chronics**" (a folder): this folder contains the information to generate the production / loads at each steps. + It can + itself contain multiple folder, depending on the :class:`grid2op.Chronics.GridValue` class used. In most available + environment, the class :class:`grid2op.Chronics.Multifolder` is used. This folder is optional, though it is present + in most grid2op environment provided by default. +- "**grid_layout.json**" (a file): gives, for each substation its coordinate *(x,y)* when plotted. It is optional, but + we + strongly encourage to have such. Otherwise, some tools might not work (including all the tool to represent it, such + as the renderer (`env.render`), the `EpisodeReplay` or even some other dependency package, such as Grid2Viz). 
+ +It can of course contain other information, among them: + +- "**prods_charac.csv**" (file): [see :func:`grid2op.Backend.Backend.load_redispacthing_data` for a + description of this file] + This contains all the information related to "ramps", "pmin / pmax", etc. This file is optional (grid2op can + perfectly run without it). However, if absent, then the classes + :attr:`grid2op.Space.GridObjects.redispatching_unit_commitment_availble` will be set to ``False`` thus preventing + the use of some feature that requires it (for example *redispatching* or *curtailment*) +- "**storage_units_charac.csv**" (file): [see :func:`grid2op.Backend.Backend.load_storage_data` for a description + of this file] + This file is used for a description of the storage units. It is a description of the storage units needed by grid2op. + This is optional if you don't have any storage units on the grid but required if there are (otherwise a + `BackendError` will be raised). +- "**difficulty_levels.json**" (file): This file is useful is you want to define different "difficulty" for your + environment. It should be a valid json with keys being difficulty levels ("0" for easiest to "1", "2", "3", "4", "5", + ..., "10", ..., "100", ... or "competition" for the hardest / closest to reality difficulty). + +And this is it for default environment. + +You can highly customize everything. Only the "config.py" file is really mandatory: + +- if you don't care about your environment to run on the default "Backend", you can get rid of the "grid.json" + file. In that case you will have to use the "keyword argument" "backend=..." when you create your environment + (*e.g* `env = grid2op.make(..., backend=...)` ) This is totally possible with grid2op and causes absolutely + no issues. +- if you code another :class:`grid2op.Chronics.GridValue` class, you can totally get rid of the "chronics" repository + if you want to. In that case, you will need to either provide "chronics_class=..." 
in the config.py file, + or initialize with `env = grid2op.make(..., chronics_class=...)` +- if your grid data format contains enough information for grid2op to initialize the redispatching and / or storage + data then you can freely use it and override the :func:`grid2op.Backend.Backend.load_redispacthing_data` or + :func:`grid2op.Backend.Backend.load_storage_data` and read if from the grid file without any issues at all. + +List of available environment +------------------------------ + +How to get the up to date list +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The complete list of **test** environments can be found using: + +.. code-block:: python + + import grid2op + grid2op.list_available_test_env() + +And the list of environment that can be downloaded is given by: + +.. code-block:: python + + import grid2op + grid2op.list_available_remote_env() + +In this case, remember that the data will be downloaded with: + +.. code-block:: python + + import grid2op + grid2op.get_current_local_dir() + +Description of some environments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The provided list has been updated early April 2021: + +=========================== =========== ============= ========== =============== ============================ +env name grid size maintenance opponent redisp. storage unit +=========================== =========== ============= ========== =============== ============================ +l2rpn_case14_sandbox 14 sub. ❌ ❌ ️ ✔️ ️ ❌ +l2rpn_wcci_2020 36 sub. ✔️ ️ ❌ ️ ✔️ ️ ❌ +l2rpn_neurips_2020_track1 36 sub. ✔️ ️ ✔️ ️ ✔️ ️ ❌ +l2rpn_neurips_2020_track2 118 sub. ✔️ ️ ❌ ️ ✔️ ️ ❌ +\* educ_case14_redisp \* 14 sub. ❌️ ❌ ️ ️ ✔️ ️ ❌ +\* educ_case14_storage \* 14 sub. ❌️ ❌ ️ ✔️ ️ ✔️ +\* rte_case5_example \* 5 sub. ❌️ ❌ ️ ️ ❌ ️ ️ ❌ +\* educ_case14_redisp \* 14 sub. ❌️ ❌ ️ ✔️ ️ ❌ +\* educ_case14_storage \* 14 sub. ❌️ ❌ ️ ✔️ ️ ❌ +\* rte_case14_opponent \* 14 sub. ❌️ ✔️ ️ ❌ ️ ️ ❌ +\* rte_case14_realistic \* 14 sub. ❌️ ❌ ️ ️ ✔️ ️ ❌ +\* rte_case14_redisp \* 14 sub. 
❌️ ❌ ️ ️ ✔️ ️ ❌ +\* rte_case14_test \* 14 sub. ❌️ ❌ ️ ️ ❌ ️ ️ ❌ +\* rte_case118_example \* 118 sub. ❌️ ❌ ️ ✔️ ️ ❌ +=========================== =========== ============= ========== =============== ============================ + +To create regular environment, you can do: + +.. code-block:: python + + import grid2op + env_name = ... # for example "educ_case14_redisp" or "l2rpn_wcci_2020" + env = grid2op.make(env_name) + +The first time an environment is called, the data for this environment will be downloaded from the internet. Make sure +to have an internet connection where you can access https website (such as https://github.com ). Afterwards, the data +are stored on your computer and you won't need to download it again. + +.. warning:: + + Some environment have different names. The only difference in this case will be the suffixes "_large" or "_small" + appended to them. + + This is because we release different version of them. The "basic" version are for testing purpose, + the "_small" are for making standard experiment. This should be enough with most use-case including training RL + agent. + + And you have some "_large" dataset for larger studies. The use of "large" dataset is not recommended. It can create + way more problem than it solves (for example, you can fit a small dataset entirely in memory of + most computers, and having that, you can benefit from better performances - your agent will be able to perform + more steps per seconds. See :ref:`environment-module-data-pipeline` for more information). + These datasets were released to address some really specific use in case were "overfitting" were encounter, we are + still unsure about their usefulness even in this case. + + This is the case for "l2rpn_neurips_2020_track1" and "l2rpn_neurips_2020_track2". 
To create them, you need to do + `env = grid2op.make("l2rpn_neurips_2020_track1_small")` or `env = grid2op.make("l2rpn_neurips_2020_track2_small")` + +So to create both the environment, we recommend: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_neurips_2020_track1_small" # or "l2rpn_neurips_2020_track2_small" + env = grid2op.make(env_name) + +.. warning:: + + Environment with \* are reserved for testing / education purpose only. We do not recommend to perform + extensive studies with them as they contain only little data. + +For these testing environments (the one with \* around them in the above list): + +.. code-block:: python + + import grid2op + env_name = ... # for example "l2rpn_case14_sandbox" or "educ_case14_storage" + env = grid2op.make(env_name, test=True) + +.. note:: + + More information about each environment is provided in each of the sub section below + (one sub section per environment) + + +l2rpn_case14_sandbox ++++++++++++++++++++++ + +This dataset uses the IEEE case14 powergrid slightly modified (a few generators have been added). + +It counts 14 substations, 20 lines, 6 generators and 11 loads. It does not count any storage unit. + +We recommend to use this dataset when you want to get familiar with grid2op, with powergrid modeling or RL. It is a +rather small environment where you can understand and actually see what is happening. + +This grid looks like: + +|l2rpn_case14_sandbox_layout| + + +l2rpn_neurips_2020_track1 ++++++++++++++++++++++++++++ + +This environment comes in 3 different "variations" (depending on the number of chronics available): + +- `l2rpn_neurips_2020_track1_small` (900 MB, equivalent of 48 years of powergrid data at 5 mins interval, + so `5 045 760` different steps !) +- `l2rpn_neurips_2020_track1_large` (4.5 GB, equivalent of 240 years of powergrid data at 5 mins interval, + so `25 228 800` different steps.) 
+- `l2rpn_neurips_2020_track1` (use it for test only, only a few snapshots are available) + +We recommend to create this environment with: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_neurips_2020_track1_small" + env = grid2op.make(env_name) + +It was the environment used as a training set of the neurips 2020 "L2RPN" competition, for the "robustness" track, +see https://competitions.codalab.org/competitions/25426 . + +This environment is part of the IEEE 118 grid, where some generators have been added. It counts 36 substations, 59 +powerlines, 22 generators and 37 loads. The grid is represented in the figure below: + +|l2rpn_neurips_2020_track1_layout| + +One of the specificity of this grid is that it is actually a subset of a bigger grid. Actually, it represents the grid +"circled" in red in the figure below: + +|R2_full_grid| + +This explains why there can be some "negative loads" in this environment. Indeed, this loads represent interconnection +with other part of the original grid (emphasize in green in the figure above). + + +l2rpn_neurips_2020_track2 ++++++++++++++++++++++++++++ + +- `l2rpn_neurips_2020_track2_small` (2.5 GB, split into 5 different sub-environment - each being generated from + slightly different distribution - with 10 years for each sub-environment. This makes, for each sub-environment + `1 051 200` steps, so `5 256 000` different steps in total) +- `l2rpn_neurips_2020_track2_large` (12 GB, again split into 5 different sub-environment. It is 5 times as large + as the "small" one. So it counts `26 280 000` different steps. Each containing all the information of all productions + and all loads. This is a lot of data) +- `l2rpn_neurips_2020_track2` (use it for test only, only a few snapshots are available) + +We recommend to create this environment with: + +.. 
code-block:: python + + import grid2op + env_name = "l2rpn_neurips_2020_track2_small" + env = grid2op.make(env_name) + +It was the environment used as a training set of the neurips 2020 "L2RPN" competition, for the "adaptability" track, +see https://competitions.codalab.org/competitions/25427 . + +This environment is the IEEE 118 grid, where some generators have been added. It counts 118 substations, 186 +powerlines, 62 generators and 99 loads. The grid is represented in the figure below: + +|l2rpn_neurips_2020_track2_layout| + +This grid is, as specified in the previous paragraph, a "super set" of the grid used in the other track. It does not +count any "interconnection" with other types of grid. + +l2rpn_wcci_2020 ++++++++++++++++++++++++++++ + +This environment `l2rpn_wcci_2020` weighs 4.5 GB, representing 240 equivalent years of data at 5 mins resolution, so +`25 228 800` different steps. Unfortunately, you can only download the full dataset. + +We recommend to create this environment with: + +.. code-block:: python + + import grid2op + env_name = "l2rpn_wcci_2020" + env = grid2op.make(env_name) + +It was the environment used as a training set of the WCCI 2020 "L2RPN" competition, +see https://competitions.codalab.org/competitions/24902 . + +This environment is part of the IEEE 118 grid, where some generators have been added. It counts 36 substations, 59 +powerlines, 22 generators and 37 loads. The grid is represented in the figure below: + +|l2rpn_neurips_2020_track1_layout| + +.. note:: + + It is an earlier version than the `l2rpn_neurips_2020_track1`. In the `l2rpn_wcci_2020` it is not easy + to identify which loads are "real" loads, and which are "interconnection" for example. + + Also, the names of some elements (substations, loads, lines, or generators) are different. 
+ In the `l2rpn_neurips_2020_track1` the names match the ones in `l2rpn_neurips_2020_track2` which is not + the case in `l2rpn_wcci_2020` which makes it less obvious that it is a subgrid of the IEEE 118. + + +educ_case14_redisp (test only) ++++++++++++++++++++++++++++++++ + +It is the same kind of data as the "l2rpn_case14_sandbox" (see above). It counts simply less data and allows +less different type of actions for easier "access". It does not require to dive deep into grid2op to use this environment. + +We recommend to create this environment with: + +.. code-block:: python + + import grid2op + env_name = "educ_case14_redisp" + env = grid2op.make(env_name, test=True) + + +educ_case14_storage (test only) +++++++++++++++++++++++++++++++++ + +Uses the same type of actions as the grid above ("educ_case14_redisp") but counts 2 storage units. The grid on which +it is based is also the IEEE case 14 but with 2 additional storage units. + +We recommend to create this environment with: + +.. code-block:: python + + import grid2op + env_name = "educ_case14_storage" + env = grid2op.make(env_name, test=True) + +rte_case5_example (test only) ++++++++++++++++++++++++++++++ + +.. warning:: + + We don't recommend to create this environment at all, unless you want to perform some specific dedicated tests. + +A custom made environment, totally fictive, not representative of anything, mainly developed for internal tests and +for super easy representation. + +The grid on which it is based has absolutely no "good properties" and is "mainly random" and is not calibrated +to be representative of anything, especially not of a real powergrid. Use at your own risk. + + +other environments (test only) +++++++++++++++++++++++++++++++++ + +Some other test environments are available: + +- "rte_case14_realistic" +- "rte_case14_redisp" +- "rte_case14_test" +- "rte_case118_example" + +.. 
warning:: + + We don't recommend to create any of these environments at all, + unless you want to perform some specific dedicated tests. + + This is why we don't detail them in this documentation. + + +Miscellaneous +-------------- + +Possible workflow to create an environment from existing chronics +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In this subsection, we will give an example on how to set up an environment in grid2op if you already +have some data that represents loads and productions at each steps. This paragraph aims at making more concrete +the description of the environment shown previously. + +For this, we suppose that you already have: +- a powergrid in any type of format that represents the grid you have studied. +- some injections data, in any format (csv, mysql, json, etc. etc.) + +The process to make this a grid2op environment is the following: + +1) :ref:`create_folder`: create the folder +2) :ref:`grid_json_ex`: convert the grid file / make sure you have a "backend that can read it" +3) :ref:`chronics_folder_ex`: convert your data / make sure to have a "GridValue" that understands it +4) :ref:`config_py_ex`: create the `config.py` file +5) [optional] :ref:`grid_layout_ex`: generate the `grid_layout.json` +6) [optional] :ref:`prod_charac_ex`: generate the `prod_charac.csv`and `storage_units_charac.csv` if needed +7) :ref:`test_env_ex`: charge the environment and test it +8) [optional] :ref:`calibrate_th_lim_ex`: calibrate the thermal limit and set them in the `config.py` file + +Each task is briefly described in a following paragraph. + +.. _create_folder: + +Creating the folder ++++++++++++++++++++++ +First you need to create the folder that will represent your environment. Just create an empty folder anywhere +on your computer. 
+ +For the sake of the example, we assume here the folder is `EXAMPLE_FOLDER=C:\\Users\\Me\\Documents\\my_grid2op_env`, it +can also be `EXAMPLE_FOLDER=/home/Me/Documents/my_grid2op_env` or +`EXAMPLE_FOLDER=/home/Me/Documents/anything_i_want_really` it does not matter. + +.. _grid_json_ex: + +Generate the "grid.json" file ++++++++++++++++++++++++++++++ + +.. note:: + + The title of this section is "grid.json" for simplicity. We would like to recall that grid2op does not care about + the format used to represent the powergrid. It could be an xml, excel, sql, or any format you want, really. + +We supposed for this section that you have a file representing a grid at your disposal. So it's time to use it. + +From there, there are 3 different situations you can be in: + +1) you have a grid in a given format (for example `json` format) and already have at your disposal a type of grid2op + backend (for example `PandaPowerBackend`) then you don't need to do anything in particular. +2) you have a grid in a given format (for example `.example`) and know how to convert it to a format for which + you have a backend (typically: `PandaPowerBackend`, that reads pandapower json file). In that case, you convert the + grid and you put the converted grid in the directory and you are good. For converters to pandapower, you can + consult the official pandapower documentation at https://pandapower.readthedocs.io/en/v2.6.0/converter.html . +3) you have a grid in a given format, but don't know how to convert it to a format where you have a backend. In that + case it might require a bit more work (see details below) + +.. note:: + + Case 2 above includes the case where you can convert your file in a format not compatible with default + `PandaPowerBackend`. For example, you could have a grid in a sql database, that you know how to convert to a "xml" file + and you already coded "CustomBackend" that is able to work with this xml file. This is totally fine too ! 
+ +In all cases, after you converted your file, name it `grid.something` (for example `grid.json` if your grid is +compatible with pandapower backend) into the folder `EXAMPLE_FOLDER` (for example +`C:\\Users\\Me\\Documents\\my_grid2op_env`) + +The rest of this section is only relevant if you are in case 3 above. You can go to the next section +:ref:`chronics_folder_ex` if you are in case 1 or 2 above. + +You have in that case two solutions: + +1) if you have lots of such "conversion in grid2op env to do" or if you think it makes sense for your simulator to + be used as a grid2op backend outside of your use case, then it's totally worth it to try to create a dedicated + backend class for your powerflow solver. Once done, you can reuse it or even make it available for others to use it. +2) if you are trying to do a "one shot" thing the easiest road would be to try to convert your grid into a format + that pandapower is able to understand. Pandapower does understand the Matpower format which is pretty common. You + might check if your grid format is convertible into matpower format, and then convert the matpower format to + pandapower one (for example). The main point is: try to convert the grid to a format that can be processed by + the default grid2op backend. + +.. _chronics_folder_ex: + +Organize the "chronics" folder ++++++++++++++++++++++++++++++++ + +In this step, you are supposed to provide a way for grid2op to set the value of each production and load at each step. + +The first step is then to create a folder named "chronics" in `EXAMPLE_FOLDER` (remember, in our example +`EXAMPLE_FOLDER` was `C:\\Users\\Me\\Documents\\my_grid2op_env`, so you need to create +`C:\\Users\\Me\\Documents\\my_grid2op_env\\chronics`) + +Then you need to fill this `chronics` folder with the data we supposed you had. +You have different ways to achieve this task. 
+ +1) The easiest way, in our opinion, is to convert your data into a format that can be understand by + :class:`grid2op.Chronics.Multifolder` by default (with attribute `gridvalueClass` set to + :class:`grid2op.Chronics.GridStateFromFile`). So inside your "chronics" folder you should have as many folders + as their will be different episode on your dataset. And each "episode" folder should contain the files listed + in the documentation of :class:`grid2op.Chronics.GridStateFromFile` +2) Another way, as always, is to code a class, inheriting from :class:`grid2op.Chronics.GridValue` that is able + to "load" your file and convert it, when asked, into a valid grid2op format. In this case, the main functions + to overload are :func:`grid2op.Chronics.GridValue.initialize` (called at the beginning of a scenario) + and :func:`grid2op.Chronics.GridValue.load_next` call at each "step", each time a new state is generated. + + +.. _config_py_ex: + +Set up the "config.py" file ++++++++++++++++++++++++++++ + +The goal of this file is to define characteristics for your environment. It is here that you glue everything together. +This file will be loaded each time your environment is created. + +This file looks like (example of the "l2rpn_case14_sandbox" one) the one below. Just copy paste it inside your +environment folder `EXAMPLE_FOLDER` (remember, in our example `EXAMPLE_FOLDER` was +`C:\\Users\\Me\\Documents\\my_grid2op_env`). We added some more comment for you to be able to more easily modify it: + +.. code-block:: python + + from grid2op.Action import TopologyAndDispatchAction + from grid2op.Reward import RedispReward + from grid2op.Rules import DefaultRules + from grid2op.Chronics import Multifolder + from grid2op.Chronics import GridStateFromFileWithForecasts + from grid2op.Backend import PandaPowerBackend + + # you need to define this dictionary. 
+ config = { + # type of backend to use, in this example the default PandaPowerBackend + "backend": PandaPowerBackend, + + # type of action that the agent will be allowed to perform + "action_class": TopologyAndDispatchAction, + + # use the default Observation class (CompleteObservation) + "observation_class": None, + "reward_class": RedispReward, # which reward function to use + + # how to use the "parameters" of the environment, we don't recommend to change that + "gamerules_class": DefaultRules, + + # type of chronics, if you used recommended method 1 of the "Organize the "chronics" folder" section + # don't change that. Otherwise, put the name (and its proper import) of the + # class you coded + "chronics_class": Multifolder, + + # this is specific to the "MultiFolder" part. It says that inside each "scenario folder" + # the data are represented as a format that can be understood by the GridStateFromFileWithForecasts + # class. You might need to adapt it depending on the choice you made in "Organize the "chronics" folder" + "grid_value_class": GridStateFromFileWithForecasts, + + # don't change that + "volagecontroler_class": None, + + # this is used to map the names of the elements from the grid to the chronics data. Typically, the "load + # connected to substation 1" might have a different name in the grid file (for example in the grid.json) + # and in the chronics folder (header of the csv if using `GridStateFromFileWithForecasts`) + "names_chronics_to_grid": None + } + + +.. _grid_layout_ex: + +Obtain the "grid_layout.json" +++++++++++++++++++++++++++++++ + +Work in progress. + +You can have a look at this file in one of the provided environments for more information. + +.. _prod_charac_ex: + +Set up the productions and storage characteristics ++++++++++++++++++++++++++++++++++++++++++++++++++++ + +Work in progress. + +Have a look at :func:`grid2op.Backend.Backend.load_redispacthing_data` for productions characteristics and +:func:`grid2op.Backend. 
Backend.load_storage_data` for storage characteristics. + +.. _test_env_ex: + +Test your environment ++++++++++++++++++++++ + +Once the previous steps have been performed, you can try to load your environment in grid2op. This process +is rather easy, but unfortunately, from our own experience, it might not be successful on the first trial. + +Anyway, assuming you created your environment in `EXAMPLE_FOLDER` (remember, in our example `EXAMPLE_FOLDER` was +`C:\\Users\\Me\\Documents\\my_grid2op_env`) you simply need to do, from a python "console" or a python script: + +.. code-block:: python + + import grid2op + env_folder = "C:\\Users\\Me\\Documents\\my_grid2op_env" # or /home/Me/Documents/my_grid2op_env` + # in all cases it should match the folder you created and we called EXAMPLE_FOLDER + # in all this example + my_custom_env = grid2op.make(env_folder) + + # if it loads, then congrats ! You made your first grid2op environment. + + # you might also need to check things like: + obs = my_custom_env.reset() + + # and + obs, reward, done, info = my_custom_env.step(my_custom_env.action_space()) + + +.. note:: + + We tried our best to display useful error messages if the environment is not loading properly. If you experience + any trouble at this stage, feel free to post a github issue on the official grid2op repository + https://github.com/rte-france/grid2op/issues (you might need to log in on a github account for such purpose) + + +.. _calibrate_th_lim_ex: + +Calibrate the thermal limit ++++++++++++++++++++++++++++ + +One final (but sometimes important) step for you environment to be really useful is the "calibration of the +thermal limits". + +Indeed, the main goal of a grid2op "agent" is to operate the grid "in safety". To that end, you need to specify what +are the "safety criteria". As of writing the main safety criteria are the flows on the powerline (flow in Amps, +"current flow" and not flow in MW). 
+ +To complete your environment, you then need to provide for each powerline, the maximum flow allowed on it. This is +optional in the sense that grid2op will work even if you don't do it. But we still strongly recommend to do it. + +The way you determine the maximum flow on each powerline is not covered by this "tutorial" as it heavily depends on the +problems you are trying to address and on the data you have at hand. + +Once you have it, you can set it in the "config.py" file. The way you specify it is by setting the +`thermal_limits` key in the `config` dictionary. And this "thermal_limit" is in turn a dictionary, with +the keys being the powerline name, and the value is the associated thermal limit (remember, thermal limits are in A, +not in MW, not in kA). + +The example below supposes that you have a powergrid with powerlines named "0_1_0", "0_2_1", "0_3_2", etc. +And that powerline named "0_1_0" has a thermal limit of `200. A`, that powerline "0_2_1" has a thermal limit +of `300. A`, powerline named "0_3_2" has a thermal limit of `500 A` etc. -TODO: function to list remote and local env +.. 
code-block:: python -TODO short description of each of the environments available + from grid2op.Action import TopologyAction + from grid2op.Reward import L2RPNReward + from grid2op.Rules import DefaultRules + from grid2op.Chronics import Multifolder + from grid2op.Chronics import GridStateFromFileWithForecasts + from grid2op.Backend import PandaPowerBackend -TODO: structure of environments and description of the data and mandatory files + config = { + "backend": PandaPowerBackend, + "action_class": TopologyAction, + "observation_class": None, + "reward_class": L2RPNReward, + "gamerules_class": DefaultRules, + "chronics_class": Multifolder, + "grid_value_class": GridStateFromFileWithForecasts, + "volagecontroler_class": None, + # this part is added compared to the previous example showed in sub section "Set up the "config.py" file" + # For each powerline (identified by their name, it gives the thermal limit, in A) + "thermal_limits": {'0_1_0': 200., + '0_2_1': 300., + '0_3_2': 500., + '0_4_3': 600., + '1_2_4': 700., + '2_3_5': 800., + '2_3_6': 900., + '3_4_7': 1000.} + } +Once done, you should be good to go and doing any study you want with grid2op. diff --git a/docs/backend.rst b/docs/backend.rst index 15758e0d1..d4e666861 100644 --- a/docs/backend.rst +++ b/docs/backend.rst @@ -4,6 +4,11 @@ Backend =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- diff --git a/docs/chronics.rst b/docs/chronics.rst index 87b34640e..428852556 100644 --- a/docs/chronics.rst +++ b/docs/chronics.rst @@ -3,6 +3,11 @@ Chronics =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- This module is present to handle everything related to input data that are not structural. 
diff --git a/docs/converter.rst b/docs/converter.rst index d594e21cb..3f8e5c8c8 100644 --- a/docs/converter.rst +++ b/docs/converter.rst @@ -3,6 +3,11 @@ Converters =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- In this module of grid2op, the "converters" are defined. diff --git a/docs/createbackend.rst b/docs/createbackend.rst index 27cd763d6..99646cfd8 100644 --- a/docs/createbackend.rst +++ b/docs/createbackend.rst @@ -47,6 +47,11 @@ Creating a new backend =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- diff --git a/docs/environment.rst b/docs/environment.rst index f588628b3..98550e9fe 100644 --- a/docs/environment.rst +++ b/docs/environment.rst @@ -4,6 +4,11 @@ Environment =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- This module defines the :class:`Environment` the higher level representation of the world with which an diff --git a/docs/episode.rst b/docs/episode.rst index 6a571dda9..34bc8453e 100644 --- a/docs/episode.rst +++ b/docs/episode.rst @@ -1,5 +1,10 @@ Episode =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- Grid2op defines some special function that help with restoring agent that has run during some episode that has been diff --git a/docs/exception.rst b/docs/exception.rst index 55950fe1b..ac842250f 100644 --- a/docs/exception.rst +++ b/docs/exception.rst @@ -1,5 +1,10 @@ Exception =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- Grid2op defined some specific kind of exception to help debugging programs that lead didn't execute properly. 
diff --git a/docs/grid2op.rst b/docs/grid2op.rst index ea5e6d21a..af1c9a566 100644 --- a/docs/grid2op.rst +++ b/docs/grid2op.rst @@ -3,6 +3,12 @@ Grid2Op module =================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + The grid2op module allows to model sequential decision making on a powergrid. It is modular in the sense that it allows to use different powerflow solver (denoted as "Backend"). @@ -144,6 +150,9 @@ This graph has some constraints: 1 above (sum production = sum load + sum losses) will not be met in each of the independant subgraph, most likely. - there exist a solution to the `Kirchoff Circuits Laws` +For more information on this "graph" and the way to retrieve it +in different format, you can consult the page :ref:`gridgraph-module` of the documentation. + The whole grid2op ecosystem aims at modeling the evolution of a "controller" that is able to make sure the "graph of grid", at all time meets all the constraints. diff --git a/docs/grid_graph.rst b/docs/grid_graph.rst new file mode 100644 index 000000000..7638c92d2 --- /dev/null +++ b/docs/grid_graph.rst @@ -0,0 +1,504 @@ + +.. |grid_graph_1| image:: ./img/grid_graph_1.png + :width: 45% + +.. |grid_graph_2| image:: ./img/grid_graph_2.png + :width: 45% + +.. _gridgraph-module: + +A grid, a graph: grid2op representation of the powergrid +=================================================================== + +In this section of the documentation, we will dive a deeper into the "modeling" on which grid2op is based and +especially how the underlying graph of the powergrid is represented and how it can be easily retrieved. + +.. note:: + + This whole page is a work in progress, and any contribution is welcome ! + + +First, we detail some concepts from the power system community in section +:ref:`powersystem-desc-gridgraph`. Then we explain how this graph is coded in grid2op in section +:ref:`graph-encoding-gridgraph`. 
Finally, we show some code examples on how to retrieve this graph in +section :ref:`get-the-graph-gridgraph`. + + +.. contents:: Table of Contents + :depth: 3 + +.. _powersystem-desc-gridgraph: + +Description of a powergrid adopting the "graph" representation +---------------------------------------------------------------- + +A powergrid can be represented as a "graph" (in the mathematical meaning) where: + +- nodes / vertices are represented by "buses" (or "busbars"): that is the place that different elements of the grid + are interconnected +- links / edges are represented by "powerlines". + +Nodes attributes +~~~~~~~~~~~~~~~~~~ + +The nodes of this graph have attributes: + +- they have "active power" injected at them. Adopting the "generator" convention, if power injected is positive then some + power is produced at this node, otherwise power is consumed. This active power is the sum of all power produced / + consumed for every load, generator, storage units (and optionally shunts) that are connected at this node. + In grid2op (to be consistent with the notations in power system literature) active power is noted "`p`" +- they have "reactive power" injected at them. This "reactive power" is similar to the active power. Reactive + power (out of consistency with power system literature) is noted "`q`". +- they have a "voltage magnitude" which is more commonly known as "voltage" in "every day" use. As in the power system + literature, this "voltage magnitude" is noted "`v`". **NB** For readers mainly familiar with the power system + notations, "`v`" is a real number here, it is not the "complex voltage" but the voltage magnitude (module of + the complex voltage) +- they have a "voltage angle" which is the "angle" of the "complex voltage" denoted above and is denoted + "`theta`" and is given in degree (and not in radian!). + **NB** Depending on the solver that you are using, this might not be available.
+- "sub_id": the id of the substation to which this bus belongs. +- "cooldown": if 0 it means you can split or merge this nodes with other nodes at the same substation, otherwise it + gives the number of steps you need to wait before being able to split / merge it. + +Edges attributes +~~~~~~~~~~~~~~~~~~ + +The edges of this graph have attributes: + +- "status": a powerline can be either connected (at both sides) or disconnected. Grid2op does not support, at the moment + a powerline connected at only one side. +- "thermal_limit": the maximum current (measured in amps) that can flow on the powerline +- "timestep_overflow": the number of steps the powerlines sees a current higher that the maximum flows. It is reset + to 0 each time the flow falls bellow the thermal limit. +- "cooldown": same concept as the substations, but for powerline. You cannot change its status as frequently as you + want. +- "rho": which is the relative flow. It is defined as the flow in amps (by default flows are measured on the origin side + divided by the thermal limit) +- "p_or": the active flow at the origin side of the powerline +- "p_ex": the active flow at the extremity side of the powerline +- "q_or": the reactive flow at the origin side of the powerline +- "q_ex": the reactive flow at the extremity side of the powerline +- "a_or": the current flow at the origin side of the powerline +- "a_ex": the current flow at the extremity side of the powerline +- "v_or": (optional) the voltage magnitude at the origin side of the powerline +- "V_ex": (optional) the voltage magnitude at the extremity side of the powerline +- "theta_or": (optional) the voltage angle at the origin side of the powerline +- "theta_ex": (optional) the voltage angle at the extremity side of the powerline + +**NB** A convention needs to be chosen (without loss of generality) for the orientation of the powerlines. 
When +adopting the "graph representation" powerlines are oriented in this manner: if a powerline is connected at substation +`j` on one side and at substation `k` on the other and `j < k`, then its origin side will be on the `j` side +and its extremity side will be attached to `k`. To make it clear: if a powerline connects substation 12 to 14, then +its origin side will be connected to 12 and its extremity side to 14. + +Some clarifications +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Redundant attributes +++++++++++++++++++++++ + +Some of these variables are redundant, for example: + +- if a powerline connects bus `j` with bus `k` (with `j < k`) then `v_or = nodes[j]["v"]`, `theta_or = nodes[j]["theta"]`, + same for the extremity side: `v_ex = nodes[k]["v"]`, `theta_ex = nodes[k]["theta"]` +- if two powerlines are connected at the same bus. For example: + + - the powerline id `l_1` is connected (origin side) at the + bus `j` and the powerline `l_2` is connected (origin side) at this same bus `j` then `v_or (l_1) = v_or (l_2)` and + `theta_or (l_1) = theta_or (l_2)` + - if `l_1` is connected on the extremity side at bus `k` and line `l_3` is connected at its origin side + at the same bus `k` then `v_ex (l_1) = v_or (l_3)` and + `theta_ex (l_1) = theta_or (l_3)` + - of course a similar relation holds if `l_1` and `l_2` are both connected at the extremity side on the + same bus. + +Physical equations ++++++++++++++++++++++ +All these variables are not independent from one another regardless of the "power system modeling" adopted. + +Generally speaking: + +- if you know "v" and "theta" at both sides of the powerline, you can deduce all the "p_or", "q_or", + "v_or", "theta_or", "p_ex", "q_ex", "v_ex" and "theta_ex" +- at each bus, the sum of all the "p_or" and "p_ex" and "p" is equal to 0. +- at each bus, the sum of all the "q_or" and "q_ex" and "q" is equal to 0. + +Actually, it is by solving these constraints that everything is computed. + +..
note:: + + Grid2op itself does not compute anything. The task of computing a given consistent state (from a power system point + of view) is carried out by the `Backend` and only by it. + + This means that nowhere, in grid2op code, you will find anything related to how these variables are linked to + one another. + + This modularity is really important, because lots of sets of equations can represent a powergrid depending on + the problem studied. Grid2op does not assume anything regarding the set of equations implemented in the backend. + + Actually, the same grid can be modeled with different sets of equations, leading to different results (in terms + of flows notably). This is perfectly possible in grid2op: only the backend needs to be changed. All the rest + can stay the same (*e.g.,* the agent does not need to be changed at all). + + +The grid is immutable ++++++++++++++++++++++++ +Grid2op aims at modeling grid2op operation close to real time. + +This is why the powergrid is considered as "fixed" or "immutable". For example, if for a given environment load +with id `j` is connected to substation with id `k` then for all the episodes on this environment, this will +be the case (load cannot be magically connected to another substation). + +This is a property of power system: a load representing a city, in real time, it is not possible to move completly +a city from one place of a state to another. And even if it was possible, it is definitely not desirable. + +This applies to all elements of the grid. For the same environment: + +- load will always be connected at the same substation +- generator will always be connected at the same substation +- storage units will always be connected at the same substation +- powerlines will always connects the same substations, and in this case, grid2op also offer the guarantee that + origin side will always be connected at the same substation AND extremity side will always be connected at + the same substation. 
In other words, the orientation convention adopted to define "origin side" and + "extremity side" is part of the environment. + +.. warning:: + + If you decide to code a new Backend class, then you need to meet this property. Otherwise things might break. + +Not everything can be connected together ++++++++++++++++++++++++++++++++++++++++++++ +There are also constraints on what can be done, and what cannot. + +For example, it is not possible to connect directly (without powerline) a city in the North East of a State to a +production unit at the South West of the same state. + +To adopt a more precise vocabulary, only elements (load, generator, storage unit, origin side of a powerline, +extremity side of a powerline) that are at the same substation can be directly connected together. + +If an element of the grid is connected at substation `j` and another one at substation `k` for a given environment, +in absolutely no circumstances these two objects can be directly connected together, they will never be connected at +the same bus. + +To state this constraint a bit differently, a substation can be split in independent buses (multiple nodes +at the same substation) but a node can only connect elements of the same substation. + +.. note:: + + For simplicity (in the problem exposed) grid2op does not allow to have more than 2 independent buses at a + given substation at time of writing (April 2021). + + Changing this would not be too difficult on grid2op side, but would make the action space even bigger. If you + really need to use more than 2 buses at the same substation, do not hesitate to file a feature request. + +The graph is dynamic +++++++++++++++++++++++ +Even though an element is always, under all circumstances, connected at the same substation, it can be connected +at different buses (of this substation) at different steps. This is even the main "flexibility" that is studied +with grid2op.
+ +This implies that "the" graph representing the powergrid does not always have the same number of nodes depending +on the time, nor the same number of edges, for example if powerlines are disconnected. + +.. note:: + + For real powergrid, this is possible to perform such changes in real time without the need to install new + infrastructure. This is because substations are full of "breakers" and other "switches" that can be opened or + closed. + + Again, for the sake of simplicity, breakers / switches are not directly modeled in grid2op. An agent / + environment only needs to know what is connected to what without giving the details on how its done. + + Manipulating breakers / switches is not an easy task and not everything can be done every time in real + powergrid. The removal of breaker is another simplification made for clarity. + + If you want to model these, it is perfectly possible without too much trouble. You can fill a feature request + for this if that is interesting to you. + +.. note:: + + The graph of the grid has also the property that more than one edge can connect the same pair of buses (it is + the case for parallel powerlines) + +Wrapping it up +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The "sequential decision making" modeled by the grid2op aims then at finding good actions to keep the grid in safety +(flows <= thermal limits on all powerlines) while allowing consumer to consume as much power as they want. + +The possible actions are: + +- connecting / disconnecting powerlines +- changing the "topology" of some substations, which consists in merging or splitting buses at some substations +- changing the amount of power some storage units produce / absorb +- changing the production setpoint of the generators (aka redispatching or curtailment) + +For more information, technical details are provided in page :ref:`action-module` + +.. 
_graph-encoding-gridgraph: + +How the graph of the grid is encoded in grid2op +---------------------------------------------------------------- + +In computer science, there are lots of ways to represent a graph structure, each solution having advantages and drawbacks. +In grid2op, a drastic choice has been made: the graph of the grid will not be explicitly represented (but it can +be computed on request, see section :ref:`get-the-graph-gridgraph`). + +Instead, the "graph" of the grid is stored in different vectors: + +One set of vectors (fixed, immutable) gives to which substation each element is connected, they are +the `env.load_to_subid`, `env.gen_to_subid`, `env.line_or_to_subid`, `env.line_ex_to_subid` or `env.storage_to_subid` +vectors. + +As an example, `env.load_to_subid` is a vector that has as many components as there are loads on the grid, +and for each load, it gives the id of the substation to which it is connected. More concretely, if +`env.load_to_subid[load_id] = sub_id` it means that the load of id `load_id` is connected (and will always be!) at +the substation of id `sub_id` + + +Now, remember (from the previous section) that each object can either be connected on busbar 1 or on busbar 2. +To know the complete graph of the grid, you simply need to know if the element is connected or not, and if +it's connected, whether it is connected to bus 1 or bus 2. + +This is exactly how it is represented in grid2op. All objects are assigned (by the Backend, again, this +is immutable and will always be the same for a given environment) to a position. + +These positions are given by the `env.load_pos_topo_vect`, `env.gen_pos_topo_vect`, `env.line_or_pos_topo_vect`, +`env.line_ex_pos_topo_vect` or `env.storage_pos_topo_vect` +(see :attr:`grid2op.Space.GridObjects.load_pos_topo_vect` for more information).
+ +And then, in the observation, you can retrieve the state of each object in the "topo_vect" vector: `obs.topo_vect` +(see :attr:`grid2op.Observation.BaseObservation.topo_vect` for more information). + +As an exemple, say `obs.topo_vect[42] = 2` it means that the "42nd" element (remember in python index are 0 based, +this is why i put quote on "42nd", this is actually the 43rd... but writing 43rd is more confusing, so we will +stick to "42nd") of the grid is connected to bus 2. + +To know what element of the grid is the "42nd", you can: + +1) look at the `env.load_pos_topo_vect`, `env.gen_pos_topo_vect`, `env.line_or_pos_topo_vect`, + `env.line_ex_pos_topo_vect` or `env.storage_pos_topo_vect` and find where there is a "42" there. For example if + `env.line_ex_pos_topo_vect[line_id] = 42` then you know for sure that the "42nd" element of the grid is, in that + case the extremity side of powerline `line_id`. +2) look at the table :attr:`grid2op.Space.GridObjects.grid_objects_types` and especially the line 42 so + `env.grid_objects_types[42,:]` which contains this information as well. Each column of this table encodes + for one type of element (first column is substation, second is load, then generator, then origin end of + powerline then extremity end of powerline and finally storage unit. Each will have "-1" if the element + is not of that type, and otherwise and id > 0. Taking the same example as for the above bullet point! + `env.grid_objects_types[42,:] = [sub_id, -1, -1, -1, line_id, -1]` meaning the "42nd" element of the grid + if the extremity end (because it's the 5th column) of id `line_id` (the other element being marked as "-1"). + +.. _get-the-graph-gridgraph: + +How to retrieve "the" graph in grid2op +---------------------------------------------------------------- + +As of now, we only presented a single graph that could represent the powergrid. This was to simplify the language. In +fact the graph of the grid can be represented in different manners. 
Some of them will detailed in this section. + +A summary of the types of graph that can be used to (sometimes partially) represent a powergrid is: + +======================== ================ ===================================================================== +Type of graph described in grid2op method +======================== ================ ===================================================================== +"normal graph" :ref:`graph1-gg` :func:`grid2op.Observation.BaseObservation.as_networkx` +"connectivity graph" :ref:`graph2-gg` :func:`grid2op.Observation.BaseObservation.connectivity_matrix` +"bus connectivity graph" :ref:`graph3-gg` :func:`grid2op.Observation.BaseObservation.bus_connectivity_matrix` +"flow bus graph" :ref:`graph4-gg` :func:`grid2op.Observation.BaseObservation.flow_bus_matrix` +======================== ================ ===================================================================== + +.. note:: + + None of the name of the graph are standard... It's unlikely that searching for "flow bus graph" on google + will lead to interesting results. Sorry about that. + + We are, however, really interested in having better names there. So if you have some, don't hesitate to + write an issue on the official grid2op github. + +And their respective properties: + +======================== ================ ======================== ===================== +Type of graph always same size encode all observation has flow information +======================== ================ ======================== ===================== +"normal graph" no yes yes +"connectivity graph" yes no no +"bus connectivity graph" no no no +"flow bus graph" no no yes +======================== ================ ======================== ===================== + +.. _graph1-gg: + +Graph1: the "normal graph" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Because we don't really find fancy name for it, let's call it the "normal" graph of the grid. 
This graph, you +might have guessed is the graph defined at the very top of this page of the documentation. + +Each edge is a powerline and each node is a "bus" (remember: by definition two objects are directly +connected together if they are connected at the same "bus"). + +This graph can be retrieved using the `obs.as_networkx()` command that returns a networkx graph for the entire +observation, that has all the attributes described in :ref:`powersystem-desc-gridgraph`. + +You can retrieve it with: + +.. code-block:: python + + import grid2op + env_name = ... # for example "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + + # retrieve the state, as a grid2op observation + obs = env.reset() + # the same state, as a graph + state_as_graph = obs.as_networkx() + + # attributes of node 0 + print(state_as_graph.nodes[0]) + + # print the "first" edge + first_edge = next(iter(state_as_graph.edges)) + print(state_as_graph.edges[first_edge]) + +.. note:: + + The main difference with the "graph of the grid" showed in the first section is that it is a "simple + graph" (as opposed to "multi graph"): two parallel edges are merged together. + +This graph varies in size: the number of nodes on this graph is the number of bus on the grid ! + +Effect of an action on this graph +---------------------------------------------------------------- + +Now, let's do a topological action on this graph, and print the results: + +.. 
code-block:: python + + import grid2op + import networkx + import matplotlib.pyplot as plt + + env_name = "l2rpn_case14_sandbox" + env = grid2op.make(env_name) + col_no_change = "#c4e1f5" + col_change = "#1f78b4" + line_no_change = "#383636" + line_1 = "#e66363" + line_2 = "#63e6b6" + sub_id = 4 + + # retrieve the state, as a grid2op observation + obs = env.reset() + # the same state, as a graph + state_as_graph = obs.as_networkx() + + ############################################################################## + # now plot this first graph (use some pretty color and layout...) + fig, ax = plt.subplots(1, 1, figsize=(5, 7.5)) + # ax = axs[0] + node_color = [col_no_change for _ in range(env.n_sub)] + node_color[sub_id] = col_change + edge_color = [line_no_change for _ in range(env.n_line)] + edge_color[1] = line_1 + edge_color[9] = line_1 + edge_color[6] = line_2 + edge_color[4] = line_2 + networkx.draw(state_as_graph, + pos={i: env.grid_layout[env.name_sub[i]] for i in range(env.n_sub)}, + node_color=node_color, + edge_color=edge_color, + labels={i: state_as_graph.nodes[i]["sub_id"] for i in range(env.n_sub)}, + ax=ax) + plt.tight_layout() + plt.show() + ############################################################################## + + # perform an action to split substation 4 on two buses + action = env.action_space({"set_bus": {"substations_id": [(sub_id, [1, 2, 2, 1, 1])]}}) + new_obs, *_ = env.step(action) + new_graph = new_obs.as_networkx() + + ############################################################################## + # now pretty plot it + fig, ax = plt.subplots(1, 1, figsize=(5, 7.5)) + # ax = axs[0] + dict_pos = {i: env.grid_layout[env.name_sub[i]] for i in range(env.n_sub)} + dict_pos[sub_id] = env.grid_layout[env.name_sub[4]][0] - 30, env.grid_layout[env.name_sub[4]][1] + dict_pos[env.n_sub] = env.grid_layout[env.name_sub[4]][0] + 30, env.grid_layout[env.name_sub[4]][1] + node_color = [col_no_change for _ in range(env.n_sub + 1)] + 
node_color[sub_id] = col_change + node_color[env.n_sub] = col_change + + edge_color = [line_no_change for _ in range(env.n_line)] + edge_color[1] = line_1 + edge_color[9] = line_1 + edge_color[8] = line_2 + edge_color[4] = line_2 + circle1 = plt.Circle(env.grid_layout[env.name_sub[4]], 50, color=col_no_change, alpha=0.7) + ax.add_patch(circle1) + networkx.draw(new_graph, + pos=dict_pos, + node_color=node_color, + edge_color=edge_color, + labels={i: new_graph.nodes[i]["sub_id"] for i in range(env.n_sub+1)}, + ax=ax) + plt.tight_layout() + plt.show() + ############################################################################## + + +And this gives: + +|grid_graph_1| |grid_graph_2| + +As you see, this action have for effect to split the substation 4 on 2 independent buses (one where there are +the two red powerline, another where there are the two green) + +.. note:: + + On this example, for this visualization, lots of elements of the grid are not displayed. This is the case + for the load, generator and storage units for example. + + For an easier to read (and to get! ) representation, feel free to consult the :ref:`grid2op-plot-module` + +.. _graph2-gg: + +Graph2: the "connectivity graph" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +TODO: Work in progress, any help welcome + +In the mean time, some documentation are available at :func:`grid2op.Observation.BaseObservation.connectivity_matrix` + +.. note:: + + This graph is not represented as a networkx graph, but rather as a symmetrical (sparse) matrix. + + It has no informations about the flows. It is a simple graph that indicates whether or not two objects + are on the same bus or not. + +.. _graph3-gg: + +Graph3: the "bus connectivity graph" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +TODO: Work in progress, any help welcome + +In the mean time, some documentation are available at :func:`grid2op.Observation.BaseObservation.bus_connectivity_matrix` + +.. 
note:: + + This graph is not represented as a networkx graph, but rather as a symmetrical (sparse) matrix. + + It has no information about flows, but simply about the presence / abscence of powerlines between two buses. + +.. _graph4-gg: + +Graph4: the "flow bus graph" +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +TODO: Work in progress, any help welcome + +In the mean time, some documentation are available at :func:`grid2op.Observation.BaseObservation.flow_bus_matrix` + +.. note:: + + This graph is not represented as a networkx graph, but rather as a (sparse) matrix. + +.. include:: final.rst \ No newline at end of file diff --git a/docs/gym.rst b/docs/gym.rst index a353633df..6e1c54cbb 100644 --- a/docs/gym.rst +++ b/docs/gym.rst @@ -54,6 +54,11 @@ A simple usage is: For more customization on that side, please refer to the section :ref:`gym_compat_box_discrete` below +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Observation space and action space customization ------------------------------------------------- By default, the action space and observation space are `gym.spaces.Dict` with the keys being the attribute diff --git a/docs/img/R2_full_grid.png b/docs/img/R2_full_grid.png new file mode 100644 index 000000000..6686074ec Binary files /dev/null and b/docs/img/R2_full_grid.png differ diff --git a/docs/img/grid_graph_1.png b/docs/img/grid_graph_1.png new file mode 100644 index 000000000..038f2bdca Binary files /dev/null and b/docs/img/grid_graph_1.png differ diff --git a/docs/img/grid_graph_2.png b/docs/img/grid_graph_2.png new file mode 100644 index 000000000..3621a2755 Binary files /dev/null and b/docs/img/grid_graph_2.png differ diff --git a/docs/img/l2rpn_case14_sandbox_layout.png b/docs/img/l2rpn_case14_sandbox_layout.png new file mode 100644 index 000000000..b3d5b024d Binary files /dev/null and b/docs/img/l2rpn_case14_sandbox_layout.png differ diff --git a/docs/img/l2rpn_neurips_2020_track1_layout.png 
b/docs/img/l2rpn_neurips_2020_track1_layout.png new file mode 100644 index 000000000..108951126 Binary files /dev/null and b/docs/img/l2rpn_neurips_2020_track1_layout.png differ diff --git a/docs/img/l2rpn_neurips_2020_track2_layout.png b/docs/img/l2rpn_neurips_2020_track2_layout.png new file mode 100644 index 000000000..35140182a Binary files /dev/null and b/docs/img/l2rpn_neurips_2020_track2_layout.png differ diff --git a/docs/img/random_agent.gif b/docs/img/random_agent.gif index 082e7e93c..0cb4b4a49 100644 Binary files a/docs/img/random_agent.gif and b/docs/img/random_agent.gif differ diff --git a/docs/index.rst b/docs/index.rst index 3b2379429..828726012 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -66,6 +66,7 @@ Main module content available_envs modeled_elements gym + grid_graph Plotting capabilities ---------------------- diff --git a/docs/makeenv.rst b/docs/makeenv.rst index af56f0b51..680e4859a 100644 --- a/docs/makeenv.rst +++ b/docs/makeenv.rst @@ -4,6 +4,11 @@ Make: Using pre defined Environments ==================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- The function define in this module is the easiest and most convenient ways to create a valid diff --git a/docs/modeled_elements.rst b/docs/modeled_elements.rst index 590ad5d04..ed095a806 100644 --- a/docs/modeled_elements.rst +++ b/docs/modeled_elements.rst @@ -49,6 +49,11 @@ Each type of elements will be described in the same way: to alter them. - `Equations satisfied` explains the "constraint" of all of the above +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + .. _generator-mod-el: Generators diff --git a/docs/observation.rst b/docs/observation.rst index f61ef9e1c..8c9be4bdb 100644 --- a/docs/observation.rst +++ b/docs/observation.rst @@ -47,6 +47,11 @@ Observation =================================== +This page is organized as follow: + +.. 
contents:: Table of Contents + :depth: 3 + Objectives ----------- @@ -140,11 +145,14 @@ this graph: split some bus in sub buses by changing at which busbar some element some edges from this graph when powerlines are connected / disconnected. An important feature of this graph is that its size changes: it can have a different number of nodes at different steps! -TODO add some images, and explain these graphs! +Some methods allow to retrieve these graphs, for example: - :func:`grid2op.Observation.BaseObservation.connectivity_matrix` - :func:`grid2op.Observation.BaseObservation.flow_bus_matrix` + +For more information, you can consult the :ref:`gridgraph-module` page. + Detailed Documentation by class -------------------------------- .. automodule:: grid2op.Observation diff --git a/docs/opponent.rst b/docs/opponent.rst index 1179e2604..5f53a68ff 100644 --- a/docs/opponent.rst +++ b/docs/opponent.rst @@ -3,6 +3,11 @@ Opponent Modeling =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- Power systems are a really important tool today, that can be as resilient as possible to avoid possibly dramatic @@ -20,10 +25,143 @@ The class :class:`OpponentSpace` has the delicate role to: - compute the cost of such attack - make sure this cost is not too high for the opponent budget. +How to create an opponent in any environment +--------------------------------------------- + +This section is a work in progress, it will only cover how to set up one type of opponent, and supposes +that you already know which lines you want to attack, at which frequency etc. + +More detailed information about the opponent will be provide in the future. + +The set up for the opponent in the "l2rpn_neurips_track1" has the following configuration. + +.. 
code-block:: python + + lines_attacked = ["62_58_180", "62_63_160", "48_50_136", "48_53_141", "41_48_131", "39_41_121", + "43_44_125", "44_45_126", "34_35_110", "54_58_154"] + rho_normalization = [0.45, 0.45, 0.6, 0.35, 0.3, 0.2, + 0.55, 0.3, 0.45, 0.55] + opponent_attack_cooldown = 12*24 # 24 hours, 1 hour being 12 time steps + opponent_attack_duration = 12*4 # 4 hours + opponent_budget_per_ts = 0.16667 # opponent_attack_duration / opponent_attack_cooldown + epsilon + opponent_init_budget = 144. # no need to attack straightfully, it can attack starting at midday the first day + config = { + "opponent_attack_cooldown": opponent_attack_cooldown, + "opponent_attack_duration": opponent_attack_duration, + "opponent_budget_per_ts": opponent_budget_per_ts, + "opponent_init_budget": opponent_init_budget, + "opponent_action_class": PowerlineSetAction, + "opponent_class": WeightedRandomOpponent, + "opponent_budget_class": BaseActionBudget, + 'kwargs_opponent': {"lines_attacked": lines_attacked, + "rho_normalization": rho_normalization, + "attack_period": opponent_attack_cooldown} + } + +To create the same type of opponent on the **case14** grid you can do: + +.. code-block:: python + + import grid2op + from grid2op.Action import PowerlineSetAction + from grid2op.Opponent import RandomLineOpponent, BaseActionBudget + env_name = "l2rpn_case14_sandbox" + + env_with_opponent = grid2op.make(env_name, + opponent_attack_cooldown=12*24, + opponent_attack_duration=12*4, + opponent_budget_per_ts=0.5, + opponent_init_budget=0., + opponent_action_class=PowerlineSetAction, + opponent_class=RandomLineOpponent, + opponent_budget_class=BaseActionBudget, + kwargs_opponent={"lines_attacked": + ["1_3_3", "1_4_4", "3_6_15", "9_10_12", "11_12_13", "12_13_14"]} + ) + # and now you have an opponent on the l2rpn_case14_sandbox + # you can for example + obs = env_with_opponent.reset() + + act = ... 
# chose an action here + obs, reward, done, info = env_with_opponent.step(act) + + +And for the track2 of neurips, if you want to make it even more complicated, you can add an opponent +in the same fashion: + +.. code-block:: python + + import grid2op + from grid2op.Action import PowerlineSetAction + from grid2op.Opponent import RandomLineOpponent, BaseActionBudget + env_name = "l2rpn_neurips_2020_track2_small" + + env_with_opponent = grid2op.make(env_name, + opponent_attack_cooldown=12*24, + opponent_attack_duration=12*4, + opponent_budget_per_ts=0.5, + opponent_init_budget=0., + opponent_action_class=PowerlineSetAction, + opponent_class=RandomLineOpponent, + opponent_budget_class=BaseActionBudget, + kwargs_opponent={"lines_attacked": + ["26_31_106", + "21_22_93", + "17_18_88", + "4_10_162", + "12_14_68", + "14_32_108", + "62_58_180", + "62_63_160", + "48_50_136", + "48_53_141", + "41_48_131", + "39_41_121", + "43_44_125", + "44_45_126", + "34_35_110", + "54_58_154", + "74_117_81", + "80_79_175", + "93_95_43", + "88_91_33", + "91_92_37", + "99_105_62", + "102_104_61"]} + ) + # and now you have an opponent on the l2rpn_case14_sandbox + # you can for example + obs = env_with_opponent.reset() + + act = ... # chose an action here + obs, reward, done, info = env_with_opponent.step(act) + +To summarize what is going on here: + +- `opponent_attack_cooldown`: give the minimum number of time between two attacks (here 1 attack per day) +- `opponent_attack_duration`: duration for each attack (when a line is attacked, it will not be possible to reconnect + it for that many steps). In the example it's 4h (so 48 steps) +- `opponent_action_class`: type of the action the opponent will perform (in this case `PowerlineSetAction`) +- `opponent_class`: type of the opponent. Change it at your own risk. +- `opponent_budget_class`: Each attack will cost some budget to the opponent. If no budget, the opponent cannot + attack. This specifies how the budget are computed. Do not change it. 
+- `opponent_budget_per_ts`: increase of the budget of the opponent per step. The higher this number, the faster + the opponent will regenerate its budget. +- `opponent_init_budget`: initial opponent budget. It is set to 0 to "give" the agent a bit of time before the opponent + is triggered. +- `kwargs_opponent`: additional information for the opponent. In this case we provide for each grid the powerline it + can attack. + +.. note:: + + This is only valid for the `RandomLineOpponent` that disconnects powerlines randomly (but not uniformly!). For other + type of Opponent, we don't provide any information in the documentation at this stage. Feel free to submit + a github issue if this is an issue for you. Detailed Documentation by class -------------------------------- .. automodule:: grid2op.Opponent - :members: + :members: + :autosummary: .. include:: final.rst \ No newline at end of file diff --git a/docs/plot.rst b/docs/plot.rst index 6da8c3088..47b38ea44 100644 --- a/docs/plot.rst +++ b/docs/plot.rst @@ -4,10 +4,16 @@ .. |14bus_2| image:: ./img/14bus_2.png .. |14bus_th_lim| image:: ./img/14bus_th_lim.png +.. _grid2op-plot-module: Grid2Op Plotting capabilities (beta) ===================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- This module contrains all the plotting utilities of grid2op. These utilities can be used in different manners to serve diff --git a/docs/quickstart.rst b/docs/quickstart.rst index 8dfa82afb..f8ee8d4f6 100644 --- a/docs/quickstart.rst +++ b/docs/quickstart.rst @@ -1,6 +1,11 @@ Getting started =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + In this chapter we present how to install grid2op.
############ diff --git a/docs/reward.rst b/docs/reward.rst index af988efac..555988adf 100644 --- a/docs/reward.rst +++ b/docs/reward.rst @@ -3,6 +3,11 @@ Reward =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- This module implements some utilities to get rewards given an :class:`grid2op.Action` an :class:`grid2op.Environment` diff --git a/docs/rules.rst b/docs/rules.rst index b2f207506..db2be160e 100644 --- a/docs/rules.rst +++ b/docs/rules.rst @@ -3,6 +3,11 @@ Rules of the Game =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- The Rules module define what is "Legal" and what is not. For example, it can be usefull, at the beginning of the diff --git a/docs/runner.rst b/docs/runner.rst index 76c3ddf2f..e6c6bc80b 100644 --- a/docs/runner.rst +++ b/docs/runner.rst @@ -3,6 +3,11 @@ Runner =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- The runner class aims at: diff --git a/docs/space.rst b/docs/space.rst index 9d908eb63..aa85d0a49 100644 --- a/docs/space.rst +++ b/docs/space.rst @@ -3,6 +3,11 @@ Space =================================== +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- This module exposes the action space definition, the observation space definition (both depend on the underlying diff --git a/docs/utils.rst b/docs/utils.rst index 152bf8b31..fde3a084a 100644 --- a/docs/utils.rst +++ b/docs/utils.rst @@ -3,6 +3,11 @@ Utility classes =================================== +This page is organized as follow: + +.. 
contents:: Table of Contents + :depth: 3 + Objectives ----------- This module exposes some utility classes that can be used for example to compute run and store some information about diff --git a/docs/voltagecontroler.rst b/docs/voltagecontroler.rst index 1cf908423..eb7b902f3 100644 --- a/docs/voltagecontroler.rst +++ b/docs/voltagecontroler.rst @@ -2,6 +2,12 @@ Voltage Controler =================================== + +This page is organized as follow: + +.. contents:: Table of Contents + :depth: 3 + Objectives ----------- Powergrid are really complex objects. One of this complexity comes from the management of the voltages. diff --git a/grid2op/Backend/Backend.py b/grid2op/Backend/Backend.py index 5f1ae084b..45a790ef3 100644 --- a/grid2op/Backend/Backend.py +++ b/grid2op/Backend/Backend.py @@ -8,6 +8,7 @@ import copy import os +import sys import warnings import json @@ -135,6 +136,7 @@ def __init__(self, detailed_infos_for_cascading_failures=False): # if this information is not present, then "get_action_to_set" might not behave correctly self.comp_time = 0. + self.can_output_theta = False @abstractmethod def load_grid(self, path, filename=None): @@ -663,17 +665,40 @@ def shunt_info(self): Returns ------- - shunt_p ``numpy.ndarray`` + shunt_p: ``numpy.ndarray`` For each shunt, the active power it withdraw at the bus to which it is connected. - shunt_q ``numpy.ndarray`` + shunt_q: ``numpy.ndarray`` For each shunt, the reactive power it withdraw at the bus to which it is connected. - shunt_v ``numpy.ndarray`` + shunt_v: ``numpy.ndarray`` For each shunt, the voltage magnitude of the bus to which it is connected. - shunt_bus ``numpy.ndarray`` + shunt_bus: ``numpy.ndarray`` For each shunt, the bus id to which it is connected. """ return [], [], [], [] + def get_theta(self): + """ + + Notes + ----- + Don't forget to set the flag :attr:`Backend.can_output_theta` to ``True`` in the + :func:`Backend.load_grid` if you support this feature.
+ + Returns + ------- + line_or_theta: ``numpy.ndarray`` + For each origin side of powerline, gives the voltage angle + line_ex_theta: ``numpy.ndarray`` + For each extremity side of powerline, gives the voltage angle + load_theta: ``numpy.ndarray`` + Gives the voltage angle to the bus at which each load is connected + gen_theta: ``numpy.ndarray`` + Gives the voltage angle to the bus at which each generator is connected + storage_theta: ``numpy.ndarray`` + Gives the voltage angle to the bus at which each storage unit is connected + """ + raise NotImplementedError("Your backend does not support the retrieval of the voltage angle theta.") + def sub_from_bus_id(self, bus_id): """ INTERNAL @@ -1365,6 +1390,7 @@ def load_grid_layout(self, path, name='grid_layout.json'): "".format(el, e_)) self.attach_layout(grid_layout=new_grid_layout) + return None def _aux_get_line_status_to_set(self, line_status): line_status = 2 * line_status - 1 @@ -1464,15 +1490,28 @@ def assert_grid_correct(self): This is done as it should be by the Environment """ - # and set up the proper class and everything - # lazy loading from grid2op.Action import CompleteAction from grid2op.Action._BackendAction import _BackendAction - self.__class__ = self.init_grid(self) - self.my_bk_act_class = _BackendAction.init_grid(self) - self._complete_action_class = CompleteAction.init_grid(self) - super().assert_grid_correct() + + orig_type = type(self) + if orig_type.my_bk_act_class is None: + # class is already initialized + # and set up the proper class and everything + self._init_class_attr() + # hack due to changing class of imported module in the module itself + self.__class__ = type(self).init_grid(type(self), force_module=type(self).__module__) + setattr(sys.modules[type(self).__module__], self.__class__.__name__, self.__class__) + + # reset the attribute of the grid2op.Backend.Backend class + # that can be messed up with depending on the initialization of the backend + Backend._clear_class_attribute() + 
orig_type._clear_class_attribute() + + my_cls = type(self) + my_cls.my_bk_act_class = _BackendAction.init_grid(my_cls) + my_cls._complete_action_class = CompleteAction.init_grid(my_cls) + my_cls.assert_grid_correct_cls() def assert_grid_correct_after_powerflow(self): """ diff --git a/grid2op/Backend/EducPandaPowerBackend.py b/grid2op/Backend/EducPandaPowerBackend.py index 6fcf9fb0e..1cbbd9586 100644 --- a/grid2op/Backend/EducPandaPowerBackend.py +++ b/grid2op/Backend/EducPandaPowerBackend.py @@ -178,8 +178,8 @@ def load_grid(self, path=None, filename=None): copy.deepcopy(self._grid.trafo["lv_bus"]) )) - # and now we don't forget to inialize the rest - self._compute_pos_big_topo() + # and now we don't forget to initialize the rest + self._compute_pos_big_topo() # we highly recommend you to call this ! # and now the thermal limit self.thermal_limit_a = 1000 * np.concatenate((self._grid.line["max_i_ka"].values, diff --git a/grid2op/Backend/PandaPowerBackend.py b/grid2op/Backend/PandaPowerBackend.py index 4a227fb71..f5e68cae9 100644 --- a/grid2op/Backend/PandaPowerBackend.py +++ b/grid2op/Backend/PandaPowerBackend.py @@ -180,6 +180,32 @@ def __init__(self, detailed_infos_for_cascading_failures=False): # produce / absorbs anything # TODO storage doc (in grid2op rst) of the backend + self.can_output_theta = True # I support the voltage angle + self.theta_or = None + self.theta_ex = None + self.load_theta = None + self.gen_theta = None + self.storage_theta = None + + def get_theta(self): + """ + TODO doc + + Returns + ------- + theta_or: ``numpy.ndarray`` + For each orgin side of powerline, gives the voltage angle (in degree) + theta_ex: ``numpy.ndarray`` + For each extremity side of powerline, gives the voltage angle (in degree) + load_theta: ``numpy.ndarray`` + Gives the voltage angle (in degree) to the bus at which each load is connected + gen_theta: ``numpy.ndarray`` + Gives the voltage angle (in degree) to the bus at which each generator is connected + storage_theta: 
``numpy.ndarray`` + Gives the voltage angle (in degree) to the bus at which each storage unit is connected + """ + return self.cst_1 * self.theta_or, self.cst_1 * self.theta_ex, self.cst_1 * self.load_theta, \ + self.cst_1 * self.gen_theta, self.cst_1 * self.storage_theta def get_nb_active_bus(self): """ @@ -350,7 +376,6 @@ def load_grid(self, path=None, filename=None): self.n_storage = copy.deepcopy(self._grid.storage.shape[0]) if self.n_storage == 0: self.set_no_storage() - need_init_storage = False else: if "name" in self._grid.storage.columns and not self._grid.storage["name"].isnull().values.any(): self.name_storage = [nl for nl in self._grid.storage["name"]] @@ -358,7 +383,6 @@ def load_grid(self, path=None, filename=None): self.name_storage = ["storage_{bus}_{index_sto}".format(**row, index_sto=i) for i, (_, row) in enumerate(self._grid.storage.iterrows())] self.name_storage = np.array(self.name_storage) - need_init_storage = True self.n_sub = copy.deepcopy(self._grid.bus.shape[0]) self.name_sub = ["sub_{}".format(i) for i, row in self._grid.bus.iterrows()] @@ -541,6 +565,12 @@ def _init_private_attrs(self): else: self._big_topo_to_backend[pos_big_topo] = (l_id, l_id - self.__nb_powerline, 5) + self.theta_or = np.full(self.n_line, fill_value=np.NaN, dtype=dt_float) + self.theta_ex = np.full(self.n_line, fill_value=np.NaN, dtype=dt_float) + self.load_theta = np.full(self.n_load, fill_value=np.NaN, dtype=dt_float) + self.gen_theta = np.full(self.n_gen, fill_value=np.NaN, dtype=dt_float) + self.storage_theta = np.full(self.n_storage, fill_value=np.NaN, dtype=dt_float) + self._topo_vect = self._get_topo_vect() self.tol = 1e-5 # this is NOT the pandapower tolerance !!!! this is used to check if a storage unit # produce / absorbs anything @@ -776,8 +806,8 @@ def runpf(self, is_dc=False): # sometimes pandapower does not detect divergence and put Nan. 
raise pp.powerflow.LoadflowNotConverged("Isolated gen") - self.prod_p[:], self.prod_q[:], self.prod_v[:] = self._gens_info() - self.load_p[:], self.load_q[:], self.load_v[:] = self._loads_info() + self.prod_p[:], self.prod_q[:], self.prod_v[:], self.gen_theta[:] = self._gens_info() + self.load_p[:], self.load_q[:], self.load_v[:], self.load_theta[:] = self._loads_info() if not is_dc: if not np.all(np.isfinite(self.load_v)): # TODO see if there is a better way here @@ -803,6 +833,7 @@ def runpf(self, is_dc=False): self.q_or[:] = self._aux_get_line_info("q_from_mvar", "q_hv_mvar") self.v_or[:] = self._aux_get_line_info("vm_from_pu", "vm_hv_pu") self.a_or[:] = self._aux_get_line_info("i_from_ka", "i_hv_ka") * 1000 + self.theta_or[:] = self._aux_get_line_info("va_from_degree", "va_hv_degree") self.a_or[~np.isfinite(self.a_or)] = 0. self.v_or[~np.isfinite(self.v_or)] = 0. @@ -810,13 +841,13 @@ def runpf(self, is_dc=False): self.q_ex[:] = self._aux_get_line_info("q_to_mvar", "q_lv_mvar") self.v_ex[:] = self._aux_get_line_info("vm_to_pu", "vm_lv_pu") self.a_ex[:] = self._aux_get_line_info("i_to_ka", "i_lv_ka") * 1000 + self.theta_ex[:] = self._aux_get_line_info("va_to_degree", "va_lv_degree") self.a_ex[~np.isfinite(self.a_ex)] = 0. self.v_ex[~np.isfinite(self.v_ex)] = 0. # it seems that pandapower does not take into account disconencted powerline for their voltage self.v_or[~self.line_status] = 0. self.v_ex[~self.line_status] = 0. 
- self.v_or[:] *= self.lines_or_pu_to_kv self.v_ex[:] *= self.lines_ex_pu_to_kv @@ -825,7 +856,7 @@ def runpf(self, is_dc=False): # handle storage units # note that we have to look ourselves for disconnected storage - self.storage_p[:], self.storage_q[:], self.storage_v[:] = self._storages_info() + self.storage_p[:], self.storage_q[:], self.storage_v[:], self.storage_theta[:] = self._storages_info() deact_storage = ~np.isfinite(self.storage_v) if np.any(np.abs(self.storage_p[deact_storage]) > self.tol): raise pp.powerflow.LoadflowNotConverged("Isolated storage set to absorb / produce something") @@ -863,6 +894,12 @@ def _reset_all_nan(self): self.storage_v[:] = np.NaN self._nb_bus_before = None + self.theta_or[:] = np.NaN + self.theta_ex[:] = np.NaN + self.load_theta[:] = np.NaN + self.gen_theta[:] = np.NaN + self.storage_theta[:] = np.NaN + def copy(self): """ INTERNAL @@ -996,6 +1033,7 @@ def _gens_info(self): prod_p = self.cst_1 * self._grid.res_gen["p_mw"].values.astype(dt_float) prod_q = self.cst_1 * self._grid.res_gen["q_mvar"].values.astype(dt_float) prod_v = self.cst_1 * self._grid.res_gen["vm_pu"].values.astype(dt_float) * self.prod_pu_to_kv + prod_theta = self.cst_1 * self._grid.res_gen["va_degree"].values.astype(dt_float) if self._iref_slack is not None: # slack bus and added generator are on same bus. I need to add power of slack bus to this one. 
@@ -1003,13 +1041,14 @@ if "gen" in self._grid._ppc["internal"]: prod_p[self._id_bus_added] += self._grid._ppc["internal"]["gen"][self._iref_slack, 1] prod_q[self._id_bus_added] += self._grid._ppc["internal"]["gen"][self._iref_slack, 2] - return prod_p, prod_q, prod_v + return prod_p, prod_q, prod_v, prod_theta def _loads_info(self): load_p = self.cst_1 * self._grid.res_load["p_mw"].values.astype(dt_float) load_q = self.cst_1 * self._grid.res_load["q_mvar"].values.astype(dt_float) load_v = self._grid.res_bus.loc[self._grid.load["bus"].values]["vm_pu"].values.astype(dt_float) * self.load_pu_to_kv - return load_p, load_q, load_v + load_theta = self._grid.res_bus.loc[self._grid.load["bus"].values]["va_degree"].values.astype(dt_float) + return load_p, load_q, load_v, load_theta def generators_info(self): return self.cst_1 * self.prod_p, self.cst_1 * self.prod_q, self.cst_1 * self.prod_v @@ -1043,11 +1082,13 @@ def _storages_info(self): p_storage = self._grid.res_storage["p_mw"].values.astype(dt_float) q_storage = self._grid.res_storage["q_mvar"].values.astype(dt_float) v_storage = self._grid.res_bus.loc[self._grid.storage["bus"].values]["vm_pu"].values.astype(dt_float) * self.storage_pu_to_kv + theta_storage = self._grid.res_bus.loc[self._grid.storage["bus"].values]["va_degree"].values.astype(dt_float) else: p_storage = np.zeros(shape=0, dtype=dt_float) q_storage = np.zeros(shape=0, dtype=dt_float) v_storage = np.zeros(shape=0, dtype=dt_float) - return p_storage, q_storage, v_storage + theta_storage = np.zeros(shape=0, dtype=dt_float) + return p_storage, q_storage, v_storage, theta_storage def sub_from_bus_id(self, bus_id): if bus_id >= self._number_true_line: diff --git a/grid2op/Chronics/GridStateFromFile.py b/grid2op/Chronics/GridStateFromFile.py index a8882a847..ee0809ab8 100644 --- a/grid2op/Chronics/GridStateFromFile.py +++ b/grid2op/Chronics/GridStateFromFile.py @@ -53,7 +53,8 @@ class GridStateFromFile(GridValue): - 
"prod_p.csv": for each time steps, this file contains the value for the active production of each generators of the grid (it counts as many rows as the number of time steps - and its header) and as many columns as the number of generators on the grid. The header must contains the names of - the generators used to map their value on the grid. Values must be convertible to floating point. + the generators used to map their value on the grid. Values must be convertible to floating point and the + column separator of this file should be semi-colon `;` (unless you specify a "sep" when loading this class) - "prod_v.csv": same as "prod_p.csv" but for the production voltage setpoint. - "load_p.csv": same as "prod_p.csv" but for the load active value (number of columns = number of loads) - "load_q.csv": same as "prod_p.csv" but for the load reactive value (number of columns = number of loads) @@ -61,6 +62,8 @@ class GridStateFromFile(GridValue): each time step (row). - "hazards.csv": that contains whether or not there is a hazard for a given powerline (column) at each time step (row). + - "start_datetime.info": the time stamp (date and time) at which the chronic is starting. + - "time_interval.info": the amount of time between two consecutive steps (*e.g.* 5 mins, or 1h) If a file is missing, it is understood as "this value will not be modified". For example, if the file "prod_v.csv" is not present, it will be equivalent as not modifying the production voltage setpoint, never. 
diff --git a/grid2op/Chronics/GridValue.py b/grid2op/Chronics/GridValue.py index 88222cb8f..be9632f02 100644 --- a/grid2op/Chronics/GridValue.py +++ b/grid2op/Chronics/GridValue.py @@ -439,7 +439,7 @@ def load_next(self): about the grid state (load p and load q, prod p and prod v as well as some maintenance or hazards information) - Generate the next values, either by reading from a file, or by generating on the fly and return a dictionnary + Generate the next values, either by reading from a file, or by generating on the fly and return a dictionary compatible with the :class:`grid2op.BaseAction` class allowed for the :class:`Environment`. More information about this dictionary can be found at :func:`grid2op.BaseAction.update`. diff --git a/grid2op/Chronics/MultiFolder.py b/grid2op/Chronics/MultiFolder.py index 6f13992de..5ae0b8dd7 100644 --- a/grid2op/Chronics/MultiFolder.py +++ b/grid2op/Chronics/MultiFolder.py @@ -445,8 +445,8 @@ def shuffle(self, shuffler=None): obs, reward, done, info = env.step(act) .. warning:: Though it is possible to use this "shuffle" function to only use some chronics, we highly - recommend you to have a look at the sections :sec:`environment-module-chronics-info` or - :sec:`environment-module-train-val-test`. It is likely that you will find better way to do + recommend you to have a look at the sections :ref:`environment-module-chronics-info` or + :ref:`environment-module-train-val-test`. It is likely that you will find better way to do what you want to do there. Use this last example with care then. .. 
warning:: As stated on the :func:`MultiFolder.reset`, any call to `env.chronics_handler.reset` diff --git a/grid2op/Converter/BackendConverter.py b/grid2op/Converter/BackendConverter.py index f36c072bc..15dda37d3 100644 --- a/grid2op/Converter/BackendConverter.py +++ b/grid2op/Converter/BackendConverter.py @@ -73,6 +73,7 @@ def __init__(self, difcf = detailed_infos_for_cascading_failures self.source_backend = source_backend_class(detailed_infos_for_cascading_failures=difcf) self.target_backend = target_backend_class(detailed_infos_for_cascading_failures=difcf) + # if the target backend (the one performing the powerflows) needs a different file self.target_backend_grid_path = target_backend_grid_path # key: name in the source backend, value name in the target backend, for the substations @@ -117,22 +118,22 @@ def load_grid(self, path=None, filename=None): self.target_backend.load_grid(path, filename) def _assert_same_grid(self): - """basic assertion that self and the target backend have the same grid""" - if self.n_sub != self.target_backend.n_sub: + """basic assertion that self and the target backend have the same grid + but not necessarily the same object at the same place of course""" + if type(self).n_sub != type(self.target_backend).n_sub: raise Grid2OpException(ERROR_NB_ELEMENTS.format("substations")) - if self.n_gen != self.target_backend.n_gen: + if type(self).n_gen != type(self.target_backend).n_gen: raise Grid2OpException(ERROR_NB_ELEMENTS.format("generators")) - if self.n_load != self.target_backend.n_load: + if type(self).n_load != type(self.target_backend).n_load: raise Grid2OpException(ERROR_NB_ELEMENTS.format("loads")) - if self.n_line != self.target_backend.n_line: + if type(self).n_line != type(self.target_backend).n_line: raise Grid2OpException(ERROR_NB_ELEMENTS.format("lines")) - if self.n_storage != self.target_backend.n_storage: + if type(self).n_storage != type(self.target_backend).n_storage: raise 
Grid2OpException(ERROR_NB_ELEMENTS.format("storages")) def _init_myself(self): # shortcut to set all information related to the class, except the name of the environment # this should been done when the source backend is fully initialized only - self.__class__ = self.init_grid(self.source_backend) self._assert_same_grid() # and now init all the converting vectors @@ -212,15 +213,6 @@ def _init_myself(self): nm="shunt") self.set_thermal_limit(self.target_backend.thermal_limit_a[self._line_tg2sr]) - if self.path_redisp is not None: - # redispatching data were available - super().load_redispacthing_data(self.path_redisp, name=self.name_redisp) - if self.path_storage_data is not None: - super().load_storage_data(self.path_storage_data, self.name_storage_data) - if self.path_grid_layout is not None: - # grid layout data were available - super().load_grid_layout(self.path_grid_layout, self.name_grid_layout) - def _get_possible_target_ids(self, id_source, source_2_id_sub, target_2_id_sub, nm): id_sub_source = source_2_id_sub[id_source] id_sub_target = self._sub_tg2sr[id_sub_source] @@ -229,7 +221,9 @@ def _get_possible_target_ids(self, id_source, source_2_id_sub, target_2_id_sub, raise RuntimeError(ERROR_ELEMENT_CONNECTED.format(nm, id_sub_target, id_sub_source)) return id_sub_target, ids_target - def _auto_fill_vect_load_gen_shunt(self, n_element, source_2_id_sub, target_2_id_sub, + def _auto_fill_vect_load_gen_shunt(self, + n_element, + source_2_id_sub, target_2_id_sub, tg2sr, sr2tg, nm): nb_load_per_sub = np.zeros(self.n_sub, dtype=dt_int) @@ -303,22 +297,37 @@ def _auto_fill_vect_topo_aux(self, n_elem, source_pos, target_pos, sr2tg): def assert_grid_correct(self): # this is done before a call to this function, by the environment - self.source_backend.set_env_name(self.env_name) - self.target_backend.set_env_name(self.env_name) - - # everything went well, so i can properly terminate my initialization - self._init_myself() + env_name = type(self).env_name + 
type(self.target_backend).set_env_name(env_name) + type(self.source_backend).set_env_name(env_name) + self._init_class_attr(obj=self.source_backend) - # the next is not done as it is supposed to be done in "assert_grid_correct_after_powerflow" - self.source_backend.__class__ = self.source_backend.init_grid(self) - self.target_backend.__class__ = self.target_backend.init_grid(self) # for this one i am not sure + if self.path_redisp is not None: + # redispatching data were available + super().load_redispacthing_data(self.path_redisp, name=self.name_redisp) + self.source_backend.load_redispacthing_data(self.path_redisp, name=self.name_redisp) + self.target_backend.load_redispacthing_data(self.path_redisp, name=self.name_redisp) + if self.path_storage_data is not None: + super().load_storage_data(self.path_storage_data, self.name_storage_data) + self.source_backend.load_storage_data(self.path_redisp, name=self.name_redisp) + self.target_backend.load_storage_data(self.path_redisp, name=self.name_redisp) + if self.path_grid_layout is not None: + # grid layout data were available + super().load_grid_layout(self.path_grid_layout, self.name_grid_layout) + self.source_backend.load_grid_layout(self.path_redisp, name=self.name_redisp) - # now i assert that the powergrids are ok - self.source_backend.assert_grid_correct() + # init the target backend (the one that does the computation and that is initialized) self.target_backend.assert_grid_correct() + # initialize the other one, because, well the grid should be seen from both backend + self.source_backend._init_class_attr(obj=self) + self.source_backend.assert_grid_correct() # and this should be called after all the rest super().assert_grid_correct() + + # everything went well, so i can properly terminate my initialization + self._init_myself() + if self.sub_source_target is None: # automatic mode for substations, names must match assert np.all(self.target_backend.name_sub[self._sub_tg2sr] == self.source_backend.name_sub) diff 
--git a/grid2op/Environment/BaseEnv.py b/grid2op/Environment/BaseEnv.py index b11c2c41c..6b0e4d62f 100644 --- a/grid2op/Environment/BaseEnv.py +++ b/grid2op/Environment/BaseEnv.py @@ -171,7 +171,7 @@ class BaseEnv(GridObjects, RandomObject, ABC): The rules of the game (define which actions are legal and which are not) - _helper_action_player: :class:`grid2op.Action.ActionSpace` + _action_space: :class:`grid2op.Action.ActionSpace` .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ Helper used to manipulate more easily the actions given to / provided by the :class:`grid2op.Agent.BaseAgent` @@ -182,7 +182,7 @@ class BaseEnv(GridObjects, RandomObject, ABC): Helper used to manipulate more easily the actions given to / provided by the environment to the backend. - _helper_observation: :class:`grid2op.Observation.ObservationSpace` + _observation_space: :class:`grid2op.Observation.ObservationSpace` .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ Helper used to generate the observation that will be given to the :class:`grid2op.BaseAgent` @@ -301,13 +301,13 @@ def __init__(self, self._helper_action_env = None self.chronics_handler = None self._game_rules = None - self._helper_action_player = None + self._action_space = None self._rewardClass = None self._actionClass = None self._observationClass = None self._legalActClass = None - self._helper_observation = None + self._observation_space = None self._names_chronics_to_backend = None self._reward_helper = None @@ -379,6 +379,28 @@ def __init__(self, self._sum_curtailment_mw = None self._sum_curtailment_mw_prev = None + @property + def action_space(self): + """this represent a view on the action space""" + return self._action_space + + @action_space.setter + def action_space(self, other): + raise EnvError("Impossible to modify the action space of the environment. You probably want to modify " + "the action with which the agent is interacting. 
You can do that with a converter, or " + "using the GymEnv. Please consult the documentation.") + + @property + def observation_space(self): + """this represent a view on the action space""" + return self._observation_space + + @observation_space.setter + def observation_space(self, other): + raise EnvError("Impossible to modify the observation space of the environment. You probably want to modify " + "the observation with which the agent is interacting. You can do that with a converter, or " + "using the GymEnv. Please consult the documentation.") + def change_parameters(self, new_parameters): """ Allows to change the parameters of an environment. @@ -457,14 +479,22 @@ def _create_opponent(self): self._oppSpace.init_opponent(partial_env=self, **self._kwargs_opponent) self._oppSpace.reset() + def _init_myclass(self): + if self._backend_action_class is not None: + # the class has already been initialized + return + bk_type = type(self.backend) # be careful here: you need to initialize from the class, and not from the object + # create the proper environment class for this specific environment + self.__class__ = type(self).init_grid(bk_type) + def _has_been_initialized(self): # type of power flow to play # if True, then it will not disconnect lines above their thermal limits - bk_type = type(self.backend) # be carefull here: you need to initialize from the class, and not from the object - self.__class__ = self.init_grid(bk_type) # create the proper environment class for this specific environment + self._init_myclass() + bk_type = type(self.backend) if np.min([self.n_line, self.n_gen, self.n_load, self.n_sub]) <= 0: raise EnvironmentError("Environment has not been initialized properly") - self._backend_action_class = _BackendAction.init_grid(self.backend) + self._backend_action_class = _BackendAction.init_grid(bk_type) self._backend_action = self._backend_action_class() # initialize maintenance / hazards @@ -549,14 +579,14 @@ def reset(self): if self.__new_param is 
not None: self._update_parameters() # reset __new_param to None too if self.__new_forecast_param is not None: - self._helper_observation._change_parameters(self.__new_forecast_param) + self._observation_space._change_parameters(self.__new_forecast_param) self.__new_forecast_param = None if self.__new_reward_func is not None: self._reward_helper.change_reward(self.__new_reward_func) self._reward_helper.initialize(self) self.reward_range = self._reward_helper.range() # change also the reward used in simulate - self._helper_observation.change_reward(self._reward_helper.template_reward) + self._observation_space.change_reward(self._reward_helper.template_reward) self.__new_reward_func = None self._reset_storage() @@ -645,12 +675,12 @@ def seed(self, seed=None): if self.chronics_handler is not None: seed = self.space_prng.randint(max_int) seed_chron = self.chronics_handler.seed(seed) - if self._helper_observation is not None: + if self._observation_space is not None: seed = self.space_prng.randint(max_int) - seed_obs = self._helper_observation.seed(seed) - if self._helper_action_player is not None: + seed_obs = self._observation_space.seed(seed) + if self._action_space is not None: seed = self.space_prng.randint(max_int) - seed_action_space = self._helper_action_player.seed(seed) + seed_action_space = self._action_space.seed(seed) if self._helper_action_env is not None: seed = self.space_prng.randint(max_int) seed_env_modif = self._helper_action_env.seed(seed) @@ -696,8 +726,8 @@ def deactivate_forecast(self): # obs.simulate(do_nothing_action) # DO NOT RUN IT RAISES AN ERROR """ - if self._helper_observation is not None: - self._helper_observation.with_forecast = False + if self._observation_space is not None: + self._observation_space.with_forecast = False self.with_forecast = False def reactivate_forecast(self): @@ -736,8 +766,8 @@ def reactivate_forecast(self): simobs, sim_r, sim_d, sim_info = obs.simulate(do_nothing_action) """ - if self._helper_observation is not 
None: - self._helper_observation.with_forecast = True + if self._observation_space is not None: + self._observation_space.with_forecast = True self.with_forecast = True @abstractmethod @@ -765,6 +795,10 @@ def set_thermal_limit(self, thermal_limit): The new thermal limit. It must be a numpy ndarray vector (or convertible to it). For each powerline it gives the new thermal limit. + Alternatively, this can be a dictionary mapping the line names (keys) to its thermal limits (values). In + that case, all thermal limits for all powerlines should be specified (this is a safety measure + to reduce the odds of misuse). + Examples --------- @@ -780,20 +814,49 @@ def set_thermal_limit(self, thermal_limit): # i set the thermal limit of each powerline to 20000 amps env.set_thermal_limit([20000 for _ in range(env.n_line)]) + Notes + ----- + As of grid2op > 1.5.0, it is possible to set the thermal limit by using a dictionary with the keys being + the name of the powerline and the values the thermal limits. """ if not self.__is_init: raise Grid2OpException("Impossible to set the thermal limit to a non initialized Environment") - try: - tmp = np.array(thermal_limit).flatten().astype(dt_float) - except Exception as exc_: - raise Grid2OpException(f"Impossible to convert the vector as input into a 1d numpy float array. " - f"Error was: \n {exc_}") - if tmp.shape[0] != self.n_line: - raise Grid2OpException("Attempt to set thermal limit on {} powerlines while there are {}" - "on the grid".format(tmp.shape[0], self.n_line)) - if np.any(~np.isfinite(tmp)): - raise Grid2OpException("Impossible to use non finite value for thermal limits.") + if isinstance(thermal_limit, dict): + tmp = np.full(self.n_line, fill_value=np.NaN, dtype=dt_float) + for key, val in thermal_limit.items(): + if key not in self.name_line: + raise Grid2OpException(f"When setting a thermal limit with a dictionary, the keys should be line " + f"names. We found: {key} which is not a line name. 
The names of the " + f"powerlines are {self.name_line}") + ind_line = np.where(self.name_line == key)[0][0] + if np.isfinite(tmp[ind_line]): + raise Grid2OpException(f"Humm, there is a really strange bug, some lines are set twice.") + try: + val_fl = float(val) + except Exception as exc_: + raise Grid2OpException(f"When setting thermal limit with a dictionary, the keys should be " + f"the values of the thermal limit (in amps) you provided something that " + f"cannot be converted to a float. Error was \"{exc_}\".") + tmp[ind_line] = val_fl + + elif isinstance(thermal_limit, (np.ndarray, list)): + try: + tmp = np.array(thermal_limit).flatten().astype(dt_float) + except Exception as exc_: + raise Grid2OpException(f"Impossible to convert the vector as input into a 1d numpy float array. " + f"Error was: \n {exc_}") + if tmp.shape[0] != self.n_line: + raise Grid2OpException("Attempt to set thermal limit on {} powerlines while there are {}" + "on the grid".format(tmp.shape[0], self.n_line)) + if np.any(~np.isfinite(tmp)): + raise Grid2OpException("Impossible to use non finite value for thermal limits.") + else: + raise Grid2OpException(f"You can only set the thermal limits of the environment with a dictionary (in that " + f"case the keys are the line names, and the values the thermal limits) or with " + f"a numpy array that has as many components of the number of powerlines on " + f"the grid. You provided something with type \"{type(thermal_limit)}\" which " + f"is not supported.") self._thermal_limit_a = tmp self.backend.set_thermal_limit(self._thermal_limit_a) @@ -1270,7 +1333,7 @@ def get_obs(self): # obs2 and obs are identical. 
""" - res = self._helper_observation(env=self) + res = self._observation_space(env=self) return res def get_thermal_limit(self): @@ -1494,7 +1557,7 @@ def step(self, action): is_legal, reason = self._game_rules(action=action, env=self) if not is_legal: # action is replace by do nothing - action = self._helper_action_player({}) + action = self._action_space({}) init_disp = 1.0 * action._redispatch # dispatching action action_storage_power = 1.0 * action._storage_power # battery information except_.append(reason) @@ -1503,7 +1566,7 @@ def step(self, action): ambiguous, except_tmp = action.is_ambiguous() if ambiguous: # action is replace by do nothing - action = self._helper_action_player({}) + action = self._action_space({}) init_disp = 1.0 * action._redispatch # dispatching action action_storage_power = 1.0 * action._storage_power # battery information is_ambiguous = True @@ -1560,7 +1623,7 @@ def step(self, action): valid_disp, except_tmp, info_ = self._prepare_redisp(action, new_p, already_modified_gen) if except_tmp is not None: - action = self._helper_action_player({}) + action = self._action_space({}) is_illegal_redisp = True except_.append(except_tmp) @@ -1578,7 +1641,7 @@ def step(self, action): valid_disp, except_tmp, info_ = self._make_redisp(already_modified_gen, new_p) if not valid_disp or except_tmp is not None: # game over case (divergence of the scipy routine to compute redispatching) - action = self._helper_action_player({}) + action = self._action_space({}) is_illegal_redisp = True except_.append(except_tmp) is_done = True @@ -1593,7 +1656,7 @@ def step(self, action): except_tmp = self._handle_updown_times(gen_up_before, self._actual_dispatch) if except_tmp is not None: is_illegal_reco = True - action = self._helper_action_player({}) + action = self._action_space({}) except_.append(except_tmp) self._time_redisp += time.time() - beg__redisp @@ -1887,12 +1950,12 @@ def attach_layout(self, grid_layout): "that will be used the grid layout. 
The error is: \"{}\"" "".format(el, e_)) super().attach_layout(res) - if self._helper_action_player is not None: - self._helper_action_player.attach_layout(res) + if self._action_space is not None: + self._action_space.attach_layout(res) if self._helper_action_env is not None: self._helper_action_env.attach_layout(res) - if self._helper_observation is not None: - self._helper_observation.attach_layout(res) + if self._observation_space is not None: + self._observation_space.attach_layout(res) if self._voltage_controler is not None: self._voltage_controler.attach_layout(res) if self._opponent_action_space is not None: @@ -1958,7 +2021,7 @@ def fast_forward_chronics(self, nb_timestep): self._times_before_topology_actionable[:] = np.maximum(ff_time_topo_act, min_time_topo) # Update to the fast forward state using a do nothing action - self.step(self._helper_action_player({})) + self.step(self._action_space({})) def get_current_line_status(self): """ diff --git a/grid2op/Environment/BaseMultiProcessEnv.py b/grid2op/Environment/BaseMultiProcessEnv.py index 12d01f29b..6eaf72c5c 100644 --- a/grid2op/Environment/BaseMultiProcessEnv.py +++ b/grid2op/Environment/BaseMultiProcessEnv.py @@ -177,6 +177,12 @@ def run(self): elif cmd == "set_id": self.env.set_id(data) self.remote.send(None) + elif cmd == "sim": + action = self.env.action_space.from_vect(data) + obs = self.env.get_obs() + sim_obs, sim_reward, sim_done, sim_info = obs.simulate(action) + sim_obs_v = sim_obs.to_vect() + self.remote.send((sim_obs_v, sim_reward, sim_done, sim_info)) elif hasattr(self.env, cmd): tmp = getattr(self.env, cmd) self.remote.send(tmp) @@ -235,7 +241,6 @@ class BaseMultiProcessEnvironment(GridObjects): """ def __init__(self, envs, obs_as_class=True, return_info=True): GridObjects.__init__(self) - self.envs = envs for env in envs: if not isinstance(env, Environment): raise MultiEnvException("You provided environment of type \"{}\" which is not supported." 
@@ -244,17 +249,21 @@ def __init__(self, envs, obs_as_class=True, return_info=True): self.nb_env = len(envs) max_int = np.iinfo(dt_int).max - self._remotes, self._work_remotes = zip(*[Pipe() for _ in range(self.nb_env)]) - - env_params = [envs[e].get_kwargs(with_backend=False) for e in range(self.nb_env)] + _remotes, _work_remotes = zip(*[Pipe() for _ in range(self.nb_env)]) + env_params = [sub_env.get_kwargs(with_backend=False) for sub_env in envs] self._ps = [RemoteEnv(env_params=env_, remote=work_remote, parent_remote=remote, - name="{}_subprocess_{}".format(envs[i].name, i), + name="{}_{}".format(envs[i].name, i), return_info=return_info, seed=envs[i].space_prng.randint(max_int)) - for i, (work_remote, remote, env_) in enumerate(zip(self._work_remotes, self._remotes, env_params))] + for i, (work_remote, remote, env_) in enumerate(zip(_work_remotes, _remotes, env_params))] + + # on windows, this has to be created after + self.envs = envs + self._remotes = _remotes + self._work_remotes = _work_remotes for p in self._ps: p.daemon = True # if the main process crashes, we should not cause things to hang @@ -460,8 +469,8 @@ def set_ff(self, ff_max=7*24*60/5): """ try: ff_max = int(ff_max) - except: - raise RuntimeError("ff_max parameters should be convertible to an integer.") + except Exception as exc_: + raise RuntimeError("ff_max parameters should be convertible to an integer.") from exc_ for remote in self._remotes: remote.send(('f', ff_max)) @@ -491,6 +500,66 @@ def get_obs(self): res = [self.envs[e].observation_space.from_vect(remote.recv()) for e, remote in enumerate(self._remotes)] return res + def _send_sim(self, actions): + for remote, action in zip(self._remotes, actions): + remote.send(('sim', action.to_vect())) + self._waiting = True + + def simulate(self, actions): + """ + Perform the equivalent of `obs.simulate` in all the underlying environment + + Parameters + ---------- + actions: ``list`` + List of all action to simulate + + Returns + --------- + 
sim_obs: + The observation resulting from the simulation + sim_rews: + The reward resulting from the simulation + sim_dones: + For each simulation, whether or not this the simulated action lead to a game over + sim_infos: + Additional information for each simulated actions. + + Examples + -------- + + You can use this feature like: + + .. code-block:: + + import grid2op + from grid2op.Environment import BaseMultiProcessEnvironment + + env_name = ... # for example "l2rpn_case14_sandbox" + env1 = grid2op.make(env_name) + env2 = grid2op.make(env_name) + + multi_env = BaseMultiProcessEnvironment([env1, env2]) + obss = multi_env.reset() + + # simulate + actions = [env1.action_space(), env2.action_space()] + sim_obss, sim_rs, sim_ds, sim_is = multi_env.simulate(actions) + + """ + if len(actions) != self.nb_env: + raise MultiEnvException("Incorrect number of actions provided. You provided {} actions, but the " + "MultiEnvironment counts {} different environment." + "".format(len(actions), self.nb_env)) + for act in actions: + if not isinstance(act, BaseAction): + raise MultiEnvException("All actions send to MultiEnvironment.step should be of type " + "\"grid2op.BaseAction\" and not {}".format(type(act))) + + self._send_sim(actions) + sim_obs, sim_rews, sim_dones, sim_infos = self._wait_for_obs() + return sim_obs, sim_rews, sim_dones, sim_infos + def __getattr__(self, name): """ This function is used to get the attribute of the underlying sub environments. 
diff --git a/grid2op/Environment/Environment.py b/grid2op/Environment/Environment.py index f0fced3f5..8c749ceeb 100644 --- a/grid2op/Environment/Environment.py +++ b/grid2op/Environment/Environment.py @@ -24,6 +24,8 @@ from grid2op.Environment.BaseEnv import BaseEnv from grid2op.Opponent import BaseOpponent, NeverAttackBudget +from grid2op.Backend import PandaPowerBackend + class Environment(BaseEnv): """ @@ -107,8 +109,8 @@ def __init__(self, self.name = name # for gym compatibility (initialized below) - self.action_space = None - self.observation_space = None + # self.action_space = None + # self.observation_space = None self.reward_range = None self.viewer = None self.metadata = None @@ -171,7 +173,12 @@ def _init_backend(self, self.backend.load_grid(self._init_grid_path) # the real powergrid of the environment self.backend.load_redispacthing_data(self.get_path_env()) self.backend.load_storage_data(self.get_path_env()) - self.backend.load_grid_layout(self.get_path_env()) + exc_ = self.backend.load_grid_layout(self.get_path_env()) + if exc_ is not None: + warnings.warn(f"No layout have been found for you grid (or the layout provided was corrupted). You will " + f"not be able to use the renderer, plot the grid etc. The error was \"{exc_}\"") + + # to force the initialization of the backend to the proper type self.backend.assert_grid_correct() self._handle_compat_glop_version() @@ -229,18 +236,20 @@ def _init_backend(self, self._complete_action_cls = CompleteAction.init_grid(gridobj=bk_type) self._helper_action_class = ActionSpace.init_grid(gridobj=bk_type) - self._helper_action_player = self._helper_action_class(gridobj=bk_type, - actionClass=actionClass, - legal_action=self._game_rules.legal_action) + self._action_space = self._helper_action_class(gridobj=bk_type, + actionClass=actionClass, + legal_action=self._game_rules.legal_action) # action that affect the grid made by the environment. 
self._helper_action_env = self._helper_action_class(gridobj=bk_type, actionClass=CompleteAction, legal_action=self._game_rules.legal_action) + self._helper_observation_class = ObservationSpace.init_grid(gridobj=bk_type) - self._helper_observation = self._helper_observation_class(gridobj=bk_type, - observationClass=observationClass, - rewardClass=rewardClass, - env=self) + self._observation_space = self._helper_observation_class(gridobj=bk_type, + observationClass=observationClass, + actionClass=actionClass, + rewardClass=rewardClass, + env=self) # handles input data if not isinstance(chronics_handler, ChronicsHandler): @@ -290,8 +299,8 @@ def _init_backend(self, self.backend.assert_grid_correct_after_powerflow() # for gym compatibility - self.action_space = self._helper_action_player # this should be an action !!! - self.observation_space = self._helper_observation # this return an observation. + # self._action_space = self._helper_action_player # this should be an action !!! + # self._observation_space = self._helper_observation # this return an observation. self.reward_range = self._reward_helper.range() self.viewer = None self.viewer_fig = None @@ -305,6 +314,14 @@ def _init_backend(self, # reset everything to be consistent self._reset_vectors_and_timings() + @property + def _helper_observation(self): + return self._observation_space + + @property + def _helper_action_player(self): + return self._action_space + def _handle_compat_glop_version(self): if self._compat_glop_version is not None and self._compat_glop_version != grid2op.__version__: warnings.warn("You are using a grid2op \"compatibility\" environment. This means that some " @@ -443,6 +460,28 @@ def set_chunk_size(self, new_chunk_size): self.chronics_handler.set_chunk_size(new_chunk_size) + def simulate(self, action): + """ + Another method to call `obs.simulate` to ensure compatibility between multi environment and + regular one. 
+ + Parameters + ---------- + action: + A grid2op action + + Returns + ------- + Same return type as :func:`grid2op.Environment.BaseEnv.step` or + :func:`grid2op.Observation.BaseObservation.simulate` + + Notes + ----- + Prefer using `obs.simulate` if possible, it will be faster than this function. + + """ + return self.get_obs().simulate(action) + def set_id(self, id_): """ Set the id that will be used at the next call to :func:`Environment.reset`. @@ -501,9 +540,9 @@ def set_id(self, id_): """ try: id_ = int(id_) - except: + except Exception as exc_: raise EnvError("the \"id_\" parameters should be convertible to integer and not be of type {}" - "".format(type(id_))) + "with error \n\"{}\"".format(type(id_), exc_)) self.chronics_handler.tell_id(id_-1) @@ -565,7 +604,7 @@ def attach_renderer(self, graph_layout=None): "Please install matplotlib or run pip install grid2op[optional]" raise Grid2OpException(err_msg) from None - self.viewer = PlotMatplot(self._helper_observation) + self.viewer = PlotMatplot(self._observation_space) self.viewer_fig = None # Set renderer modes self.metadata = {'render.modes': ["human", "silent"]} @@ -738,9 +777,8 @@ def copy(self): tmp_backend = self.backend self.backend = None - tmp_obs_space = self._helper_observation - self.observation_space = None - self._helper_observation = None + tmp_obs_space = self._observation_space + self._observation_space = None obs_tmp = self.current_obs self.current_obs = None @@ -750,17 +788,15 @@ def copy(self): res = copy.deepcopy(self) res.backend = tmp_backend.copy() - res._helper_observation = tmp_obs_space.copy() - res.observation_space = res._helper_observation + res._observation_space = tmp_obs_space.copy() res.current_obs = obs_tmp.copy() - res.current_obs._obs_env = res._helper_observation.obs_env # retrieve the pointer to the proper backend + res.current_obs._obs_env = res._observation_space.obs_env # retrieve the pointer to the proper backend res._voltage_controler = volt_cont.copy() if 
self._thermal_limit_a is not None: res.backend.set_thermal_limit(self._thermal_limit_a) self.backend = tmp_backend - self.observation_space = tmp_obs_space - self._helper_observation = tmp_obs_space + self._observation_space = tmp_obs_space self.current_obs = obs_tmp self._voltage_controler = volt_cont return res @@ -807,8 +843,8 @@ def get_kwargs(self, with_backend=True): res["backend"] = self.backend.copy() res["parameters"] = copy.deepcopy(self._parameters) res["names_chronics_to_backend"] = copy.deepcopy(self.names_chronics_to_backend) - res["actionClass"] = self._actionClass - res["observationClass"] = self._observationClass + res["actionClass"] = self._actionClass_orig + res["observationClass"] = self._observationClass_orig res["rewardClass"] = self._rewardClass res["legalActClass"] = self._legalActClass res["epsilon_poly"] = self._epsilon_poly diff --git a/grid2op/Episode/EpisodeData.py b/grid2op/Episode/EpisodeData.py index f370a47fd..883e28487 100644 --- a/grid2op/Episode/EpisodeData.py +++ b/grid2op/Episode/EpisodeData.py @@ -557,16 +557,23 @@ def incr_store(self, efficient_storing, time_step, time_step_duration, if self.force_detail or self.serialize: self.actions.update(time_step, act, efficient_storing) self.env_actions.update(time_step, env_act, efficient_storing) + # deactive the possibility to do "forecast" in this serialized instance + tmp_obs_env = obs._obs_env + tmp_inj = obs._forecasted_inj + obs._obs_env = None + obs._forecasted_inj = [] self.observations.update(time_step + 1, obs, efficient_storing) + obs._obs_env = tmp_obs_env + obs._forecasted_inj = tmp_inj + if opp_attack is not None: - self.attacks.update( - time_step, opp_attack, efficient_storing) + self.attacks.update(time_step, opp_attack, efficient_storing) else: if efficient_storing: self.attacks.collection[time_step - 1, :] = 0. else: - self.attack = np.concatenate( - (self.attack, self.attack_templ)) + # might not work ! 
+ self.attacks = np.concatenate((self.attacks, self.attack_templ)) if efficient_storing: # efficient way of writing @@ -579,6 +586,7 @@ def incr_store(self, efficient_storing, time_step, time_step_duration, else: self.disc_lines[time_step - 1, :] = self.disc_lines_templ else: + # might not work ! # completely inefficient way of writing self.times = np.concatenate( (self.times, (time_step_duration,))) @@ -599,7 +607,7 @@ def incr_store(self, efficient_storing, time_step, time_step_duration, def _convert_to_float(self, el): try: res = float(el) - except Exception as e: + except Exception as exc_: res = -float('inf') return res diff --git a/grid2op/MakeEnv/MakeFromPath.py b/grid2op/MakeEnv/MakeFromPath.py index 331da270d..fb554887d 100644 --- a/grid2op/MakeEnv/MakeFromPath.py +++ b/grid2op/MakeEnv/MakeFromPath.py @@ -10,6 +10,7 @@ import importlib.util import numpy as np import json +import warnings from grid2op.Environment import Environment from grid2op.Backend import Backend, PandaPowerBackend @@ -30,24 +31,28 @@ CHALLENGE_NAME = "competition" ERR_MSG_KWARGS = { "backend": "The backend of the environment (keyword \"backend\") must be an instance of grid2op.Backend", - "observation_class": "The type of observation of the environment (keyword \"observation_class\")" \ + "observation_class": "The type of observation of the environment (keyword \"observation_class\")" " must be a subclass of grid2op.BaseObservation", "param": "The parameters of the environment (keyword \"param\") must be an instance of grid2op.Parameters", - "gamerules_class": "The type of rules of the environment (keyword \"gamerules_class\")" \ + "gamerules_class": "The type of rules of the environment (keyword \"gamerules_class\")" " must be a subclass of grid2op.BaseRules", - "reward_class": "The type of reward in the environment (keyword \"reward_class\") must be a subclass of grid2op.BaseReward", - "action_class": "The type of action of the environment (keyword \"action_class\") must be a 
subclass of grid2op.BaseAction", - "data_feeding_kwargs": "The argument to build the data generation process [chronics]" \ + "reward_class": "The type of reward in the environment (keyword \"reward_class\") must be a subclass of " + "grid2op.BaseReward", + "action_class": "The type of action of the environment (keyword \"action_class\") must be a subclass of " + "grid2op.BaseAction", + "data_feeding_kwargs": "The argument to build the data generation process [chronics]" " (keyword \"data_feeding_kwargs\") should be a dictionnary.", - "chronics_class": "The argument to build the data generation process [chronics] (keyword \"chronics_class\")" \ + "chronics_class": "The argument to build the data generation process [chronics] (keyword \"chronics_class\")" " should be a class that inherit grid2op.Chronics.GridValue.", - "chronics_handler": "The argument to build the data generation process [chronics] (keyword \"data_feeding\")" \ + "chronics_handler": "The argument to build the data generation process [chronics] (keyword \"data_feeding\")" " should be a class that inherit grid2op.ChronicsHandler.ChronicsHandler.", - "voltagecontroler_class": "The argument to build the online controler for chronics (keyword \"volagecontroler_class\")" \ + "voltagecontroler_class": "The argument to build the online controler for chronics (keyword " + "\"volagecontroler_class\")" " should be a class that inherit grid2op.VoltageControler.ControlVoltageFromFile.", - "names_chronics_to_grid": "The converter between names (keyword \"names_chronics_to_backend\") should be a dictionnary.", + "names_chronics_to_grid": "The converter between names (keyword \"names_chronics_to_backend\") " + "should be a dictionnary.", "other_rewards": "The argument to build the online controler for chronics (keyword \"other_rewards\") " - "should be dictionnary.", + "should be dictionary.", "chronics_path": "The path where the data is located (keyword \"chronics_path\") should be a string.", "grid_path": "The 
path where the grid is located (keyword \"grid_path\") should be a string.", @@ -61,8 +66,8 @@ "opponent_budget_class": "The opponent budget class (\"opponent_budget_class\") should derive from " "\"BaseActionBudget\".", "opponent_budget_per_ts": "The increase of the opponent's budget (\"opponent_budget_per_ts\") should be a float.", - "kwargs_opponent": "The extra kwargs argument used to properly initiliazed the opponent " - "(\"kwargs_opponent\") shoud " + "kwargs_opponent": "The extra kwargs argument used to properly initialized the opponent " + "(\"kwargs_opponent\") should " "be a dictionary.", DIFFICULTY_NAME: "Unknown difficulty level {difficulty} for this environment. Authorized difficulties are " "{difficulties}" @@ -76,8 +81,8 @@ def _check_kwargs(kwargs): for el in kwargs: - if not el in ERR_MSG_KWARGS.keys(): - raise EnvError("Unknown keyword argument \"{}\" used to create an Environement. " + if el not in ERR_MSG_KWARGS.keys(): + raise EnvError("Unknown keyword argument \"{}\" used to create an Environment. " "No Environment will be created. 
" "Accepted keyword arguments are {}".format(el, ERR_MSG_KWARGS.keys())) @@ -219,7 +224,11 @@ def make_from_dataset_path(dataset_path="/", else: # otherwise use it chronics_path_abs = os.path.abspath(chronics_path) - _check_path(chronics_path_abs, "Dataset chronics folder") + exc_chronics = None + try: + _check_path(chronics_path_abs, "Dataset chronics folder") + except Exception as exc_: + exc_chronics = exc_ # Compute and find backend/grid file grid_path = _get_default_aux("grid_path", kwargs, @@ -233,7 +242,11 @@ def make_from_dataset_path(dataset_path="/", # Compute and find grid layout file grid_layout_path_abs = os.path.abspath(os.path.join(dataset_path_abs, NAME_GRID_LAYOUT_FILE)) - _check_path(grid_layout_path_abs, "Dataset grid layout") + try: + _check_path(grid_layout_path_abs, "Dataset grid layout") + except EnvError as exc_: + warnings.warn(f"Impossible to load the coordinate of the substation with error: \"{exc_}\". Expect some issue " + f"if you attempt to plot the grid.") # Check provided config overrides are valid _check_kwargs(kwargs) @@ -248,16 +261,18 @@ def make_from_dataset_path(dataset_path="/", config_module = importlib.util.module_from_spec(spec) spec.loader.exec_module(config_module) config_data = config_module.config - except Exception as e: - print (e) + except Exception as exc_: + print(exc_) raise EnvError("Invalid dataset config file: {}".format(config_path_abs)) from None # Get graph layout + graph_layout = None try: with open(grid_layout_path_abs) as layout_fp: graph_layout = json.load(layout_fp) - except Exception as e: - raise EnvError("Dataset {} doesn't have a valid graph layout".format(config_path_abs)) + except Exception as exc_: + warnings.warn("Dataset {} doesn't have a valid graph layout. Expect some failures when attempting " + "to plot the grid. 
Error was: {}".format(config_path_abs, exc_)) # Get thermal limits thermal_limits = None @@ -307,9 +322,10 @@ def make_from_dataset_path(dataset_path="/", my_difficulty = kwargs[DIFFICULTY_NAME] try: my_difficulty = str(my_difficulty) - except: + except Exception as exc_: raise EnvError("Impossible to convert your difficulty into a valid string. Please make sure to " - "pass a string (eg \"2\") and not something else (eg. int(2)) as a difficulty") + "pass a string (eg \"2\") and not something else (eg. int(2)) as a difficulty." + "Error was \n{}".format(exc_)) if my_difficulty in dict_: param.init_from_dict(dict_[my_difficulty]) else: @@ -395,7 +411,7 @@ def make_from_dataset_path(dataset_path="/", defaultinstance=default_chronics_kwargs, msg_error=ERR_MSG_KWARGS["data_feeding_kwargs"]) for el in default_chronics_kwargs: - if not el in data_feeding_kwargs: + if el not in data_feeding_kwargs: data_feeding_kwargs[el] = default_chronics_kwargs[el] ### the chronics generator @@ -404,6 +420,10 @@ def make_from_dataset_path(dataset_path="/", defaultClass=data_feeding_kwargs["chronicsClass"], msg_error=ERR_MSG_KWARGS["chronics_class"], isclass=True) + if (chronics_class_used != ChangeNothing) and exc_chronics is not None: + raise EnvError(f"Impossible to find the chronics for your environment. 
Please make sure to provide " + f"a folder \"{NAME_CHRONICS_FOLDER}\" within your environment folder.") + data_feeding_kwargs["chronicsClass"] = chronics_class_used data_feeding = _get_default_aux("data_feeding", kwargs, defaultClassApp=ChronicsHandler, diff --git a/grid2op/Observation/BaseObservation.py b/grid2op/Observation/BaseObservation.py index 1e88df79b..a7eb44899 100644 --- a/grid2op/Observation/BaseObservation.py +++ b/grid2op/Observation/BaseObservation.py @@ -308,6 +308,14 @@ def __init__(self, self.curtailment = np.empty(shape=self.n_gen, dtype=dt_float) self.curtailment_limit = np.empty(shape=self.n_gen, dtype=dt_float) + # the "theta" (voltage angle, in degree) + self.support_theta = False + self.theta_or = np.empty(shape=self.n_line, dtype=dt_float) + self.theta_ex = np.empty(shape=self.n_line, dtype=dt_float) + self.load_theta = np.empty(shape=self.n_load, dtype=dt_float) + self.gen_theta = np.empty(shape=self.n_gen, dtype=dt_float) + self.storage_theta = np.empty(shape=self.n_storage, dtype=dt_float) + def state_of(self, _sentinel=None, load_id=None, @@ -355,6 +363,7 @@ def state_of(self, - "p" the active value consumed by the load - "q" the reactive value consumed by the load - "v" the voltage magnitude of the bus to which the load is connected + - "theta" (optional) the voltage angle (in degree) of the bus to which the load is connected - "bus" on which bus the load is connected in the substation - "sub_id" the id of the substation to which the load is connected @@ -363,20 +372,23 @@ def state_of(self, - "p" the active value produced by the generator - "q" the reactive value consumed by the generator - "v" the voltage magnitude of the bus to which the generator is connected + - "theta" (optional) the voltage angle (in degree) of the bus to which the gen. 
is connected - "bus" on which bus the generator is connected in the substation - "sub_id" the id of the substation to which the generator is connected - "actual_dispatch" the actual dispatch implemented for this generator - "target_dispatch" the target dispatch (cumulation of all previously asked dispatch by the agent) for this generator - - if a powerline is inspected then the keys are "origin" and "extremity" each being dictionnary with keys: + - if a powerline is inspected then the keys are "origin" and "extremity" each being dictionary with keys: - - "p" the active flow on line end (extremity or origin) - - "q" the reactive flow on line end (extremity or origin) - - "v" the voltage magnitude of the bus to which the line end (extremity or origin) is connected - - "bus" on which bus the line end (extremity or origin) is connected in the substation - - "sub_id" the id of the substation to which the generator is connected - - "a" the current flow on the line end (extremity or origin) + - "p" the active flow on line side (extremity or origin) + - "q" the reactive flow on line side (extremity or origin) + - "v" the voltage magnitude of the bus to which the line side (extremity or origin) is connected + - "theta" (optional) the voltage angle (in degree) of the bus to which line side (extremity or origin) + is connected + - "bus" on which bus the line side (extremity or origin) is connected in the substation + - "sub_id" the id of the substation to which the line side is connected + - "a" the current flow on the line side (extremity or origin) In the case of a powerline, additional information are: @@ -390,6 +402,7 @@ def state_of(self, - "storage_power": the power the unit actually produced / absorbed - "storage_charge": the state of the charge of the storage unit - "storage_power_target": the power production / absorbtion targer + - "storage_theta": (optional) the voltage angle of the bus at which the storage unit is connected - "bus": the bus (1 or 2) to which the 
storage unit is connected - "sub_id" : the id of the substation to which the sotrage unit is connected @@ -429,6 +442,8 @@ def state_of(self, "bus": self.topo_vect[self.load_pos_topo_vect[load_id]], "sub_id": self.load_to_subid[load_id] } + if self.support_theta: + res["theta"] = self.load_theta[load_id] elif gen_id is not None: if line_id is not None or substation_id is not None or storage_id is not None: raise Grid2OpException("You can only the inspect the effect of an action on one single element") @@ -448,6 +463,8 @@ def state_of(self, "curtailment_limit": self.curtailment_limit[gen_id], "p_before_curtail": self.gen_p_before_curtail[gen_id], } + if self.support_theta: + res["theta"] = self.gen_theta[gen_id] elif line_id is not None: if substation_id is not None or storage_id is not None: raise Grid2OpException("You can only the inspect the effect of an action on one single element") @@ -466,6 +483,8 @@ def state_of(self, "bus": self.topo_vect[self.line_or_pos_topo_vect[line_id]], "sub_id": self.line_or_to_subid[line_id] } + if self.support_theta: + res["origin"]["theta"] = self.theta_or[line_id] # extremity information res["extremity"] = { "p": self.p_ex[line_id], @@ -475,6 +494,8 @@ def state_of(self, "bus": self.topo_vect[self.line_ex_pos_topo_vect[line_id]], "sub_id": self.line_ex_to_subid[line_id] } + if self.support_theta: + res["origin"]["theta"] = self.theta_ex[line_id] # maintenance information res["maintenance"] = {"next": self.time_next_maintenance[line_id], @@ -497,6 +518,8 @@ def state_of(self, res["storage_power_target"] = self.storage_power_target[storage_id] res["bus"] = self.topo_vect[self.storage_pos_topo_vect[storage_id]] res["sub_id"] = self.storage_to_subid[storage_id] + if self.support_theta: + res["origin"]["theta"] = self.storage_theta[storage_id] else: if substation_id >= len(self.sub_info): raise Grid2OpException("There are no substation of id \"substation_id={}\" in this grid.".format(substation_id)) @@ -615,6 +638,13 @@ def 
reset(self): self._shunt_v[:] = np.NaN self._shunt_bus[:] = -1 + self.support_theta = False + self.theta_or[:] = np.NaN + self.theta_ex[:] = np.NaN + self.load_theta[:] = np.NaN + self.gen_theta[:] = np.NaN + self.storage_theta[:] = np.NaN + def set_game_over(self): """ Set the observation to the "game over" state: @@ -689,6 +719,14 @@ def set_game_over(self): self.minute_of_hour = 0 self.day_of_week = 1 + if self.support_theta: + # by convention, I say it's 0 if the grid is in total blackout + self.theta_or[:] = 0. + self.theta_ex[:] = 0. + self.load_theta[:] = 0. + self.gen_theta[:] = 0. + self.storage_theta[:] = 0. + def __compare_stats(self, other, name): attr_me = getattr(self, name) attr_other = getattr(other, name) @@ -787,8 +825,13 @@ def __sub__(self, other): """ same_grid = type(self).same_grid_class(type(other)) if not same_grid: + import pdb + pdb.set_trace() raise RuntimeError("Cannot compare to observation not coming from the same powergrid.") + tmp_obs_env = self._obs_env + self._obs_env = None # keep aside the backend res = copy.deepcopy(self) + self._obs_env = tmp_obs_env for stat_nm in self._attr_eq: me_ = getattr(self, stat_nm) oth_ = getattr(other, stat_nm) @@ -1299,9 +1342,7 @@ def _add_edges_simple(self, vector, attr_nm, lor_bus, lex_bus, graph): for (k1, k2), val in dict_.items(): dict_2[(k2, k1)] = val dict_.update(dict_2) - networkx.set_edge_attributes(graph, - dict_, - attr_nm) + networkx.set_edge_attributes(graph, dict_, attr_nm) def _add_edges_multi(self, vector_or, @@ -1348,12 +1389,8 @@ def _add_edges_multi(self, # networkx and grid2op do not share the same "direction" dict_ex[(k2, k1)] = dict_or_glop[(k1, k2)] - networkx.set_edge_attributes(graph, - dict_or, - "{}_or".format(attr_nm)) - networkx.set_edge_attributes(graph, - dict_ex, - "{}_ex".format(attr_nm)) + networkx.set_edge_attributes(graph, dict_or, "{}_or".format(attr_nm)) + networkx.set_edge_attributes(graph, dict_ex, "{}_ex".format(attr_nm)) def as_networkx(self): """ @@ 
-1453,6 +1490,14 @@ def as_networkx(self): bus_v = np.zeros(mat_p.shape[0]) bus_v[lor_bus] = self.v_or bus_v[lex_bus] = self.v_ex + bus_theta = np.zeros(mat_p.shape[0]) + bus_subid = np.zeros(mat_p.shape[0], dtype=dt_int) + bus_subid[lor_bus] = self.line_or_to_subid + bus_subid[lex_bus] = self.line_ex_to_subid + if self.support_theta: + bus_theta[lor_bus] = self.theta_or + bus_theta[lex_bus] = self.theta_ex + # bus active injection bus_p = mat_p.diagonal().copy() mat_p.setdiag(0.) @@ -1465,6 +1510,10 @@ def as_networkx(self): networkx.set_node_attributes(graph, {el: val for el, val in enumerate(bus_p)}, "p") networkx.set_node_attributes(graph, {el: val for el, val in enumerate(mat_q.diagonal())}, "q") networkx.set_node_attributes(graph, {el: val for el, val in enumerate(bus_v)}, "v") + networkx.set_node_attributes(graph, {el: val for el, val in enumerate(bus_subid)}, "sub_id") + if self.support_theta: + networkx.set_node_attributes(graph, {el: val for el, val in enumerate(bus_theta)}, "theta") + dict_cooldown = {el: val for el, val in enumerate(self.time_before_cooldown_sub)} dict_cooldown2 = {} for k, v in dict_cooldown.items(): @@ -1476,13 +1525,15 @@ def as_networkx(self): self._add_edges_multi(self.p_or, self.p_ex, "p", lor_bus, lex_bus, graph) self._add_edges_multi(self.q_or, self.q_ex, "q", lor_bus, lex_bus, graph) self._add_edges_multi(self.a_or, self.a_ex, "a", lor_bus, lex_bus, graph) + if self.support_theta: + self._add_edges_multi(self.theta_or, self.theta_ex, "theta", lor_bus, lex_bus, graph) self._add_edges_simple(self.rho, "rho", lor_bus, lex_bus, graph) self._add_edges_simple(self.time_before_cooldown_line, "cooldown", lor_bus, lex_bus, graph) self._add_edges_simple(self.line_status, "status", lor_bus, lex_bus, graph) self._add_edges_simple(self.thermal_limit, "thermal_limit", lor_bus, lex_bus, graph) self._add_edges_simple(self.timestep_overflow, "timestep_overflow", lor_bus, lex_bus, graph) - networkx.freeze(graph) + networkx.freeze(graph) # extra 
layer of security: prevent accidental modification of this graph return graph def get_forecasted_inj(self, time_step=1): @@ -1624,10 +1675,16 @@ def simulate(self, action, time_step=1): # `simulated_info` gives extra information on this forecast state """ - if self.action_helper is None or self._obs_env is None: + if self.action_helper is None: raise NoForecastAvailable("No forecasts are available for this instance of BaseObservation " "(no action_space " "and no simulated environment are set).") + if self._obs_env is None: + raise EnvError("This observation has no \"environment used for simulation\" (_obs_env) is not created. " + "This is the case if you loaded this observation from a disk (for example using " + "EpisodeData) " + "or used a Runner with multi processing with the \"add_detailed_output=True\" " + "flag. This is a feature of grid2op: you cannot serialize backends.") if time_step < 0: raise NoForecastAvailable("Impossible to forecast in the past.") diff --git a/grid2op/Observation/CompleteObservation.py b/grid2op/Observation/CompleteObservation.py index a47085daa..754bddd95 100644 --- a/grid2op/Observation/CompleteObservation.py +++ b/grid2op/Observation/CompleteObservation.py @@ -105,11 +105,13 @@ class CompleteObservation(BaseObservation): "time_before_cooldown_line", "time_before_cooldown_sub", "time_next_maintenance", "duration_next_maintenance", "target_dispatch", "actual_dispatch", - # TODO: backward compatibility "storage_charge", "storage_power_target", "storage_power", "gen_p_before_curtail", "curtailment", "curtailment_limit" ] - attr_list_json = ["_shunt_p", "_shunt_q", "_shunt_v", "_shunt_bus"] + attr_list_json = ["_shunt_p", "_shunt_q", "_shunt_v", "_shunt_bus", + "_thermal_limit", + "support_theta", + "theta_or", "theta_ex", "load_theta", "gen_theta", "storage_theta"] attr_list_set = set(attr_list_vect) def __init__(self, @@ -199,3 +201,8 @@ def update(self, env, with_forecast=True): self.curtailment[:] = 0. 
self.gen_p_before_curtail[:] = self.gen_p self.curtailment_limit[:] = 1.0 + + if env.backend.can_output_theta: + self.support_theta = True # backend supports the computation of theta + self.theta_or[:], self.theta_ex[:], self.load_theta[:], self.gen_theta[:], self.storage_theta[:] = \ + env.backend.get_theta() diff --git a/grid2op/Observation/ObservationSpace.py b/grid2op/Observation/ObservationSpace.py index fcade0bf1..db9f4af14 100644 --- a/grid2op/Observation/ObservationSpace.py +++ b/grid2op/Observation/ObservationSpace.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import sys import copy from grid2op.Observation.SerializableObservationSpace import SerializableObservationSpace @@ -58,6 +59,7 @@ def __init__(self, env, rewardClass=None, observationClass=CompleteObservation, + actionClass=None, with_forecast=True): """ INTERNAL @@ -67,6 +69,10 @@ def __init__(self, Env: requires :attr:`grid2op.Environment.parameters` and :attr:`grid2op.Environment.backend` to be valid """ + if actionClass is None: + from grid2op.Action import CompleteAction + actionClass = CompleteAction + SerializableObservationSpace.__init__(self, gridobj, observationClass=observationClass) self.with_forecast = with_forecast @@ -86,17 +92,16 @@ def __init__(self, # TODO here: have another backend maybe self._backend_obs = env.backend.copy() - _ObsEnv_class = _ObsEnv.init_grid(type(self._backend_obs)) + _ObsEnv_class = _ObsEnv.init_grid(type(env.backend), force_module=_ObsEnv.__module__) + setattr(sys.modules[_ObsEnv.__module__], _ObsEnv_class.__name__, _ObsEnv_class) self.obs_env = _ObsEnv_class(backend_instanciated=self._backend_obs, obsClass=observationClass, # do not put self.observationClass otherwise it's initialized twice parameters=self._simulate_parameters, reward_helper=self.reward_helper, action_helper=self.action_helper_env, thermal_limit_a=env.get_thermal_limit(), 
- legalActClass=env._legalActClass, - donothing_act=env._helper_action_player(), + legalActClass=copy.deepcopy(env._legalActClass), other_rewards=other_rewards, - completeActionClass=env._helper_action_env.actionClass, helper_action_class=env._helper_action_class, helper_action_env=env._helper_action_env) for k, v in self.obs_env.other_rewards.items(): diff --git a/grid2op/Observation/_ObsEnv.py b/grid2op/Observation/_ObsEnv.py index 423c678f2..133aa53f2 100644 --- a/grid2op/Observation/_ObsEnv.py +++ b/grid2op/Observation/_ObsEnv.py @@ -44,14 +44,12 @@ class _ObsEnv(BaseEnv): """ def __init__(self, backend_instanciated, - completeActionClass, parameters, reward_helper, obsClass, action_helper, thermal_limit_a, legalActClass, - donothing_act, helper_action_class, helper_action_env, other_rewards={}): @@ -121,6 +119,10 @@ def __init__(self, self._sum_curtailment_mw_init = 0. self._sum_curtailment_mw_prev_init = 0. + def _init_myclass(self): + """this class has already all the powergrid information: it is initialized in the obs space !""" + pass + def _init_backend(self, init_grid_path, chronics_handler, @@ -128,11 +130,12 @@ def _init_backend(self, names_chronics_to_backend, actionClass, observationClass, - rewardClass, legalActClass): + rewardClass, + legalActClass): self._env_dc = self.parameters.ENV_DC self.chronics_handler = chronics_handler self.backend = backend - self._has_been_initialized() + self._has_been_initialized() # really important to include this piece of code! 
and just here after the if not issubclass(legalActClass, BaseRules): raise Grid2OpException( @@ -141,7 +144,7 @@ def _init_backend(self, type(legalActClass))) self._game_rules = RulesChecker(legalActClass=legalActClass) self._legalActClass = legalActClass - self._helper_action_player = self._do_nothing + self._action_space = self._do_nothing self.backend.set_thermal_limit(self._thermal_limit_a) self._create_opponent() @@ -153,9 +156,14 @@ def _init_backend(self, # backend has loaded everything self._line_status = np.ones(shape=self.n_line, dtype=dt_bool) self._hazard_duration = np.zeros(shape=self.n_line, dtype=dt_int) - self._has_been_initialized() def _do_nothing(self, x): + """ + this is should be only called within _Obsenv.step, and there, only return the "do nothing" + action. + + This is why this function is used as the "obsenv action space" + """ return self._do_nothing_act def _update_actions(self): diff --git a/grid2op/Opponent/OpponentSpace.py b/grid2op/Opponent/OpponentSpace.py index 7c074b620..5f9e8ddba 100644 --- a/grid2op/Opponent/OpponentSpace.py +++ b/grid2op/Opponent/OpponentSpace.py @@ -165,7 +165,7 @@ def attack(self, observation, agent_action, env_action): self.previous_fails) attack_called = True # If the cost is too high - final_budget = self.budget # TODO add the: + self.budget_per_timestep * (self.attack_duration - 1) + final_budget = self.budget # TODO add the: + self.budget_per_timestep * (self.attack_duration - 1) # i did not do it in case an attack is ok at the beginning, ok at the end, but at some point in the attack # process it is not (but i'm not sure this can happen, and don't have time to think about it right now) if self.attack_duration * self.compute_budget(attack) > final_budget: diff --git a/grid2op/Reward/RedispReward.py b/grid2op/Reward/RedispReward.py index 3e93a0aaa..3a5764841 100644 --- a/grid2op/Reward/RedispReward.py +++ b/grid2op/Reward/RedispReward.py @@ -6,8 +6,11 @@ # SPDX-License-Identifier: MPL-2.0 # This file is 
part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import sys import numpy as np +import re +from grid2op.platform import _IS_WINDOWS, _IS_LINUX, _IS_MACOS from grid2op.Exceptions import Grid2OpException from grid2op.Reward.BaseReward import BaseReward from grid2op.dtypes import dt_float @@ -39,36 +42,127 @@ class RedispReward(BaseReward): # NB this is the default reward of many environments in the grid2op framework + This class depends on some "meta parameters". These meta parameters can be changed when the class is created + in the following way: + + .. code-block:: python + + import grid2op + from grid2op.Reward import RedispReward + + reward_cls = RedispReward.generate_class_custom_params(alpha_redisph=5, + min_load_ratio=0.1, + worst_losses_ratio=0.05, + min_reward=-10., + reward_illegal_ambiguous=0., + least_losses_ratio=0.015) + env_name = ... #eg "l2rpn_case14_sandbox" + env = grid2op.make(env_name,reward_class=reward_cls) + + These meta parameters means: + + - alpha_redisp: extra cost paid when performing redispatching. For 1MW of redispatching done, you pay + "alpha_redisph" + - min_load_ratio: how to compute the minimum load on the grid, based on the total generation (sum of gen_pmax) + - worst_losses_ratio: worst loss possible on the grid (5% is an upper bound for normal grid) + - min_reward: what is the minimum reward of this class (can be parametrized, and is only used when there is + a game over + - reward_illegal_ambiguous: reward given when the action is illegal or ambiguous + - least_losses_ratio: the minimum loss you can have (1.5% of the total demand should be a lower bound for real grid) + + Notes + ------ + On windows and MacOs, due to a compatibility issue with multi-processing, it is not possible to have different + "RedisReward" with different meta parameters (see the "Examples" section). 
+ """ - def __init__(self, alpha_redisph=5.0): + _alpha_redisp = dt_float(5.0) + _min_load_ratio = dt_float(0.1) # min load = min_load_ratio * max_load + _worst_losses_ratio = dt_float(0.05) # worst_losses = worst_losses_ratio * worst_load + _min_reward = dt_float(-10.) # reward when game over + _reward_illegal_ambiguous = dt_float(0.) # reward when action is illegal or ambiguous + _least_losses_ratio = dt_float(0.015) # least_losses = least_losses_ratio * least_loads + + def __init__(self): BaseReward.__init__(self) self.reward_min = None self.reward_max = None self.max_regret = dt_float(0.0) - self.alpha_redisph = dt_float(alpha_redisph) + self.reward_illegal_ambiguous = None + + @classmethod + def generate_class_custom_params(cls, + alpha_redisph=5.0, + min_load_ratio=0.1, # min load = min_load_ratio * max_load + worst_losses_ratio=0.05, # worst_losses = worst_losses_ratio * worst_load + min_reward=-10., + least_losses_ratio=0.015, # least_losses = least_losses_ratio * least_loads + reward_illegal_ambiguous=0., + ): + if _IS_LINUX: + # on linux it's fine, i can create new classes for each meta parameters + nm_res = f"RedispReward_{alpha_redisph:.2f}_{min_load_ratio:.2f}_{worst_losses_ratio:.2f}" + nm_res += f"_{min_reward:.2f}_{least_losses_ratio:.2f}_{reward_illegal_ambiguous:.2f}" + nm_res = re.sub("\\.", "@", nm_res) + cls_attr_as_dict = { + "_alpha_redisp": dt_float(alpha_redisph), + "_min_load_ratio": dt_float(min_load_ratio), + "_worst_losses_ratio": dt_float(worst_losses_ratio), + "_min_reward": dt_float(min_reward), + "_least_losses_ratio": dt_float(least_losses_ratio), + "_reward_illegal_ambiguous": dt_float(reward_illegal_ambiguous) + } + res_cls = type(nm_res, (cls,), cls_attr_as_dict) + res_cls.__module__ = cls.__module__ + setattr(sys.modules[cls.__module__], nm_res, res_cls) + globals()[nm_res] = res_cls + else: + # i mess with the default parameters in the base class, i know i know it's not pretty, but hey... 
+ + # TODO make that prettier and clean the way to make the reward in the env (for example allow to pass + # objects and not just class) + cls._alpha_redisp = dt_float(alpha_redisph) + cls._min_load_ratio = dt_float(min_load_ratio) + cls._worst_losses_ratio = dt_float(worst_losses_ratio) + cls._min_reward = dt_float(min_reward) + cls._least_losses_ratio = dt_float(least_losses_ratio) + cls._reward_illegal_ambiguous = dt_float(reward_illegal_ambiguous) + res_cls = cls + + return res_cls def initialize(self, env): if not env.redispatching_unit_commitment_availble: - raise Grid2OpException("Impossible to use the RedispReward reward with an environment without generators" + raise Grid2OpException("Impossible to use the RedispReward reward with an environment without generators " "cost. Please make sure env.redispatching_unit_commitment_availble is available.") + cls_ = type(self) + worst_marginal_cost = np.max(env.gen_cost_per_MW) worst_load = dt_float(np.sum(env.gen_pmax)) - worst_losses = dt_float(0.05) * worst_load # it's not the worst, but definitely an upper bound - worst_redisp = self.alpha_redisph * np.sum(env.gen_pmax) # not realistic, but an upper bound - self.max_regret = (worst_losses + worst_redisp)*worst_marginal_cost - self.reward_min = dt_float(-10.0) - - least_loads = dt_float(worst_load * 0.5) # half the capacity of the grid - least_losses = dt_float(0.015 * least_loads) # 1.5% of losses + # it's not the worst, but definitely an upper bound + worst_losses = dt_float(cls_._worst_losses_ratio) * worst_load + worst_redisp = cls_._alpha_redisp * np.sum(env.gen_pmax) # not realistic, but an upper bound + self.max_regret = (worst_losses + worst_redisp) * worst_marginal_cost + self.reward_min = dt_float(cls_._min_reward) + + least_loads = dt_float(worst_load * cls_._min_load_ratio) # half the capacity of the grid + least_losses = dt_float(cls_._least_losses_ratio * least_loads) # 1.5% of losses least_redisp = dt_float(0.0) # lower_bound is 0 
base_marginal_cost = np.min(env.gen_cost_per_MW[env.gen_cost_per_MW > 0.]) min_regret = (least_losses + least_redisp) * base_marginal_cost self.reward_max = dt_float((self.max_regret - min_regret) / least_loads) + self.reward_illegal_ambiguous = cls_._reward_illegal_ambiguous def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous): - if has_error or is_illegal or is_ambiguous: - res = self.reward_min - else: + res = None + if is_done: + # if the episode is over and it's my fault (i did a blackout) i strongly + if has_error or is_illegal or is_ambiguous: + res = self.reward_min + elif is_illegal or is_ambiguous: + res = self._reward_illegal_ambiguous + + if res is None: # compute the losses gen_p, *_ = env.backend.generators_info() load_p, *_ = env.backend.loads_info() @@ -80,7 +174,7 @@ def __call__(self, action, env, has_error, is_done, is_illegal, is_ambiguous): # redispatching amount actual_dispatch = env._actual_dispatch - redisp_cost = self.alpha_redisph * np.sum(np.abs(actual_dispatch)) * marginal_cost + redisp_cost = self._alpha_redisp * np.sum(np.abs(actual_dispatch)) * marginal_cost # cost of losses losses_cost = losses * marginal_cost diff --git a/grid2op/Runner/Runner.py b/grid2op/Runner/Runner.py index bdd34f22d..e2c2059f3 100644 --- a/grid2op/Runner/Runner.py +++ b/grid2op/Runner/Runner.py @@ -33,9 +33,7 @@ # on windows if i start using sequential, i need to continue using sequential # if i start using parallel i need to continue using parallel # so i force the usage of the "starmap" stuff even if there is one process on windows -_IS_WINDOWS = sys.platform.startswith('win') -_IS_LINUX = sys.platform.startswith("linux") -_IS_MACOS = sys.platform.startswith("darwin") +from grid2op.platform import _IS_WINDOWS, _IS_LINUX, _IS_MACOS # TODO have a vectorized implementation of everything in case the agent is able to act on multiple environment # at the same time. This might require a lot of work, but would be totally worth it! 
@@ -223,7 +221,6 @@ def _aux_run_one_episode(env, agent, logger, indx, path_save=None, observations[time_step, :] = obs.to_vect() else: observations = np.concatenate((observations, obs.to_vect().reshape(1, -1))) - episode = EpisodeData(actions=actions, env_actions=env_actions, observations=observations, @@ -271,7 +268,6 @@ def _aux_run_one_episode(env, agent, logger, indx, path_save=None, info) end_ = time.time() - episode.set_meta(env, time_step, float(cum_reward), env_seed, agent_seed) li_text = ["Env: {:.2f}s", "\t - apply act {:.2f}s", "\t - run pf: {:.2f}s", @@ -287,7 +283,6 @@ def _aux_run_one_episode(env, agent, logger, indx, path_save=None, episode.to_disk() name_chron = env.chronics_handler.get_name() - return name_chron, cum_reward, int(time_step), episode @@ -764,8 +759,10 @@ def __init__(self, # otherwise on windows / macos it sometimes fail in the runner in multi process # on linux like OS i prefer to generate all the proper classes accordingly if _IS_LINUX: - env = self.init_env() - env.close() + pass + with self.init_env() as env: + bk_class = type(env.backend) + pass self.__used = False @@ -1061,7 +1058,7 @@ def _run_parrallel(self, nb_episode, nb_process=1, path_save=None, env_seeds=Non res = [] if _IS_LINUX: - lists = [(self, pn, i, path_save, seeds_res[i], max_iter, add_detailed_output) + lists = [(self, pn, i, path_save, seeds_res[i], max_iter, add_detailed_output) for i, pn in enumerate(process_ids)] else: lists = [(Runner(**self._get_params()), pn, i, path_save, seeds_res[i], max_iter, add_detailed_output) diff --git a/grid2op/Space/GridObjects.py b/grid2op/Space/GridObjects.py index 52d41fd81..a181fbdb2 100644 --- a/grid2op/Space/GridObjects.py +++ b/grid2op/Space/GridObjects.py @@ -47,6 +47,9 @@ class GridObjects: :class:`grid2op.Backend.Backend` all inherit from this class. This means that each of the above has its own representation of the powergrid. 
+ Before diving into the technical details on the implementation, you might want to have a look at this + page of the documentation :ref:`graph-encoding-gridgraph` that details why this representation is suitable. + The modeling adopted for describing a powergrid is the following: - only the main objects of a powergrid are represented. An "object" is either a load (consumption) a generator @@ -120,8 +123,6 @@ class GridObjects: "method 1" and "method 2" were presented as a way to give detailed and "concrete" example on how the modeling of the powergrid work. - - For a given powergrid, this object should be initialized once in the :class:`grid2op.Backend.Backend` when the first call to :func:`grid2op.Backend.Backend.load_grid` is performed. In particular the following attributes must necessarily be defined (see above for a detailed description of some of the attributes): @@ -152,7 +153,7 @@ class GridObjects: - :attr:`GridObjects.line_ex_to_sub_pos` - :attr:`GridObjects.storage_to_sub_pos` - A call to the function :func:`GridObjects._compute_pos_big_topo` allow to compute the \*_pos_topo_vect attributes + A call to the function :func:`GridObjects._compute_pos_big_topo_cls` allow to compute the \*_pos_topo_vect attributes (for example :attr:`GridObjects.line_ex_pos_topo_vect`) can be computed from the above data: - :attr:`GridObjects.load_pos_topo_vect` @@ -422,7 +423,9 @@ class GridObjects: grid_objects_types: ``matrix`` Give the information about each element of the "topo_vect" vector. It is an "easy" way to retrieve at which element (side of a power, load, generator, storage units) a given component of the "topology vector" - is referring to. See the getting started notebook about the observation and the action for more information. + is referring to. + For more information, you can consult the :ref:`graph-encoding-gridgraph` of the documentation + or the getting started notebook about the observation and the action for more information. 
# TODO specify the unit of redispatching data MWh, $/MW etc. """ @@ -535,6 +538,113 @@ class GridObjects: def __init__(self): pass + @classmethod + def _clear_class_attribute(cls): + cls.glop_version = grid2op.__version__ + + cls.SUB_COL = 0 + cls.LOA_COL = 1 + cls.GEN_COL = 2 + cls.LOR_COL = 3 + cls.LEX_COL = 4 + cls.STORAGE_COL = 5 + + cls.attr_list_vect = None + cls.attr_list_set = {} + cls.attr_list_json = [] + cls.attr_nan_list_set = set() + + # class been init + # __is_init = False + + # name of the objects + cls.env_name = "unknown" + cls.name_load = None + cls.name_gen = None + cls.name_line = None + cls.name_sub = None + cls.name_storage = None + + cls.n_gen = -1 + cls.n_load = -1 + cls.n_line = -1 + cls.n_sub = -1 + cls.n_storage = -1 + + cls.sub_info = None + cls.dim_topo = -1 + + # to which substation is connected each element + cls.load_to_subid = None + cls.gen_to_subid = None + cls.line_or_to_subid = None + cls.line_ex_to_subid = None + cls.storage_to_subid = None + + # which index has this element in the substation vector + cls.load_to_sub_pos = None + cls.gen_to_sub_pos = None + cls.line_or_to_sub_pos = None + cls.line_ex_to_sub_pos = None + cls.storage_to_sub_pos = None + + # which index has this element in the topology vector + cls.load_pos_topo_vect = None + cls.gen_pos_topo_vect = None + cls.line_or_pos_topo_vect = None + cls.line_ex_pos_topo_vect = None + cls.storage_pos_topo_vect = None + + # "convenient" way to retrieve information of the grid + cls.grid_objects_types = None + # to which substation each element of the topovect is connected + cls._topo_vect_to_sub = None + + # list of attribute to convert it from/to a vector + cls._vectorized = None + + # for redispatching / unit commitment + cls._li_attr_disp = ["gen_type", "gen_pmin", "gen_pmax", "gen_redispatchable", "gen_max_ramp_up", + "gen_max_ramp_down", "gen_min_uptime", "gen_min_downtime", "gen_cost_per_MW", + "gen_startup_cost", "gen_shutdown_cost", "gen_renewable"] + + 
cls._type_attr_disp = [str, float, float, bool, float, float, int, int, float, float, float, bool] + + # redispatch data, not available in all environment + cls.redispatching_unit_commitment_availble = False + cls.gen_type = None + cls.gen_pmin = None + cls.gen_pmax = None + cls.gen_redispatchable = None + cls.gen_max_ramp_up = None + cls.gen_max_ramp_down = None + cls.gen_min_uptime = None + cls.gen_min_downtime = None + cls.gen_cost_per_MW = None # marginal cost (in currency / (power.step) and not in $/(MW.h) it would be $ / (MW.5mins) ) + cls.gen_startup_cost = None # start cost (in currency) + cls.gen_shutdown_cost = None # shutdown cost (in currency) + cls.gen_renewable = None + + # storage unit static data + cls.storage_type = None + cls.storage_Emax = None + cls.storage_Emin = None + cls.storage_max_p_prod = None + cls.storage_max_p_absorb = None + cls.storage_marginal_cost = None + cls.storage_loss = None + cls.storage_charging_efficiency = None + cls.storage_discharging_efficiency = None + + # grid layout + cls.grid_layout = None + + # shunt data, not available in every backend + cls.shunts_data_available = False + cls.n_shunt = None + cls.name_shunt = None + cls.shunt_to_subid = None + @classmethod def _update_value_set(cls): """ @@ -939,7 +1049,8 @@ def size(self): res = np.sum(self.shape()).astype(dt_int) return res - def _aux_pos_big_topo(self, vect_to_subid, vect_to_sub_pos): + @classmethod + def _aux_pos_big_topo(cls, vect_to_subid, vect_to_sub_pos): """ INTERNAL @@ -958,11 +1069,30 @@ def _aux_pos_big_topo(self, vect_to_subid, vect_to_sub_pos): """ res = np.zeros(shape=vect_to_subid.shape, dtype=dt_int) for i, (sub_id, my_pos) in enumerate(zip(vect_to_subid, vect_to_sub_pos)): - obj_before = np.sum(self.sub_info[:sub_id]) + obj_before = np.sum(cls.sub_info[:sub_id]) res[i] = obj_before + my_pos return res + def _init_class_attr(self, obj=None): + """init the class attribute from an instance of the class + THIS IS NOT A CLASS ATTR + """ + if obj is 
None: + obj = self + cls = type(self) + cls_as_dict = {} + GridObjects._make_cls_dict_extended(obj, cls_as_dict, as_list=False) + for attr_nm, attr in cls_as_dict.items(): + setattr(cls, attr_nm, attr) + def _compute_pos_big_topo(self): + # TODO move the object attribute as class attribute ! + self._init_class_attr() + cls = type(self) + cls._compute_pos_big_topo_cls() + + @classmethod + def _compute_pos_big_topo_cls(cls): """ INTERNAL @@ -983,268 +1113,288 @@ def _compute_pos_big_topo(self): """ # check if we need to implement the position in substation - if self.n_storage == -1 and \ - self.storage_to_subid is None and \ - self.storage_pos_topo_vect is None and \ - self.storage_to_sub_pos is None: + if cls.n_storage == -1 and \ + cls.storage_to_subid is None and \ + cls.storage_pos_topo_vect is None and \ + cls.storage_to_sub_pos is None: # no storage on the grid, so i deactivate them - type(self).set_no_storage() - self._compute_sub_elements() - self._compute_sub_pos() - - self.load_pos_topo_vect = self._aux_pos_big_topo(self.load_to_subid, self.load_to_sub_pos).astype(dt_int) - self.gen_pos_topo_vect = self._aux_pos_big_topo(self.gen_to_subid, self.gen_to_sub_pos).astype(dt_int) - self.line_or_pos_topo_vect = self._aux_pos_big_topo(self.line_or_to_subid, self.line_or_to_sub_pos).astype(dt_int) - self.line_ex_pos_topo_vect = self._aux_pos_big_topo(self.line_ex_to_subid, self.line_ex_to_sub_pos).astype(dt_int) - self.storage_pos_topo_vect = self._aux_pos_big_topo(self.storage_to_subid, self.storage_to_sub_pos).astype(dt_int) - - self._topo_vect_to_sub = np.repeat(np.arange(self.n_sub), repeats=self.sub_info) - self.grid_objects_types = np.full(shape=(self.dim_topo, 6), fill_value=-1, dtype=dt_int) + cls.set_no_storage() + cls._compute_sub_elements() + cls._compute_sub_pos() + + cls.load_pos_topo_vect = cls._aux_pos_big_topo(cls.load_to_subid, cls.load_to_sub_pos).astype(dt_int) + cls.gen_pos_topo_vect = cls._aux_pos_big_topo(cls.gen_to_subid, 
cls.gen_to_sub_pos).astype(dt_int) + cls.line_or_pos_topo_vect = cls._aux_pos_big_topo(cls.line_or_to_subid, cls.line_or_to_sub_pos).astype(dt_int) + cls.line_ex_pos_topo_vect = cls._aux_pos_big_topo(cls.line_ex_to_subid, cls.line_ex_to_sub_pos).astype(dt_int) + cls.storage_pos_topo_vect = cls._aux_pos_big_topo(cls.storage_to_subid, cls.storage_to_sub_pos).astype(dt_int) + + cls._topo_vect_to_sub = np.repeat(np.arange(cls.n_sub), repeats=cls.sub_info) + cls.grid_objects_types = np.full(shape=(cls.dim_topo, 6), fill_value=-1, dtype=dt_int) prev = 0 - for sub_id, nb_el in enumerate(self.sub_info): - self.grid_objects_types[prev:(prev + nb_el), :] = self.get_obj_substations(substation_id=sub_id) + for sub_id, nb_el in enumerate(cls.sub_info): + cls.grid_objects_types[prev:(prev + nb_el), :] = cls.get_obj_substations(substation_id=sub_id) prev += nb_el - def _check_sub_id(self): + @classmethod + def _check_sub_id(cls): # check it can be converted to proper types - if not isinstance(self.load_to_subid, np.ndarray): + if not isinstance(cls.load_to_subid, np.ndarray): try: - self.load_to_subid = np.array(self.load_to_subid) - self.load_to_subid = self.load_to_subid.astype(dt_int) + cls.load_to_subid = np.array(cls.load_to_subid) + cls.load_to_subid = cls.load_to_subid.astype(dt_int) except Exception as exc_: raise EnvError(f"self.load_to_subid should be convertible to a numpy array. " f"It fails with error \"{exc_}\"") - if not isinstance(self.gen_to_subid, np.ndarray): + if not isinstance(cls.gen_to_subid, np.ndarray): try: - self.gen_to_subid = np.array(self.gen_to_subid) - self.gen_to_subid = self.gen_to_subid.astype(dt_int) + cls.gen_to_subid = np.array(cls.gen_to_subid) + cls.gen_to_subid = cls.gen_to_subid.astype(dt_int) except Exception as exc_: raise EnvError(f"self.gen_to_subid should be convertible to a numpy array. 
" f"It fails with error \"{exc_}\"") - if not isinstance(self.line_or_to_subid, np.ndarray): + if not isinstance(cls.line_or_to_subid, np.ndarray): try: - self.line_or_to_subid = np.array(self.line_or_to_subid) - self.line_or_to_subid = self.line_or_to_subid.astype(dt_int) + cls.line_or_to_subid = np.array(cls.line_or_to_subid) + cls.line_or_to_subid = cls.line_or_to_subid.astype(dt_int) except Exception as exc_: raise EnvError(f"self.line_or_to_subid should be convertible to a numpy array. " f"It fails with error \"{exc_}\"") - if not isinstance(self.line_ex_to_subid, np.ndarray): + if not isinstance(cls.line_ex_to_subid, np.ndarray): try: - self.line_ex_to_subid = np.array(self.line_ex_to_subid) - self.line_ex_to_subid = self.line_ex_to_subid.astype(dt_int) + cls.line_ex_to_subid = np.array(cls.line_ex_to_subid) + cls.line_ex_to_subid = cls.line_ex_to_subid.astype(dt_int) except Exception as exc_: raise EnvError("self.line_ex_to_subid should be convertible to a numpy array" f"It fails with error \"{exc_}\"") - if not isinstance(self.storage_to_subid, np.ndarray): + if not isinstance(cls.storage_to_subid, np.ndarray): try: - self.storage_to_subid = np.array(self.storage_to_subid) - self.storage_to_subid = self.storage_to_subid.astype(dt_int) + cls.storage_to_subid = np.array(cls.storage_to_subid) + cls.storage_to_subid = cls.storage_to_subid.astype(dt_int) except Exception as e: raise EnvError("self.storage_to_subid should be convertible to a numpy array") # now check the sizes - if len(self.load_to_subid) != self.n_load: + if len(cls.load_to_subid) != cls.n_load: raise IncorrectNumberOfLoads() - if np.min(self.load_to_subid) < 0: + if np.min(cls.load_to_subid) < 0: raise EnvError("Some shunt is connected to a negative substation id.") - if np.max(self.load_to_subid) > self.n_sub: + if np.max(cls.load_to_subid) > cls.n_sub: raise EnvError("Some load is supposed to be connected to substations with id {} which" "is greater than the number of substations of the grid, 
which is {}." - "".format(np.max(self.load_to_subid), self.n_sub)) + "".format(np.max(cls.load_to_subid), cls.n_sub)) - if len(self.gen_to_subid) != self.n_gen: + if len(cls.gen_to_subid) != cls.n_gen: raise IncorrectNumberOfGenerators() - if np.min(self.gen_to_subid) < 0: + if np.min(cls.gen_to_subid) < 0: raise EnvError("Some shunt is connected to a negative substation id.") - if np.max(self.gen_to_subid) > self.n_sub: + if np.max(cls.gen_to_subid) > cls.n_sub: raise EnvError("Some generator is supposed to be connected to substations with id {} which" "is greater than the number of substations of the grid, which is {}." - "".format(np.max(self.gen_to_subid), self.n_sub)) - if len(self.line_or_to_subid) != self.n_line: + "".format(np.max(cls.gen_to_subid), cls.n_sub)) + if len(cls.line_or_to_subid) != cls.n_line: raise IncorrectNumberOfLines() - if np.min(self.line_or_to_subid) < 0: + if np.min(cls.line_or_to_subid) < 0: raise EnvError("Some shunt is connected to a negative substation id.") - if np.max(self.line_or_to_subid) > self.n_sub: + if np.max(cls.line_or_to_subid) > cls.n_sub: raise EnvError("Some powerline (or) is supposed to be connected to substations with id {} which" "is greater than the number of substations of the grid, which is {}." - "".format(np.max(self.line_or_to_subid), self.n_sub)) + "".format(np.max(cls.line_or_to_subid), cls.n_sub)) - if len(self.line_ex_to_subid) != self.n_line: + if len(cls.line_ex_to_subid) != cls.n_line: raise IncorrectNumberOfLines() - if np.min(self.line_ex_to_subid) < 0: + if np.min(cls.line_ex_to_subid) < 0: raise EnvError("Some shunt is connected to a negative substation id.") - if np.max(self.line_ex_to_subid) > self.n_sub: + if np.max(cls.line_ex_to_subid) > cls.n_sub: raise EnvError("Some powerline (ex) is supposed to be connected to substations with id {} which" "is greater than the number of substations of the grid, which is {}." 
- "".format(np.max(self.line_or_to_subid), self.n_sub)) - if len(self.storage_to_subid) != self.n_storage: + "".format(np.max(cls.line_or_to_subid), cls.n_sub)) + if len(cls.storage_to_subid) != cls.n_storage: raise IncorrectNumberOfStorages() - if self.n_storage > 0: - if np.min(self.storage_to_subid) < 0: + if cls.n_storage > 0: + if np.min(cls.storage_to_subid) < 0: raise EnvError("Some storage is connected to a negative substation id.") - if np.max(self.storage_to_subid) > self.n_sub: + if np.max(cls.storage_to_subid) > cls.n_sub: raise EnvError("Some powerline (ex) is supposed to be connected to substations with id {} which" "is greater than the number of substations of the grid, which is {}." - "".format(np.max(self.line_or_to_subid), self.n_sub)) + "".format(np.max(cls.line_or_to_subid), cls.n_sub)) - def _fill_names(self): - if self.name_line is None: - self.name_line = ['{}_{}_{}'.format(or_id, ex_id, l_id) for l_id, (or_id, ex_id) in - enumerate(zip(self.line_or_to_subid, self.line_ex_to_subid))] - self.name_line = np.array(self.name_line) + @classmethod + def _fill_names(cls): + if cls.name_line is None: + cls.name_line = ['{}_{}_{}'.format(or_id, ex_id, l_id) for l_id, (or_id, ex_id) in + enumerate(zip(cls.line_or_to_subid, cls.line_ex_to_subid))] + cls.name_line = np.array(cls.name_line) warnings.warn("name_line is None so default line names have been assigned to your grid. " "(FYI: Line names are used to make the correspondence between the chronics and the backend)" "This might result in impossibility to load data." 
"\n\tIf \"env.make\" properly worked, you can safely ignore this warning.") - if self.name_load is None: - self.name_load = ["load_{}_{}".format(bus_id, load_id) for load_id, bus_id in enumerate(self.load_to_subid)] - self.name_load = np.array(self.name_load) + if cls.name_load is None: + cls.name_load = ["load_{}_{}".format(bus_id, load_id) for load_id, bus_id in enumerate(cls.load_to_subid)] + cls.name_load = np.array(cls.name_load) warnings.warn("name_load is None so default load names have been assigned to your grid. " "(FYI: load names are used to make the correspondence between the chronics and the backend)" "This might result in impossibility to load data." "\n\tIf \"env.make\" properly worked, you can safely ignore this warning.") - if self.name_gen is None: - self.name_gen = ["gen_{}_{}".format(bus_id, gen_id) for gen_id, bus_id in enumerate(self.gen_to_subid)] - self.name_gen = np.array(self.name_gen) + if cls.name_gen is None: + cls.name_gen = ["gen_{}_{}".format(bus_id, gen_id) for gen_id, bus_id in enumerate(cls.gen_to_subid)] + cls.name_gen = np.array(cls.name_gen) warnings.warn("name_gen is None so default generator names have been assigned to your grid. " "(FYI: generator names are used to make the correspondence between the chronics and " "the backend)" "This might result in impossibility to load data." "\n\tIf \"env.make\" properly worked, you can safely ignore this warning.") - if self.name_sub is None: - self.name_sub = ["sub_{}".format(sub_id) for sub_id in range(self.n_sub)] - self.name_sub = np.array(self.name_sub) + if cls.name_sub is None: + cls.name_sub = ["sub_{}".format(sub_id) for sub_id in range(cls.n_sub)] + cls.name_sub = np.array(cls.name_sub) warnings.warn("name_sub is None so default substation names have been assigned to your grid. " "(FYI: substation names are used to make the correspondence between the chronics and " "the backend)" "This might result in impossibility to load data." 
"\n\tIf \"env.make\" properly worked, you can safely ignore this warning.") - if self.name_storage is None: - self.name_storage = ["storage_{}_{}".format(bus_id, sto_id) - for sto_id, bus_id in enumerate(self.storage_to_subid)] - self.name_storage = np.array(self.name_sub) + if cls.name_storage is None: + cls.name_storage = ["storage_{}_{}".format(bus_id, sto_id) + for sto_id, bus_id in enumerate(cls.storage_to_subid)] + cls.name_storage = np.array(cls.name_sub) warnings.warn("name_storage is None so default storage unit names have been assigned to your grid. " "(FYI: storage names are used to make the correspondence between the chronics and " "the backend)" "This might result in impossibility to load data." "\n\tIf \"env.make\" properly worked, you can safely ignore this warning.") - def _check_names(self): - self._fill_names() + @classmethod + def _check_names(cls): + cls._fill_names() - if not isinstance(self.name_line, np.ndarray): + if not isinstance(cls.name_line, np.ndarray): try: - self.name_line = np.array(self.name_line) - self.name_line = self.name_line.astype(str) + cls.name_line = np.array(cls.name_line) + cls.name_line = cls.name_line.astype(str) except Exception as exc_: raise EnvError(f"self.name_line should be convertible to a numpy array of type str. Error was " f"{exc_}") - if not isinstance(self.name_load, np.ndarray): + if not isinstance(cls.name_load, np.ndarray): try: - self.name_load = np.array(self.name_load) - self.name_load = self.name_load.astype(str) + cls.name_load = np.array(cls.name_load) + cls.name_load = cls.name_load.astype(str) except Exception as exc_: raise EnvError("self.name_load should be convertible to a numpy array of type str. 
Error was " f"{exc_}") - if not isinstance(self.name_gen, np.ndarray): + if not isinstance(cls.name_gen, np.ndarray): try: - self.name_gen = np.array(self.name_gen) - self.name_gen = self.name_gen.astype(str) + cls.name_gen = np.array(cls.name_gen) + cls.name_gen = cls.name_gen.astype(str) except Exception as exc_: raise EnvError("self.name_gen should be convertible to a numpy array of type str. Error was " f"{exc_}") - if not isinstance(self.name_sub, np.ndarray): + if not isinstance(cls.name_sub, np.ndarray): try: - self.name_sub = np.array(self.name_sub) - self.name_sub = self.name_sub.astype(str) + cls.name_sub = np.array(cls.name_sub) + cls.name_sub = cls.name_sub.astype(str) except Exception as exc_: raise EnvError("self.name_sub should be convertible to a numpy array of type str. Error was " f"{exc_}") - if not isinstance(self.name_storage, np.ndarray): + if not isinstance(cls.name_storage, np.ndarray): try: - self.name_storage = np.array(self.name_storage) - self.name_storage = self.name_storage.astype(str) + cls.name_storage = np.array(cls.name_storage) + cls.name_storage = cls.name_storage.astype(str) except Exception as exc_: raise EnvError("self.name_storage should be convertible to a numpy array of type str. Error was " f"{exc_}") - def _check_sub_pos(self): - if not isinstance(self.load_to_sub_pos, np.ndarray): + attrs_nms = [cls.name_gen, cls.name_sub, cls.name_line, cls.name_load, cls.name_storage] + nms = ["generators", "substations", "lines", "loads", "storage units"] + if cls.shunts_data_available: + # these are set to "None" if there is no shunts on the grid + attrs_nms.append(cls.name_shunt) + nms.append("shunts") + + for arr_, nm in zip(attrs_nms, nms): + tmp = np.unique(arr_) + if tmp.shape[0] != arr_.shape[0]: + nms = '\n\t - '.join(sorted(arr_)) + raise EnvError(f"Two {nm} have the same names. Please check the \"grid.json\" file and make sure the " + f"name of the {nm} are all different. 
Right now they are \n\t - {nms}.") + + @classmethod + def _check_sub_pos(cls): + if not isinstance(cls.load_to_sub_pos, np.ndarray): try: - self.load_to_sub_pos = np.array(self.load_to_sub_pos) - self.load_to_sub_pos = self.load_to_sub_pos.astype(dt_int) + cls.load_to_sub_pos = np.array(cls.load_to_sub_pos) + cls.load_to_sub_pos = cls.load_to_sub_pos.astype(dt_int) except Exception as exc_: raise EnvError("self.load_to_sub_pos should be convertible to a numpy array. Error was " f"{exc_}") - if not isinstance(self.gen_to_sub_pos, np.ndarray): + if not isinstance(cls.gen_to_sub_pos, np.ndarray): try: - self.gen_to_sub_pos = np.array(self.gen_to_sub_pos) - self.gen_to_sub_pos = self.gen_to_sub_pos.astype(dt_int) + cls.gen_to_sub_pos = np.array(cls.gen_to_sub_pos) + cls.gen_to_sub_pos = cls.gen_to_sub_pos.astype(dt_int) except Exception as exc_: raise EnvError("self.gen_to_sub_pos should be convertible to a numpy array. Error was " f"{exc_}") - if not isinstance(self.line_or_to_sub_pos, np.ndarray): + if not isinstance(cls.line_or_to_sub_pos, np.ndarray): try: - self.line_or_to_sub_pos = np.array(self.line_or_to_sub_pos) - self.line_or_to_sub_pos = self.line_or_to_sub_pos.astype(dt_int) + cls.line_or_to_sub_pos = np.array(cls.line_or_to_sub_pos) + cls.line_or_to_sub_pos = cls.line_or_to_sub_pos.astype(dt_int) except Exception as exc_: raise EnvError("self.line_or_to_sub_pos should be convertible to a numpy array. Error was " f"{exc_}") - if not isinstance(self.line_ex_to_sub_pos, np.ndarray): + if not isinstance(cls.line_ex_to_sub_pos, np.ndarray): try: - self.line_ex_to_sub_pos = np.array(self.line_ex_to_sub_pos) - self.line_ex_to_sub_pos = self.line_ex_to_sub_pos .astype(dt_int) + cls.line_ex_to_sub_pos = np.array(cls.line_ex_to_sub_pos) + cls.line_ex_to_sub_pos = cls.line_ex_to_sub_pos .astype(dt_int) except Exception as exc_: raise EnvError("self.line_ex_to_sub_pos should be convertible to a numpy array. 
Error was " f"{exc_}") - if not isinstance(self.storage_to_sub_pos, np.ndarray): + if not isinstance(cls.storage_to_sub_pos, np.ndarray): try: - self.storage_to_sub_pos = np.array(self.storage_to_sub_pos) - self.storage_to_sub_pos = self.storage_to_sub_pos .astype(dt_int) + cls.storage_to_sub_pos = np.array(cls.storage_to_sub_pos) + cls.storage_to_sub_pos = cls.storage_to_sub_pos .astype(dt_int) except Exception as exc_: raise EnvError("self.line_ex_to_sub_pos should be convertible to a numpy array. Error was " f"{exc_}") - def _check_topo_vect(self): - if not isinstance(self.load_pos_topo_vect, np.ndarray): + @classmethod + def _check_topo_vect(cls): + if not isinstance(cls.load_pos_topo_vect, np.ndarray): try: - self.load_pos_topo_vect = np.array(self.load_pos_topo_vect) - self.load_pos_topo_vect = self.load_pos_topo_vect.astype(dt_int) + cls.load_pos_topo_vect = np.array(cls.load_pos_topo_vect) + cls.load_pos_topo_vect = cls.load_pos_topo_vect.astype(dt_int) except Exception as exc_: raise EnvError("self.load_pos_topo_vect should be convertible to a numpy array. Error was " f"{exc_}") - if not isinstance(self.gen_pos_topo_vect, np.ndarray): + if not isinstance(cls.gen_pos_topo_vect, np.ndarray): try: - self.gen_pos_topo_vect = np.array(self.gen_pos_topo_vect) - self.gen_pos_topo_vect = self.gen_pos_topo_vect.astype(dt_int) + cls.gen_pos_topo_vect = np.array(cls.gen_pos_topo_vect) + cls.gen_pos_topo_vect = cls.gen_pos_topo_vect.astype(dt_int) except Exception as exc_: raise EnvError("self.gen_pos_topo_vect should be convertible to a numpy array. 
Error was " f"{exc_}") - if not isinstance(self.line_or_pos_topo_vect, np.ndarray): + if not isinstance(cls.line_or_pos_topo_vect, np.ndarray): try: - self.line_or_pos_topo_vect = np.array(self.line_or_pos_topo_vect) - self.line_or_pos_topo_vect = self.line_or_pos_topo_vect.astype(dt_int) + cls.line_or_pos_topo_vect = np.array(cls.line_or_pos_topo_vect) + cls.line_or_pos_topo_vect = cls.line_or_pos_topo_vect.astype(dt_int) except Exception as exc_: raise EnvError("self.line_or_pos_topo_vect should be convertible to a numpy array. Error was " f"{exc_}") - if not isinstance(self.line_ex_pos_topo_vect, np.ndarray): + if not isinstance(cls.line_ex_pos_topo_vect, np.ndarray): try: - self.line_ex_pos_topo_vect = np.array(self.line_ex_pos_topo_vect) - self.line_ex_pos_topo_vect = self.line_ex_pos_topo_vect.astype(dt_int) + cls.line_ex_pos_topo_vect = np.array(cls.line_ex_pos_topo_vect) + cls.line_ex_pos_topo_vect = cls.line_ex_pos_topo_vect.astype(dt_int) except Exception as exc_: raise EnvError("self.line_ex_pos_topo_vect should be convertible to a numpy array. Error was " f"{exc_}") - if not isinstance(self.storage_pos_topo_vect, np.ndarray): + if not isinstance(cls.storage_pos_topo_vect, np.ndarray): try: - self.storage_pos_topo_vect = np.array(self.storage_pos_topo_vect) - self.storage_pos_topo_vect = self.storage_pos_topo_vect.astype(dt_int) + cls.storage_pos_topo_vect = np.array(cls.storage_pos_topo_vect) + cls.storage_pos_topo_vect = cls.storage_pos_topo_vect.astype(dt_int) except Exception as exc_: raise EnvError("self.storage_pos_topo_vect should be convertible to a numpy array. 
Error was " f"{exc_}") - def _compute_sub_pos(self): + @classmethod + def _compute_sub_pos(cls): """ INTERNAL @@ -1267,16 +1417,16 @@ def _compute_sub_pos(self): """ need_implement = False - if self.load_to_sub_pos is None: + if cls.load_to_sub_pos is None: need_implement = True - if self.gen_to_sub_pos is None: + if cls.gen_to_sub_pos is None: if need_implement is False: raise BackendError("You chose to implement \"load_to_sub_pos\" but not \"gen_to_sub_pos\". We cannot " "work with that. Please either use the automatic setting, or implement all of " "*_to_sub_pos vectors" "") need_implement = True - if self.line_or_to_sub_pos is None: + if cls.line_or_to_sub_pos is None: if need_implement is False: raise BackendError("You chose to implement \"line_or_to_sub_pos\" but not \"load_to_sub_pos\"" "or \"gen_to_sub_pos\". We cannot " @@ -1284,7 +1434,7 @@ def _compute_sub_pos(self): "*_to_sub_pos vectors" "") need_implement = True - if self.line_ex_to_sub_pos is None: + if cls.line_ex_to_sub_pos is None: if need_implement is False: raise BackendError("You chose to implement \"line_ex_to_sub_pos\" but not \"load_to_sub_pos\"" "or \"gen_to_sub_pos\" or \"line_or_to_sub_pos\". We cannot " @@ -1292,7 +1442,7 @@ def _compute_sub_pos(self): "*_to_sub_pos vectors" "") need_implement = True - if self.storage_to_sub_pos is None: + if cls.storage_to_sub_pos is None: if need_implement is False: raise BackendError("You chose to implement \"storage_to_sub_pos\" but not \"load_to_sub_pos\"" "or \"gen_to_sub_pos\" or \"line_or_to_sub_pos\" or \"line_ex_to_sub_pos\". 
" @@ -1305,33 +1455,34 @@ def _compute_sub_pos(self): if not need_implement: return - last_order_number = np.zeros(self.n_sub, dtype=dt_int) - self.load_to_sub_pos = np.zeros(self.n_load, dtype=dt_int) - for load_id, sub_id_connected in enumerate(self.load_to_subid): - self.load_to_sub_pos[load_id] = last_order_number[sub_id_connected] + last_order_number = np.zeros(cls.n_sub, dtype=dt_int) + cls.load_to_sub_pos = np.zeros(cls.n_load, dtype=dt_int) + for load_id, sub_id_connected in enumerate(cls.load_to_subid): + cls.load_to_sub_pos[load_id] = last_order_number[sub_id_connected] last_order_number[sub_id_connected] += 1 - self.gen_to_sub_pos = np.zeros(self.n_gen, dtype=dt_int) - for gen_id, sub_id_connected in enumerate(self.gen_to_subid): - self.gen_to_sub_pos[gen_id] = last_order_number[sub_id_connected] + cls.gen_to_sub_pos = np.zeros(cls.n_gen, dtype=dt_int) + for gen_id, sub_id_connected in enumerate(cls.gen_to_subid): + cls.gen_to_sub_pos[gen_id] = last_order_number[sub_id_connected] last_order_number[sub_id_connected] += 1 - self.line_or_to_sub_pos = np.zeros(self.n_line, dtype=dt_int) - for lor_id, sub_id_connected in enumerate(self.line_or_to_subid): - self.line_or_to_sub_pos[lor_id] = last_order_number[sub_id_connected] + cls.line_or_to_sub_pos = np.zeros(cls.n_line, dtype=dt_int) + for lor_id, sub_id_connected in enumerate(cls.line_or_to_subid): + cls.line_or_to_sub_pos[lor_id] = last_order_number[sub_id_connected] last_order_number[sub_id_connected] += 1 - self.line_ex_to_sub_pos = np.zeros(self.n_line, dtype=dt_int) - for lex_id, sub_id_connected in enumerate(self.line_ex_to_subid): - self.line_ex_to_sub_pos[lex_id] = last_order_number[sub_id_connected] + cls.line_ex_to_sub_pos = np.zeros(cls.n_line, dtype=dt_int) + for lex_id, sub_id_connected in enumerate(cls.line_ex_to_subid): + cls.line_ex_to_sub_pos[lex_id] = last_order_number[sub_id_connected] last_order_number[sub_id_connected] += 1 - self.storage_to_sub_pos = np.zeros(self.n_storage, 
dtype=dt_int) - for sto_id, sub_id_connected in enumerate(self.storage_to_subid): - self.storage_to_sub_pos[sto_id] = last_order_number[sub_id_connected] + cls.storage_to_sub_pos = np.zeros(cls.n_storage, dtype=dt_int) + for sto_id, sub_id_connected in enumerate(cls.storage_to_subid): + cls.storage_to_sub_pos[sto_id] = last_order_number[sub_id_connected] last_order_number[sub_id_connected] += 1 - def _compute_sub_elements(self): + @classmethod + def _compute_sub_elements(cls): """ INTERNAL @@ -1342,24 +1493,25 @@ def _compute_sub_elements(self): It supposes that *to_subid are initialized and that n_line, n_sub, n_load and n_gen are all positive """ - if self.dim_topo is None or self.dim_topo <= 0: - self.dim_topo = 2 * self.n_line + self.n_load + self.n_gen + self.n_storage + if cls.dim_topo is None or cls.dim_topo <= 0: + cls.dim_topo = 2 * cls.n_line + cls.n_load + cls.n_gen + cls.n_storage - if self.sub_info is None: - self.sub_info = np.zeros(self.n_sub, dtype=dt_int) + if cls.sub_info is None: + cls.sub_info = np.zeros(cls.n_sub, dtype=dt_int) # NB the vectorized implementation do not work - for s_id in self.load_to_subid: - self.sub_info[s_id] += 1 - for s_id in self.gen_to_subid: - self.sub_info[s_id] += 1 - for s_id in self.line_or_to_subid: - self.sub_info[s_id] += 1 - for s_id in self.line_ex_to_subid: - self.sub_info[s_id] += 1 - for s_id in self.storage_to_subid: - self.sub_info[s_id] += 1 - - def assert_grid_correct(self): + for s_id in cls.load_to_subid: + cls.sub_info[s_id] += 1 + for s_id in cls.gen_to_subid: + cls.sub_info[s_id] += 1 + for s_id in cls.line_or_to_subid: + cls.sub_info[s_id] += 1 + for s_id in cls.line_ex_to_subid: + cls.sub_info[s_id] += 1 + for s_id in cls.storage_to_subid: + cls.sub_info[s_id] += 1 + + @classmethod + def assert_grid_correct_cls(cls): """ INTERNAL @@ -1387,67 +1539,67 @@ def assert_grid_correct(self): # TODO refactor this method with the `_check***` methods. 
# TODO refactor the `_check***` to use the same "base functions" that would be coded only once. - if self.n_gen <= 0: + if cls.n_gen <= 0: raise EnvError("n_gen is negative. Powergrid is invalid: there are no generator") - if self.n_load <= 0: + if cls.n_load <= 0: raise EnvError("n_load is negative. Powergrid is invalid: there are no load") - if self.n_line <= 0: + if cls.n_line <= 0: raise EnvError("n_line is negative. Powergrid is invalid: there are no line") - if self.n_sub <= 0: + if cls.n_sub <= 0: raise EnvError("n_sub is negative. Powergrid is invalid: there are no substation") - if self.n_storage == -1 and \ - self.storage_to_subid is None and \ - self.storage_pos_topo_vect is None and \ - self.storage_to_sub_pos is None: + if cls.n_storage == -1 and \ + cls.storage_to_subid is None and \ + cls.storage_pos_topo_vect is None and \ + cls.storage_to_sub_pos is None: # no storage on the grid, so i deactivate them - type(self).set_no_storage() + cls.set_no_storage() - if self.n_storage < 0: + if cls.n_storage < 0: raise EnvError("n_storage is negative. Powergrid is invalid: you specify a negative number of unit storage") - self._compute_sub_elements() - if not isinstance(self.sub_info, np.ndarray): + cls._compute_sub_elements() + if not isinstance(cls.sub_info, np.ndarray): try: - self.sub_info = np.array(self.sub_info) - self.sub_info = self.sub_info.astype(dt_int) + cls.sub_info = np.array(cls.sub_info) + cls.sub_info = cls.sub_info.astype(dt_int) except Exception as exc_: raise EnvError(f"self.sub_info should be convertible to a numpy array. 
" f"It fails with error \"{exc_}\"") # to which subtation they are connected - self._check_sub_id() + cls._check_sub_id() # for names - self._check_names() + cls._check_names() # compute the position in substation if not done already - self._compute_sub_pos() + cls._compute_sub_pos() # test position in substation - self._check_sub_pos() + cls._check_sub_pos() # test position in topology vector - self._check_topo_vect() + cls._check_topo_vect() # test that all numbers are finite: tmp = np.concatenate(( - self.sub_info.flatten(), - self.load_to_subid.flatten(), - self.gen_to_subid.flatten(), - self.line_or_to_subid.flatten(), - self.line_ex_to_subid.flatten(), - self.storage_to_subid.flatten(), - self.load_to_sub_pos.flatten(), - self.gen_to_sub_pos.flatten(), - self.line_or_to_sub_pos.flatten(), - self.line_ex_to_sub_pos.flatten(), - self.storage_to_sub_pos.flatten(), - self.load_pos_topo_vect.flatten(), - self.gen_pos_topo_vect.flatten(), - self.line_or_pos_topo_vect.flatten(), - self.line_ex_pos_topo_vect.flatten(), - self.storage_pos_topo_vect.flatten() + cls.sub_info.flatten(), + cls.load_to_subid.flatten(), + cls.gen_to_subid.flatten(), + cls.line_or_to_subid.flatten(), + cls.line_ex_to_subid.flatten(), + cls.storage_to_subid.flatten(), + cls.load_to_sub_pos.flatten(), + cls.gen_to_sub_pos.flatten(), + cls.line_or_to_sub_pos.flatten(), + cls.line_ex_to_sub_pos.flatten(), + cls.storage_to_sub_pos.flatten(), + cls.load_pos_topo_vect.flatten(), + cls.gen_pos_topo_vect.flatten(), + cls.line_or_pos_topo_vect.flatten(), + cls.line_ex_pos_topo_vect.flatten(), + cls.storage_pos_topo_vect.flatten() )) try: if np.any(~np.isfinite(tmp)): @@ -1460,361 +1612,364 @@ def assert_grid_correct(self): f"{exc_}") # check sizes - if len(self.sub_info) != self.n_sub: + if len(cls.sub_info) != cls.n_sub: raise IncorrectNumberOfSubstation("The number of substation is not consistent in " "self.sub_info (size \"{}\")" - "and self.n_sub ({})".format(len(self.sub_info), self.n_sub)) - if 
np.sum(self.sub_info) != self.n_load + self.n_gen + 2*self.n_line + self.n_storage: + "and self.n_sub ({})".format(len(cls.sub_info), cls.n_sub)) + if np.sum(cls.sub_info) != cls.n_load + cls.n_gen + 2*cls.n_line + cls.n_storage: err_msg = "The number of elements of elements is not consistent between self.sub_info where there are " err_msg += "{} elements connected to all substations and the number of load, generators and lines in " \ "the _grid ({})." - err_msg = err_msg.format(np.sum(self.sub_info), - self.n_load + self.n_gen + 2*self.n_line + self.n_storage) + err_msg = err_msg.format(np.sum(cls.sub_info), + cls.n_load + cls.n_gen + 2*cls.n_line + cls.n_storage) raise IncorrectNumberOfElements(err_msg) - if len(self.name_load) != self.n_load: + if len(cls.name_load) != cls.n_load: raise IncorrectNumberOfLoads("len(self.name_load) != self.n_load") - if len(self.name_gen) != self.n_gen: + if len(cls.name_gen) != cls.n_gen: raise IncorrectNumberOfGenerators("len(self.name_gen) != self.n_gen") - if len(self.name_line) != self.n_line: + if len(cls.name_line) != cls.n_line: raise IncorrectNumberOfLines("len(self.name_line) != self.n_line") - if len(self.name_storage) != self.n_storage: + if len(cls.name_storage) != cls.n_storage: raise IncorrectNumberOfStorages("len(self.name_storage) != self.n_storage") - if len(self.name_sub) != self.n_sub: + if len(cls.name_sub) != cls.n_sub: raise IncorrectNumberOfSubstation("len(self.name_sub) != self.n_sub") - if len(self.load_to_sub_pos) != self.n_load: + if len(cls.load_to_sub_pos) != cls.n_load: raise IncorrectNumberOfLoads("len(self.load_to_sub_pos) != self.n_load") - if len(self.gen_to_sub_pos) != self.n_gen: + if len(cls.gen_to_sub_pos) != cls.n_gen: raise IncorrectNumberOfGenerators("en(self.gen_to_sub_pos) != self.n_gen") - if len(self.line_or_to_sub_pos) != self.n_line: + if len(cls.line_or_to_sub_pos) != cls.n_line: raise IncorrectNumberOfLines("len(self.line_or_to_sub_pos) != self.n_line") - if 
len(self.line_ex_to_sub_pos) != self.n_line: + if len(cls.line_ex_to_sub_pos) != cls.n_line: raise IncorrectNumberOfLines("len(self.line_ex_to_sub_pos) != self.n_line") - if len(self.storage_to_sub_pos) != self.n_storage: + if len(cls.storage_to_sub_pos) != cls.n_storage: raise IncorrectNumberOfStorages("len(self.storage_to_sub_pos) != self.n_storage") - if len(self.load_pos_topo_vect) != self.n_load: + if len(cls.load_pos_topo_vect) != cls.n_load: raise IncorrectNumberOfLoads("len(self.load_pos_topo_vect) != self.n_load") - if len(self.gen_pos_topo_vect) != self.n_gen: + if len(cls.gen_pos_topo_vect) != cls.n_gen: raise IncorrectNumberOfGenerators("len(self.gen_pos_topo_vect) != self.n_gen") - if len(self.line_or_pos_topo_vect) != self.n_line: + if len(cls.line_or_pos_topo_vect) != cls.n_line: raise IncorrectNumberOfLines("len(self.line_or_pos_topo_vect) != self.n_line") - if len(self.line_ex_pos_topo_vect) != self.n_line: + if len(cls.line_ex_pos_topo_vect) != cls.n_line: raise IncorrectNumberOfLines("len(self.line_ex_pos_topo_vect) != self.n_line") - if len(self.storage_pos_topo_vect) != self.n_storage: + if len(cls.storage_pos_topo_vect) != cls.n_storage: raise IncorrectNumberOfLines("len(self.storage_pos_topo_vect) != self.n_storage") # test if object are connected to right substation - obj_per_sub = np.zeros(shape=(self.n_sub,), dtype=dt_int) - for sub_id in self.load_to_subid: + obj_per_sub = np.zeros(shape=(cls.n_sub,), dtype=dt_int) + for sub_id in cls.load_to_subid: obj_per_sub[sub_id] += 1 - for sub_id in self.gen_to_subid: + for sub_id in cls.gen_to_subid: obj_per_sub[sub_id] += 1 - for sub_id in self.line_or_to_subid: + for sub_id in cls.line_or_to_subid: obj_per_sub[sub_id] += 1 - for sub_id in self.line_ex_to_subid: + for sub_id in cls.line_ex_to_subid: obj_per_sub[sub_id] += 1 - for sub_id in self.storage_to_subid: + for sub_id in cls.storage_to_subid: obj_per_sub[sub_id] += 1 - if not np.all(obj_per_sub == self.sub_info): - raise 
IncorrectNumberOfElements(f"for substation(s): {np.where(obj_per_sub != self.sub_info)[0]}") + if not np.all(obj_per_sub == cls.sub_info): + raise IncorrectNumberOfElements(f"for substation(s): {np.where(obj_per_sub != cls.sub_info)[0]}") # test right number of element in substations # test that for each substation i don't have an id above the number of element of a substations - for i, (sub_id, sub_pos) in enumerate(zip(self.load_to_subid, self.load_to_sub_pos)): - if sub_pos >= self.sub_info[sub_id]: + for i, (sub_id, sub_pos) in enumerate(zip(cls.load_to_subid, cls.load_to_sub_pos)): + if sub_pos >= cls.sub_info[sub_id]: raise IncorrectPositionOfLoads("for load {}".format(i)) - for i, (sub_id, sub_pos) in enumerate(zip(self.gen_to_subid, self.gen_to_sub_pos)): - if sub_pos >= self.sub_info[sub_id]: + for i, (sub_id, sub_pos) in enumerate(zip(cls.gen_to_subid, cls.gen_to_sub_pos)): + if sub_pos >= cls.sub_info[sub_id]: raise IncorrectPositionOfGenerators("for generator {}".format(i)) - for i, (sub_id, sub_pos) in enumerate(zip(self.line_or_to_subid, self.line_or_to_sub_pos)): - if sub_pos >= self.sub_info[sub_id]: + for i, (sub_id, sub_pos) in enumerate(zip(cls.line_or_to_subid, cls.line_or_to_sub_pos)): + if sub_pos >= cls.sub_info[sub_id]: raise IncorrectPositionOfLines("for line {} at origin end".format(i)) - for i, (sub_id, sub_pos) in enumerate(zip(self.line_ex_to_subid, self.line_ex_to_sub_pos)): - if sub_pos >= self.sub_info[sub_id]: + for i, (sub_id, sub_pos) in enumerate(zip(cls.line_ex_to_subid, cls.line_ex_to_sub_pos)): + if sub_pos >= cls.sub_info[sub_id]: raise IncorrectPositionOfLines("for line {} at extremity end".format(i)) - for i, (sub_id, sub_pos) in enumerate(zip(self.storage_to_subid, self.storage_to_sub_pos)): - if sub_pos >= self.sub_info[sub_id]: + for i, (sub_id, sub_pos) in enumerate(zip(cls.storage_to_subid, cls.storage_to_sub_pos)): + if sub_pos >= cls.sub_info[sub_id]: raise IncorrectPositionOfStorages("for storage {}".format(i)) # 
check that i don't have 2 objects with the same id in the "big topo" vector - concat_topo = np.concatenate((self.load_pos_topo_vect.flatten(), - self.gen_pos_topo_vect.flatten(), - self.line_or_pos_topo_vect.flatten(), - self.line_ex_pos_topo_vect.flatten(), - self.storage_pos_topo_vect.flatten())) - if len(np.unique(concat_topo)) != np.sum(self.sub_info): + concat_topo = np.concatenate((cls.load_pos_topo_vect.flatten(), + cls.gen_pos_topo_vect.flatten(), + cls.line_or_pos_topo_vect.flatten(), + cls.line_ex_pos_topo_vect.flatten(), + cls.storage_pos_topo_vect.flatten())) + if len(np.unique(concat_topo)) != np.sum(cls.sub_info): raise EnvError("2 different objects would have the same id in the topology vector, or there would be" "an empty component in this vector.") # check that self.load_pos_topo_vect and co are consistent - load_pos_big_topo = self._aux_pos_big_topo(self.load_to_subid, self.load_to_sub_pos) - if not np.all(load_pos_big_topo == self.load_pos_topo_vect): + load_pos_big_topo = cls._aux_pos_big_topo(cls.load_to_subid, cls.load_to_sub_pos) + if not np.all(load_pos_big_topo == cls.load_pos_topo_vect): raise IncorrectPositionOfLoads("Mismatch between load_to_subid, load_to_sub_pos and load_pos_topo_vect") - gen_pos_big_topo = self._aux_pos_big_topo(self.gen_to_subid, self.gen_to_sub_pos) - if not np.all(gen_pos_big_topo == self.gen_pos_topo_vect): + gen_pos_big_topo = cls._aux_pos_big_topo(cls.gen_to_subid, cls.gen_to_sub_pos) + if not np.all(gen_pos_big_topo == cls.gen_pos_topo_vect): raise IncorrectNumberOfGenerators("Mismatch between gen_to_subid, gen_to_sub_pos and gen_pos_topo_vect") - lines_or_pos_big_topo = self._aux_pos_big_topo(self.line_or_to_subid, self.line_or_to_sub_pos) - if not np.all(lines_or_pos_big_topo == self.line_or_pos_topo_vect): + lines_or_pos_big_topo = cls._aux_pos_big_topo(cls.line_or_to_subid, cls.line_or_to_sub_pos) + if not np.all(lines_or_pos_big_topo == cls.line_or_pos_topo_vect): raise IncorrectPositionOfLines("Mismatch 
between line_or_to_subid, " "line_or_to_sub_pos and line_or_pos_topo_vect") - lines_ex_pos_big_topo = self._aux_pos_big_topo(self.line_ex_to_subid, self.line_ex_to_sub_pos) - if not np.all(lines_ex_pos_big_topo == self.line_ex_pos_topo_vect): + lines_ex_pos_big_topo = cls._aux_pos_big_topo(cls.line_ex_to_subid, cls.line_ex_to_sub_pos) + if not np.all(lines_ex_pos_big_topo == cls.line_ex_pos_topo_vect): raise IncorrectPositionOfLines("Mismatch between line_ex_to_subid, " "line_ex_to_sub_pos and line_ex_pos_topo_vect") - storage_pos_big_topo = self._aux_pos_big_topo(self.storage_to_subid, self.storage_to_sub_pos) - if not np.all(storage_pos_big_topo == self.storage_pos_topo_vect): + storage_pos_big_topo = cls._aux_pos_big_topo(cls.storage_to_subid, cls.storage_to_sub_pos) + if not np.all(storage_pos_big_topo == cls.storage_pos_topo_vect): raise IncorrectPositionOfStorages("Mismatch between storage_to_subid, " "storage_to_sub_pos and storage_pos_topo_vect") # no empty bus: at least one element should be present on each bus - if np.any(self.sub_info < 1): + if np.any(cls.sub_info < 1): raise BackendError("There are {} bus with 0 element connected to it.".format(np.sum(self.sub_info < 1))) # redispatching / unit commitment - if self.redispatching_unit_commitment_availble: - self._check_validity_dispathcing_data() + if cls.redispatching_unit_commitment_availble: + cls._check_validity_dispathcing_data() # shunt data - if self.shunts_data_available: - self._check_validity_shunt_data() + if cls.shunts_data_available: + cls._check_validity_shunt_data() # storage data - self._check_validity_storage_data() + cls._check_validity_storage_data() - def _check_validity_storage_data(self): - if self.storage_type is None: + @classmethod + def _check_validity_storage_data(cls): + if cls.storage_type is None: raise IncorrectNumberOfStorages("self.storage_type is None") - if self.storage_Emax is None: + if cls.storage_Emax is None: raise IncorrectNumberOfStorages("self.storage_Emax is 
None") - if self.storage_Emin is None: + if cls.storage_Emin is None: raise IncorrectNumberOfStorages("self.storage_Emin is None") - if self.storage_max_p_prod is None: + if cls.storage_max_p_prod is None: raise IncorrectNumberOfStorages("self.storage_max_p_prod is None") - if self.storage_max_p_absorb is None: + if cls.storage_max_p_absorb is None: raise IncorrectNumberOfStorages("self.storage_max_p_absorb is None") - if self.storage_marginal_cost is None: + if cls.storage_marginal_cost is None: raise IncorrectNumberOfStorages("self.storage_marginal_cost is None") - if self.storage_loss is None: + if cls.storage_loss is None: raise IncorrectNumberOfStorages("self.storage_loss is None") - if self.storage_discharging_efficiency is None: + if cls.storage_discharging_efficiency is None: raise IncorrectNumberOfStorages("self.storage_discharging_efficiency is None") - if self.storage_charging_efficiency is None: + if cls.storage_charging_efficiency is None: raise IncorrectNumberOfStorages("self.storage_charging_efficiency is None") - if self.n_storage == 0: + if cls.n_storage == 0: # no more check to perform is there is no storage return - if self.storage_type.shape[0] != self.n_storage: + if cls.storage_type.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_type.shape[0] != self.n_storage") - if self.storage_Emax.shape[0] != self.n_storage: + if cls.storage_Emax.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_Emax.shape[0] != self.n_storage") - if self.storage_Emin.shape[0] != self.n_storage: + if cls.storage_Emin.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_Emin.shape[0] != self.n_storage") - if self.storage_max_p_prod.shape[0] != self.n_storage: + if cls.storage_max_p_prod.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_max_p_prod.shape[0] != self.n_storage") - if self.storage_max_p_absorb.shape[0] != self.n_storage: + if cls.storage_max_p_absorb.shape[0] != 
cls.n_storage: raise IncorrectNumberOfStorages("self.storage_max_p_absorb.shape[0] != self.n_storage") - if self.storage_marginal_cost.shape[0] != self.n_storage: + if cls.storage_marginal_cost.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_marginal_cost.shape[0] != self.n_storage") - if self.storage_loss.shape[0] != self.n_storage: + if cls.storage_loss.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_loss.shape[0] != self.n_storage") - if self.storage_discharging_efficiency.shape[0] != self.n_storage: + if cls.storage_discharging_efficiency.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_discharging_efficiency.shape[0] != self.n_storage") - if self.storage_charging_efficiency.shape[0] != self.n_storage: + if cls.storage_charging_efficiency.shape[0] != cls.n_storage: raise IncorrectNumberOfStorages("self.storage_charging_efficiency.shape[0] != self.n_storage") - if np.any(~np.isfinite(self.storage_Emax)): + if np.any(~np.isfinite(cls.storage_Emax)): raise BackendError("np.any(~np.isfinite(self.storage_Emax))") - if np.any(~np.isfinite(self.storage_Emin)): + if np.any(~np.isfinite(cls.storage_Emin)): raise BackendError("np.any(~np.isfinite(self.storage_Emin))") - if np.any(~np.isfinite(self.storage_max_p_prod)): + if np.any(~np.isfinite(cls.storage_max_p_prod)): raise BackendError("np.any(~np.isfinite(self.storage_max_p_prod))") - if np.any(~np.isfinite(self.storage_max_p_absorb)): + if np.any(~np.isfinite(cls.storage_max_p_absorb)): raise BackendError("np.any(~np.isfinite(self.storage_max_p_absorb))") - if np.any(~np.isfinite(self.storage_marginal_cost)): + if np.any(~np.isfinite(cls.storage_marginal_cost)): raise BackendError("np.any(~np.isfinite(self.storage_marginal_cost))") - if np.any(~np.isfinite(self.storage_loss)): + if np.any(~np.isfinite(cls.storage_loss)): raise BackendError("np.any(~np.isfinite(self.storage_loss))") - if np.any(~np.isfinite(self.storage_charging_efficiency)): + 
if np.any(~np.isfinite(cls.storage_charging_efficiency)): raise BackendError("np.any(~np.isfinite(self.storage_charging_efficiency))") - if np.any(~np.isfinite(self.storage_discharging_efficiency)): + if np.any(~np.isfinite(cls.storage_discharging_efficiency)): raise BackendError("np.any(~np.isfinite(self.storage_discharging_efficiency))") - if np.any(self.storage_Emax < self.storage_Emin): - tmp = np.where(self.storage_Emax < self.storage_Emin)[0] + if np.any(cls.storage_Emax < cls.storage_Emin): + tmp = np.where(cls.storage_Emax < cls.storage_Emin)[0] raise BackendError(f"storage_Emax < storage_Emin for storage units with ids: {tmp}") - if np.any(self.storage_Emax < 0.): - tmp = np.where(self.storage_Emax < 0.)[0] + if np.any(cls.storage_Emax < 0.): + tmp = np.where(cls.storage_Emax < 0.)[0] raise BackendError(f"self.storage_Emax < 0. for storage units with ids: {tmp}") - if np.any(self.storage_Emin < 0.): - tmp = np.where(self.storage_Emin < 0.)[0] + if np.any(cls.storage_Emin < 0.): + tmp = np.where(cls.storage_Emin < 0.)[0] raise BackendError(f"self.storage_Emin < 0. for storage units with ids: {tmp}") - if np.any(self.storage_max_p_prod < 0.): - tmp = np.where(self.storage_max_p_prod < 0.)[0] + if np.any(cls.storage_max_p_prod < 0.): + tmp = np.where(cls.storage_max_p_prod < 0.)[0] raise BackendError(f"self.storage_max_p_prod < 0. for storage units with ids: {tmp}") - if np.any(self.storage_max_p_absorb < 0.): - tmp = np.where(self.storage_max_p_absorb < 0.)[0] + if np.any(cls.storage_max_p_absorb < 0.): + tmp = np.where(cls.storage_max_p_absorb < 0.)[0] raise BackendError(f"self.storage_max_p_absorb < 0. for storage units with ids: {tmp}") - if np.any(self.storage_loss < 0.): - tmp = np.where(self.storage_loss < 0.)[0] + if np.any(cls.storage_loss < 0.): + tmp = np.where(cls.storage_loss < 0.)[0] raise BackendError(f"self.storage_loss < 0. 
for storage units with ids: {tmp}") - if np.any(self.storage_discharging_efficiency <= 0.): - tmp = np.where(self.storage_discharging_efficiency <= 0.)[0] + if np.any(cls.storage_discharging_efficiency <= 0.): + tmp = np.where(cls.storage_discharging_efficiency <= 0.)[0] raise BackendError(f"self.storage_discharging_efficiency <= 0. for storage units with ids: {tmp}") - if np.any(self.storage_discharging_efficiency > 1.): - tmp = np.where(self.storage_discharging_efficiency > 1.)[0] + if np.any(cls.storage_discharging_efficiency > 1.): + tmp = np.where(cls.storage_discharging_efficiency > 1.)[0] raise BackendError(f"self.storage_discharging_efficiency > 1. for storage units with ids: {tmp}") - if np.any(self.storage_charging_efficiency < 0.): - tmp = np.where(self.storage_charging_efficiency < 0.)[0] + if np.any(cls.storage_charging_efficiency < 0.): + tmp = np.where(cls.storage_charging_efficiency < 0.)[0] raise BackendError(f"self.storage_charging_efficiency < 0. for storage units with ids: {tmp}") - if np.any(self.storage_charging_efficiency > 1.): - tmp = np.where(self.storage_charging_efficiency > 1.)[0] + if np.any(cls.storage_charging_efficiency > 1.): + tmp = np.where(cls.storage_charging_efficiency > 1.)[0] raise BackendError(f"self.storage_charging_efficiency > 1. for storage units with ids: {tmp}") - if np.any(self.storage_loss > self.storage_max_p_absorb): - tmp = np.where(self.storage_loss > self.storage_max_p_absorb)[0] + if np.any(cls.storage_loss > cls.storage_max_p_absorb): + tmp = np.where(cls.storage_loss > cls.storage_max_p_absorb)[0] raise BackendError(f"Some storage units are such that their loss (self.storage_loss) is higher " f"than the maximum power at which they can be charged (self.storage_max_p_absorb). " f"Such storage units are doomed to discharged (due to losses) without anything " f"being able to charge them back. This really un interesting behaviour is not " f"supported by grid2op. 
Please check storage data for units {tmp}") - def _check_validity_shunt_data(self): - if self.n_shunt is None: + @classmethod + def _check_validity_shunt_data(cls): + if cls.n_shunt is None: raise IncorrectNumberOfElements("Backend is supposed to support shunts, but \"n_shunt\" is not set.") - if self.name_shunt is None: + if cls.name_shunt is None: raise IncorrectNumberOfElements("Backend is supposed to support shunts, but \"name_shunt\" is not set.") - if self.shunt_to_subid is None: + if cls.shunt_to_subid is None: raise IncorrectNumberOfElements("Backend is supposed to support shunts, but \"shunt_to_subid\" is not set.") - if not isinstance(self.name_shunt, np.ndarray): + if not isinstance(cls.name_shunt, np.ndarray): try: - self.name_shunt = np.array(self.name_shunt) - self.name_shunt = self.name_shunt.astype(np.str) - except Exception as e: + cls.name_shunt = np.array(cls.name_shunt) + cls.name_shunt = cls.name_shunt.astype(np.str) + except Exception as exc: raise EnvError("name_shunt should be convertible to a numpy array with dtype \"str\".") - if not isinstance(self.shunt_to_subid, np.ndarray): + if not isinstance(cls.shunt_to_subid, np.ndarray): try: - self.shunt_to_subid = np.array(self.shunt_to_subid) - self.shunt_to_subid = self.shunt_to_subid.astype(dt_int) + cls.shunt_to_subid = np.array(cls.shunt_to_subid) + cls.shunt_to_subid = cls.shunt_to_subid.astype(dt_int) except Exception as e: raise EnvError("shunt_to_subid should be convertible to a numpy array with dtype \"int\".") - if self.name_shunt.shape[0] != self.n_shunt: + if cls.name_shunt.shape[0] != cls.n_shunt: raise IncorrectNumberOfElements("Backend is supposed to support shunts, but \"name_shunt\" has not " "\"n_shunt\" elements.") - if self.shunt_to_subid.shape[0] != self.n_shunt: + if cls.shunt_to_subid.shape[0] != cls.n_shunt: raise IncorrectNumberOfElements("Backend is supposed to support shunts, but \"shunt_to_subid\" has not " "\"n_shunt\" elements.") - if self.n_shunt > 0: + if 
cls.n_shunt > 0: # check the substation id only if there are shunt - if np.min(self.shunt_to_subid) < 0: + if np.min(cls.shunt_to_subid) < 0: raise EnvError("Some shunt is connected to a negative substation id.") - if np.max(self.shunt_to_subid) > self.n_sub: + if np.max(cls.shunt_to_subid) > cls.n_sub: raise EnvError("Some shunt is supposed to be connected to substations with id {} which" "is greater than the number of substations of the grid, which is {}." - "".format(np.max(self.shunt_to_subid), self.n_sub)) + "".format(np.max(cls.shunt_to_subid), cls.n_sub)) - def _check_validity_dispathcing_data(self): - if self.gen_type is None: + @classmethod + def _check_validity_dispathcing_data(cls): + if cls.gen_type is None: raise InvalidRedispatching("Impossible to recognize the type of generators (gen_type) when " "redispatching is supposed to be available.") - if self.gen_pmin is None: + if cls.gen_pmin is None: raise InvalidRedispatching("Impossible to recognize the pmin of generators (gen_pmin) when " "redispatching is supposed to be available.") - if self.gen_pmax is None: + if cls.gen_pmax is None: raise InvalidRedispatching("Impossible to recognize the pmax of generators (gen_pmax) when " "redispatching is supposed to be available.") - if self.gen_redispatchable is None: + if cls.gen_redispatchable is None: raise InvalidRedispatching("Impossible to know which generator can be dispatched (gen_redispatchable)" " when redispatching is supposed to be available.") - if self.gen_max_ramp_up is None: + if cls.gen_max_ramp_up is None: raise InvalidRedispatching("Impossible to recognize the ramp up of generators (gen_max_ramp_up)" " when redispatching is supposed to be available.") - if self.gen_max_ramp_down is None: + if cls.gen_max_ramp_down is None: raise InvalidRedispatching("Impossible to recognize the ramp up of generators (gen_max_ramp_down)" " when redispatching is supposed to be available.") - if self.gen_min_uptime is None: + if cls.gen_min_uptime is None: 
raise InvalidRedispatching("Impossible to recognize the min uptime of generators (gen_min_uptime)" " when redispatching is supposed to be available.") - if self.gen_min_downtime is None: + if cls.gen_min_downtime is None: raise InvalidRedispatching("Impossible to recognize the min downtime of generators (gen_min_downtime)" " when redispatching is supposed to be available.") - if self.gen_cost_per_MW is None: + if cls.gen_cost_per_MW is None: raise InvalidRedispatching("Impossible to recognize the marginal costs of generators (gen_cost_per_MW)" " when redispatching is supposed to be available.") - if self.gen_startup_cost is None: + if cls.gen_startup_cost is None: raise InvalidRedispatching("Impossible to recognize the start up cost of generators (gen_startup_cost)" " when redispatching is supposed to be available.") - if self.gen_shutdown_cost is None: + if cls.gen_shutdown_cost is None: raise InvalidRedispatching("Impossible to recognize the shut down cost of generators " "(gen_shutdown_cost) when redispatching is supposed to be available.") - if self.gen_renewable is None: + if cls.gen_renewable is None: raise InvalidRedispatching("Impossible to recognize the whether generators comes from renewable energy " "sources " "(gen_renewable) when redispatching is supposed to be available.") - if len(self.gen_type) != self.n_gen: + if len(cls.gen_type) != cls.n_gen: raise InvalidRedispatching("Invalid length for the type of generators (gen_type) when " "redispatching is supposed to be available.") - if len(self.gen_pmin) != self.n_gen: + if len(cls.gen_pmin) != cls.n_gen: raise InvalidRedispatching("Invalid length for the pmin of generators (gen_pmin) when " "redispatching is supposed to be available.") - if len(self.gen_pmax) != self.n_gen: + if len(cls.gen_pmax) != cls.n_gen: raise InvalidRedispatching("Invalid length for the pmax of generators (gen_pmax) when " "redispatching is supposed to be available.") - if len(self.gen_redispatchable) != self.n_gen: + if 
len(cls.gen_redispatchable) != cls.n_gen: raise InvalidRedispatching("Invalid length for which generator can be dispatched (gen_redispatchable)" " when redispatching is supposed to be available.") - if len(self.gen_max_ramp_up) != self.n_gen: + if len(cls.gen_max_ramp_up) != cls.n_gen: raise InvalidRedispatching("Invalid length for the ramp up of generators (gen_max_ramp_up)" " when redispatching is supposed to be available.") - if len(self.gen_max_ramp_down) != self.n_gen: + if len(cls.gen_max_ramp_down) != cls.n_gen: raise InvalidRedispatching("Invalid length for the ramp up of generators (gen_max_ramp_down)" " when redispatching is supposed to be available.") - if len(self.gen_min_uptime) != self.n_gen: + if len(cls.gen_min_uptime) != cls.n_gen: raise InvalidRedispatching("Invalid length for the min uptime of generators (gen_min_uptime)" " when redispatching is supposed to be available.") - if len(self.gen_min_downtime) != self.n_gen: + if len(cls.gen_min_downtime) != cls.n_gen: raise InvalidRedispatching("Invalid length for the min downtime of generators (gen_min_downtime)" " when redispatching is supposed to be available.") - if len(self.gen_cost_per_MW) != self.n_gen: + if len(cls.gen_cost_per_MW) != cls.n_gen: raise InvalidRedispatching("Invalid length for the marginal costs of generators (gen_cost_per_MW)" " when redispatching is supposed to be available.") - if len(self.gen_startup_cost) != self.n_gen: + if len(cls.gen_startup_cost) != cls.n_gen: raise InvalidRedispatching("Invalid length for the start up cost of generators (gen_startup_cost)" " when redispatching is supposed to be available.") - if len(self.gen_shutdown_cost) != self.n_gen: + if len(cls.gen_shutdown_cost) != cls.n_gen: raise InvalidRedispatching("Invalid length for the shut down cost of generators " "(gen_shutdown_cost) when redispatching is supposed to be available.") - if len(self.gen_renewable) != self.n_gen: + if len(cls.gen_renewable) != cls.n_gen: raise InvalidRedispatching("Invalid 
length for the renewable flag vector" "(gen_renewable) when redispatching is supposed to be available.") - if np.any(self.gen_min_uptime < 0): + if np.any(cls.gen_min_uptime < 0): raise InvalidRedispatching("Minimum uptime of generator (gen_min_uptime) cannot be negative") - if np.any(self.gen_min_downtime < 0): + if np.any(cls.gen_min_downtime < 0): raise InvalidRedispatching("Minimum downtime of generator (gen_min_downtime) cannot be negative") - for el in self.gen_type: + for el in cls.gen_type: if not el in ["solar", "wind", "hydro", "thermal", "nuclear"]: raise InvalidRedispatching("Unknown generator type : {}".format(el)) - if np.any(self.gen_pmin < 0.): + if np.any(cls.gen_pmin < 0.): raise InvalidRedispatching("One of the Pmin (gen_pmin) is negative") - if np.any(self.gen_pmax < 0.): + if np.any(cls.gen_pmax < 0.): raise InvalidRedispatching("One of the Pmax (gen_pmax) is negative") - if np.any(self.gen_max_ramp_down < 0.): + if np.any(cls.gen_max_ramp_down < 0.): raise InvalidRedispatching("One of the ramp up (gen_max_ramp_down) is negative") - if np.any(self.gen_max_ramp_up < 0.): + if np.any(cls.gen_max_ramp_up < 0.): raise InvalidRedispatching("One of the ramp down (gen_max_ramp_up) is negative") - if np.any(self.gen_startup_cost < 0.): + if np.any(cls.gen_startup_cost < 0.): raise InvalidRedispatching("One of the start up cost (gen_startup_cost) is negative") - if np.any(self.gen_shutdown_cost < 0.): + if np.any(cls.gen_shutdown_cost < 0.): raise InvalidRedispatching("One of the start up cost (gen_shutdown_cost) is negative") for el, type_ in zip(["gen_type", "gen_pmin", "gen_pmax", "gen_redispatchable", "gen_max_ramp_up", @@ -1823,22 +1978,23 @@ def _check_validity_dispathcing_data(self): [str, dt_float, dt_float, dt_bool, dt_float, dt_float, dt_int, dt_int, dt_float, dt_float, dt_float, dt_bool]): - if not isinstance(getattr(self, el), np.ndarray): + if not isinstance(getattr(cls, el), np.ndarray): try: - setattr(self, el, getattr(self, 
el).astype(type_)) + setattr(cls, el, getattr(cls, el).astype(type_)) except Exception as exc_: raise InvalidRedispatching("{} should be convertible to a numpy array with error:\n \"{}\"" "".format(el, exc_)) - if not np.issubdtype(getattr(self, el).dtype, np.dtype(type_).type): + if not np.issubdtype(getattr(cls, el).dtype, np.dtype(type_).type): try: - setattr(self, el, getattr(self, el).astype(type_)) + setattr(cls, el, getattr(cls, el).astype(type_)) except Exception as exc_: raise InvalidRedispatching("{} should be convertible data should be convertible to " "{} with error: \n\"{}\"".format(el, type_, exc_)) - if np.any(self.gen_max_ramp_up[self.gen_redispatchable] > self.gen_pmax[self.gen_redispatchable]): + if np.any(cls.gen_max_ramp_up[cls.gen_redispatchable] > cls.gen_pmax[cls.gen_redispatchable]): raise InvalidRedispatching("Invalid maximum ramp for some generator (above pmax)") - def attach_layout(self, grid_layout): + @classmethod + def attach_layout(cls, grid_layout): """ INTERNAL @@ -1855,7 +2011,7 @@ def attach_layout(self, grid_layout): See definition of :attr:`GridObjects.grid_layout` for more information. 
""" - GridObjects.grid_layout = grid_layout + cls.grid_layout = grid_layout @classmethod def set_env_name(cls, name): @@ -1870,7 +2026,7 @@ def set_env_name(cls, name): cls.env_name = name @classmethod - def init_grid(cls, gridobj, force=False): + def init_grid(cls, gridobj, force=False, extra_name=None, force_module=None): """ INTERNAL @@ -1905,93 +2061,21 @@ def init_grid(cls, gridobj, force=False): # i recreate the variable del globals()[name_res] - class res(cls): - pass - res.glop_version = gridobj.glop_version - - res.name_gen = gridobj.name_gen - res.name_load = gridobj.name_load - res.name_line = gridobj.name_line - res.name_sub = gridobj.name_sub - res.name_storage = gridobj.name_storage - - res.n_gen = len(gridobj.name_gen) - res.n_load = len(gridobj.name_load) - res.n_line = len(gridobj.name_line) - res.n_sub = len(gridobj.name_sub) - res.n_storage = len(gridobj.name_storage) + cls_attr_as_dict = {} + GridObjects._make_cls_dict_extended(gridobj, cls_attr_as_dict, as_list=False) + res_cls = type(name_res, (cls, ), cls_attr_as_dict) + res_cls._compute_pos_big_topo_cls() + if res_cls.glop_version != grid2op.__version__: + res_cls.process_grid2op_compat() - res.sub_info = gridobj.sub_info - res.dim_topo = np.sum(gridobj.sub_info) + if force_module is not None: + res_cls.__module__ = force_module # hack because otherwise it says "abc" which is not the case + # best would be to have a look at https://docs.python.org/3/library/types.html - # to which substation is connected each element - res.load_to_subid = gridobj.load_to_subid - res.gen_to_subid = gridobj.gen_to_subid - res.line_or_to_subid = gridobj.line_or_to_subid - res.line_ex_to_subid = gridobj.line_ex_to_subid - res.storage_to_subid = gridobj.storage_to_subid - - # which index has this element in the substation vector - res.load_to_sub_pos = gridobj.load_to_sub_pos - res.gen_to_sub_pos = gridobj.gen_to_sub_pos - res.line_or_to_sub_pos = gridobj.line_or_to_sub_pos - res.line_ex_to_sub_pos = 
gridobj.line_ex_to_sub_pos - res.storage_to_sub_pos = gridobj.storage_to_sub_pos - - # which index has this element in the topology vector - res.load_pos_topo_vect = gridobj.load_pos_topo_vect - res.gen_pos_topo_vect = gridobj.gen_pos_topo_vect - res.line_or_pos_topo_vect = gridobj.line_or_pos_topo_vect - res.line_ex_pos_topo_vect = gridobj.line_ex_pos_topo_vect - res.storage_pos_topo_vect = gridobj.storage_pos_topo_vect - - res.grid_objects_types = gridobj.grid_objects_types - res._topo_vect_to_sub = gridobj._topo_vect_to_sub - - # for redispatching / unit commitment (not available for all environment) - res.gen_type = gridobj.gen_type - res.gen_pmin = gridobj.gen_pmin - res.gen_pmax = gridobj.gen_pmax - res.gen_redispatchable = gridobj.gen_redispatchable - res.gen_max_ramp_up = gridobj.gen_max_ramp_up - res.gen_max_ramp_down = gridobj.gen_max_ramp_down - res.gen_min_uptime = gridobj.gen_min_uptime - res.gen_min_downtime = gridobj.gen_min_downtime - res.gen_cost_per_MW = gridobj.gen_cost_per_MW - res.gen_startup_cost = gridobj.gen_startup_cost - res.gen_shutdown_cost = gridobj.gen_shutdown_cost - res.redispatching_unit_commitment_availble = gridobj.redispatching_unit_commitment_availble - res.gen_renewable = gridobj.gen_renewable - - # grid layout (not available for all environment - res.grid_layout = gridobj.grid_layout - - # shuunts data (not available for all backend) - res.shunts_data_available = gridobj.shunts_data_available - res.n_shunt = gridobj.n_shunt - res.name_shunt = gridobj.name_shunt - res.shunt_to_subid = gridobj.shunt_to_subid - res.env_name = gridobj.env_name - - # other storage data - res.storage_type = gridobj.storage_type - res.storage_Emax = gridobj.storage_Emax - res.storage_Emin = gridobj.storage_Emin - res.storage_max_p_prod = gridobj.storage_max_p_prod - res.storage_max_p_absorb = gridobj.storage_max_p_absorb - res.storage_marginal_cost = gridobj.storage_marginal_cost - res.storage_loss = gridobj.storage_loss - 
res.storage_charging_efficiency = gridobj.storage_charging_efficiency - res.storage_discharging_efficiency = gridobj.storage_discharging_efficiency - - res.__name__ = name_res - res.__qualname__ = "{}_{}".format(cls.__qualname__, gridobj.env_name) - - if res.glop_version != grid2op.__version__: - res.process_grid2op_compat() - - globals()[name_res] = res - return res + # store the type created here in the "globals" to prevent the initialization of the same class over and over + globals()[name_res] = res_cls + del res_cls + return globals()[name_res] @classmethod def process_grid2op_compat(cls): @@ -2002,7 +2086,8 @@ def process_grid2op_compat(cls): """ pass - def get_obj_connect_to(self, _sentinel=None, substation_id=None): + @classmethod + def get_obj_connect_to(cls, _sentinel=None, substation_id=None): """ Get all the object connected to a given substation. This is particularly usefull if you want to know the names of the generator / load connected to a given substation, or which extremity etc. @@ -2060,20 +2145,21 @@ def get_obj_connect_to(self, _sentinel=None, substation_id=None): if substation_id is None: raise Grid2OpException("You ask the composition of a substation without specifying its id." "Please provide \"substation_id\"") - if substation_id >= len(self.sub_info): + if substation_id >= len(cls.sub_info): raise Grid2OpException("There are no substation of id \"substation_id={}\" in this grid." 
"".format(substation_id)) - - res = {} - res["loads_id"] = np.where(self.load_to_subid == substation_id)[0] - res["generators_id"] = np.where(self.gen_to_subid == substation_id)[0] - res["lines_or_id"] = np.where(self.line_or_to_subid == substation_id)[0] - res["lines_ex_id"] = np.where(self.line_ex_to_subid == substation_id)[0] - res["storages_id"] = np.where(self.storage_to_subid == substation_id)[0] - res["nb_elements"] = self.sub_info[substation_id] + res = { + "loads_id": np.where(cls.load_to_subid == substation_id)[0], + "generators_id": np.where(cls.gen_to_subid == substation_id)[0], + "lines_or_id": np.where(cls.line_or_to_subid == substation_id)[0], + "lines_ex_id": np.where(cls.line_ex_to_subid == substation_id)[0], + "storages_id": np.where(cls.storage_to_subid == substation_id)[0], + "nb_elements": cls.sub_info[substation_id] + } return res - def get_obj_substations(self, _sentinel=None, substation_id=None): + @classmethod + def get_obj_substations(cls, _sentinel=None, substation_id=None): """ Return the object connected as a substation in form of a numpy array instead of a dictionary (as opposed to :func:`GridObjects.get_obj_connect_to`). @@ -2154,19 +2240,19 @@ def get_obj_substations(self, _sentinel=None, substation_id=None): if substation_id is None: raise Grid2OpException("You ask the composition of a substation without specifying its id." "Please provide \"substation_id\"") - if substation_id >= len(self.sub_info): + if substation_id >= len(cls.sub_info): raise Grid2OpException("There are no substation of id \"substation_id={}\" in this grid." 
"".format(substation_id)) - dict_ = self.get_obj_connect_to(substation_id=substation_id) + dict_ = cls.get_obj_connect_to(substation_id=substation_id) res = np.full((dict_["nb_elements"], 6), fill_value=-1, dtype=dt_int) # 0 -> load, 1-> gen, 2 -> lines_or, 3 -> lines_ex - res[:, self.SUB_COL] = substation_id - res[self.load_to_sub_pos[dict_["loads_id"]], self.LOA_COL] = dict_["loads_id"] - res[self.gen_to_sub_pos[dict_["generators_id"]], self.GEN_COL] = dict_["generators_id"] - res[self.line_or_to_sub_pos[dict_["lines_or_id"]], self.LOR_COL] = dict_["lines_or_id"] - res[self.line_ex_to_sub_pos[dict_["lines_ex_id"]], self.LEX_COL] = dict_["lines_ex_id"] - res[self.storage_to_sub_pos[dict_["storages_id"]], self.STORAGE_COL] = dict_["storages_id"] + res[:, cls.SUB_COL] = substation_id + res[cls.load_to_sub_pos[dict_["loads_id"]], cls.LOA_COL] = dict_["loads_id"] + res[cls.gen_to_sub_pos[dict_["generators_id"]], cls.GEN_COL] = dict_["generators_id"] + res[cls.line_or_to_sub_pos[dict_["lines_or_id"]], cls.LOR_COL] = dict_["lines_or_id"] + res[cls.line_ex_to_sub_pos[dict_["lines_ex_id"]], cls.LEX_COL] = dict_["lines_ex_id"] + res[cls.storage_to_sub_pos[dict_["storages_id"]], cls.STORAGE_COL] = dict_["storages_id"] return res def get_lines_id(self, _sentinel=None, from_=None, to_=None): @@ -2372,84 +2458,145 @@ def get_storages_id(self, sub_id): return res - @classmethod - def cls_to_dict(cls): - """ - INTERNAL - - .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ - This is used internally only to save action_space or observation_space for example. Do not - attempt to use it in a different context. - - Convert the object as a dictionary. - Note that unless this method is overridden, a call to it will only output the - - Returns - ------- - res: ``dict`` - The representation of the object as a dictionary that can be json serializable. 
- """ - res = {} - save_to_dict(res, cls, "glop_version", str) - save_to_dict(res, cls, "name_gen", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "name_load", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "name_line", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "name_sub", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "name_storage", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "env_name", str) - - save_to_dict(res, cls, "sub_info", lambda li: [int(el) for el in li]) - - save_to_dict(res, cls, "load_to_subid", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "gen_to_subid", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "line_or_to_subid", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "line_ex_to_subid", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "storage_to_subid", lambda li: [int(el) for el in li]) - - save_to_dict(res, cls, "load_to_sub_pos", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "gen_to_sub_pos", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "line_or_to_sub_pos", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "line_ex_to_sub_pos", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "storage_to_sub_pos", lambda li: [int(el) for el in li]) - - save_to_dict(res, cls, "load_pos_topo_vect", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "gen_pos_topo_vect", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "line_or_pos_topo_vect", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "line_ex_pos_topo_vect", lambda li: [int(el) for el in li]) - save_to_dict(res, cls, "storage_pos_topo_vect", lambda li: [int(el) for el in li]) + @staticmethod + def _make_cls_dict(cls, res, as_list=True, copy_=True): + """NB: `cls` can be here a class or an object of a class...""" + save_to_dict(res, cls, "glop_version", str, copy_) + save_to_dict(res, cls, "name_gen", 
(lambda arr: [str(el) for el in arr]) if as_list else None, + copy_) + save_to_dict(res, cls, "name_load", (lambda li: [str(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "name_line", (lambda li: [str(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "name_sub", (lambda li: [str(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "name_storage", (lambda li: [str(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "env_name", str, + copy_) + + save_to_dict(res, cls, "sub_info", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + + save_to_dict(res, cls, "load_to_subid", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "gen_to_subid", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "line_or_to_subid", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "line_ex_to_subid", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_to_subid", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + + save_to_dict(res, cls, "load_to_sub_pos", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "gen_to_sub_pos", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "line_or_to_sub_pos", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "line_ex_to_sub_pos", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_to_sub_pos", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + + save_to_dict(res, cls, "load_pos_topo_vect", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "gen_pos_topo_vect", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + 
save_to_dict(res, cls, "line_or_pos_topo_vect", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "line_ex_pos_topo_vect", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_pos_topo_vect", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) # redispatching if cls.redispatching_unit_commitment_availble: for nm_attr, type_attr in zip(cls._li_attr_disp, cls._type_attr_disp): - save_to_dict(res, cls, nm_attr, lambda li: [type_attr(el) for el in li]) + save_to_dict(res, cls, nm_attr, (lambda li: [type_attr(el) for el in li]) if as_list else None, + copy_) else: for nm_attr in cls._li_attr_disp: res[nm_attr] = None # shunts if cls.grid_layout is not None: - save_to_dict(res, cls, "grid_layout", lambda gl: {str(k): [float(x), float(y)] for k, (x,y) in gl.items()}) + save_to_dict(res, cls, "grid_layout", + (lambda gl: {str(k): [float(x), float(y)] for k, (x, y) in gl.items()}) if as_list else None, + copy_) else: res["grid_layout"] = None # shunts if cls.shunts_data_available: - save_to_dict(res, cls, "name_shunt", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "shunt_to_subid", lambda li: [int(el) for el in li]) + save_to_dict(res, cls, "name_shunt", (lambda li: [str(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "shunt_to_subid", (lambda li: [int(el) for el in li]) if as_list else None, + copy_) else: res["name_shunt"] = None res["shunt_to_subid"] = None # storage data - save_to_dict(res, cls, "storage_type", lambda li: [str(el) for el in li]) - save_to_dict(res, cls, "storage_Emax", lambda li: [float(el) for el in li]) - save_to_dict(res, cls, "storage_Emin", lambda li: [float(el) for el in li]) - save_to_dict(res, cls, "storage_max_p_prod", lambda li: [float(el) for el in li]) - save_to_dict(res, cls, "storage_max_p_absorb", lambda li: [float(el) for el in li]) - save_to_dict(res, cls, "storage_marginal_cost", lambda li: 
[float(el) for el in li]) - save_to_dict(res, cls, "storage_loss", lambda li: [float(el) for el in li]) - save_to_dict(res, cls, "storage_charging_efficiency", lambda li: [float(el) for el in li]) - save_to_dict(res, cls, "storage_discharging_efficiency", lambda li: [float(el) for el in li]) + save_to_dict(res, cls, "storage_type", (lambda li: [str(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_Emax", (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_Emin", (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_max_p_prod", (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_max_p_absorb", (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_marginal_cost", (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_loss", (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_charging_efficiency", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + save_to_dict(res, cls, "storage_discharging_efficiency", + (lambda li: [float(el) for el in li]) if as_list else None, + copy_) + return res + + @staticmethod + def _make_cls_dict_extended(cls, res, as_list=True, copy_=True): + """add the n_gen and all in the class created""" + GridObjects._make_cls_dict(cls, res, as_list=as_list, copy_=copy_) + res["n_gen"] = cls.n_gen + res["n_load"] = cls.n_load + res["n_line"] = cls.n_line + res["n_sub"] = cls.n_sub + res["dim_topo"] = 1 * cls.dim_topo + # shunt + res["n_shunt"] = cls.n_shunt + res["shunts_data_available"] = cls.shunts_data_available + # storage + res["n_storage"] = cls.n_storage + # redispatching / curtailment + res["redispatching_unit_commitment_availble"] = cls.redispatching_unit_commitment_availble + + @classmethod + 
def cls_to_dict(cls): + """ + INTERNAL + + .. warning:: /!\\\\ Internal, do not use unless you know what you are doing /!\\\\ + This is used internally only to save action_space or observation_space for example. Do not + attempt to use it in a different context. + + Convert the object as a dictionary. + Note that unless this method is overridden, a call to it will only output the + + Returns + ------- + res: ``dict`` + The representation of the object as a dictionary that can be json serializable. + """ + res = {} + GridObjects._make_cls_dict(cls, res) return res @staticmethod @@ -2476,6 +2623,7 @@ def from_dict(dict_): The object of the proper class that were initially represented as a dictionary. """ + # TODO refacto that with the "type(blablabla, blabla, blabal)" syntax ! class res(GridObjects): pass @@ -2581,7 +2729,7 @@ class res(GridObjects): # retrieve the redundant information that are not stored (for efficiency) obj_ = cls() - obj_._compute_pos_big_topo() + obj_._compute_pos_big_topo_cls() cls = cls.init_grid(obj_, force=True) return cls() @@ -2628,9 +2776,17 @@ def same_grid_class(cls, other_cls) -> bool: absence of shunts or storage units for example. """ + if cls.env_name == other_cls.env_name: + # speed optimization here: if the two classes are from the same environment + # they are from the same grid ! 
+ return True + # this implementation is 6 times faster than the "cls_to_dict" one below, so i kept it - me_dict = cls.__dict__ - other_cls_dict = other_cls.__dict__ + me_dict = {} + GridObjects._make_cls_dict_extended(cls, me_dict, as_list=False, copy_=False) + other_cls_dict = {} + GridObjects._make_cls_dict_extended(other_cls, other_cls_dict, as_list=False, copy_=False) + if me_dict.keys() - other_cls_dict.keys(): # one key is in me but not in other return False diff --git a/grid2op/Space/space_utils.py b/grid2op/Space/space_utils.py index b0c204b94..ebaa43edc 100644 --- a/grid2op/Space/space_utils.py +++ b/grid2op/Space/space_utils.py @@ -6,6 +6,7 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. +import copy from grid2op.Exceptions import Grid2OpException @@ -20,11 +21,36 @@ def extract_from_dict(dict_, key, converter): return res -def save_to_dict(res_dict, me, key, converter): +def save_to_dict(res_dict, me, key, converter, copy_=True): + """ + + Parameters + ---------- + res_dict: + output dictionary + me: + the object to serialize in a dict + key: + the attribute of the object we want to save + converter: + if the attribute need to be converted (for example if you later want to serialize the dictionary as json) + copy_: + whether you copy the attribute or not (only applies if converter is None) + + Returns + ------- + + """ if not hasattr(me, key): raise Grid2OpException("Impossible to find key \"{}\" while loading the dictionary.".format(key)) try: - res = converter(getattr(me, key)) + if converter is not None: + res = converter(getattr(me, key)) + else: + if copy_: + res = copy.deepcopy(getattr(me, key)) + else: + res = getattr(me, key) except Exception as exc_: raise Grid2OpException("Impossible to convert \"{}\" into class {} with exception " "\n\"{}\"".format(key, converter, exc_)) diff --git a/grid2op/data_test/5bus_example_th_lim_dict/config.py 
b/grid2op/data_test/5bus_example_th_lim_dict/config.py new file mode 100644 index 000000000..1a6487476 --- /dev/null +++ b/grid2op/data_test/5bus_example_th_lim_dict/config.py @@ -0,0 +1,18 @@ +from grid2op.Action import TopologyAction +from grid2op.Reward import L2RPNReward +from grid2op.Rules import DefaultRules +from grid2op.Chronics import ChangeNothing +from grid2op.Backend import PandaPowerBackend + +config = { + "backend": PandaPowerBackend, + "action_class": TopologyAction, + "observation_class": None, + "reward_class": L2RPNReward, + "gamerules_class": DefaultRules, + "chronics_class": ChangeNothing, + "volagecontroler_class": None, + "thermal_limits": {'0_1_0': 200., '0_2_1': 300., '0_3_2': 500., '0_4_3': 600., '1_2_4': 700., '2_3_5': 800., + '2_3_6': 900., '3_4_7': 1000.}, + "names_chronics_to_grid": None +} diff --git a/grid2op/data_test/5bus_example_th_lim_dict/grid.json b/grid2op/data_test/5bus_example_th_lim_dict/grid.json new file mode 100644 index 000000000..9af1df1bb --- /dev/null +++ b/grid2op/data_test/5bus_example_th_lim_dict/grid.json @@ -0,0 +1,1740 @@ +{ + "_module": "pandapower.auxiliary", + "_class": "pandapowerNet", + "_object": { + "bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"vn_kv\",\"type\",\"zone\",\"in_service\"],\"index\":[0,1,2,3,4],\"data\":[[\"substation_1\",100.0,\"b\",null,true],[\"substation_2\",100.0,\"b\",null,true],[\"substation_3\",100.0,\"b\",null,true],[\"substation_4\",100.0,\"b\",null,true],[\"substation_5\",100.0,\"b\",null,true]]}", + "orient": "split", + "dtype": { + "name": "object", + "vn_kv": "float64", + "type": "object", + "zone": "object", + "in_service": "bool" + } + }, + "load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"const_z_percent\",\"const_i_percent\",\"sn_mva\",\"scaling\",\"in_service\",\"type\"],\"index\":[0,1,2],\"data\":[[\"load_0_0\",0,10.0,7.0,0.0,0.0,null,1.0,true,null],[\"load_3_1\",3,10.0,7.0,0.0,0.0,null,1.0,true,null],[\"load_4_2\",4,10.0,7.0,0.0,0.0,null,1.0,true,null]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "q_mvar": "float64", + "const_z_percent": "float64", + "const_i_percent": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + } + }, + "motor": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"pn_mech_mw\",\"loading_percent\",\"cos_phi\",\"cos_phi_n\",\"efficiency_percent\",\"efficiency_n_percent\",\"lrc_pu\",\"vn_kv\",\"scaling\",\"in_service\",\"rx\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "pn_mech_mw": "float64", + "loading_percent": "float64", + "cos_phi": "float64", + "cos_phi_n": "float64", + "efficiency_percent": "float64", + "efficiency_n_percent": "float64", + "lrc_pu": "float64", + "vn_kv": "float64", + "scaling": "float64", + "in_service": "bool", + "rx": "float64" + } + }, + "asymmetric_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"bus\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "asymmetric_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\",\"sn_mva\",\"scaling\",\"in_service\",\"type\",\"current_source\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64", + "sn_mva": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object", + "current_source": "bool" + } + }, + "storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"p_mw\",\"q_mvar\",\"sn_mva\",\"soc_percent\",\"min_e_mwh\",\"max_e_mwh\",\"scaling\",\"in_service\",\"type\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "int64", + "p_mw": "float64", + "q_mvar": "float64", + "sn_mva": "float64", + "soc_percent": "float64", + "min_e_mwh": "float64", + "max_e_mwh": "float64", + "scaling": "float64", + "in_service": "bool", + "type": "object" + } + }, + "gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"bus\",\"p_mw\",\"vm_pu\",\"sn_mva\",\"min_q_mvar\",\"max_q_mvar\",\"scaling\",\"slack\",\"in_service\",\"type\"],\"index\":[0,1],\"data\":[[\"gen_0_0\",0,10.0,1.02,null,null,null,1.0,false,true,null],[\"gen_1_1\",1,20.0,1.02,null,null,null,1.0,true,true,null]]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "p_mw": "float64", + "vm_pu": "float64", + "sn_mva": "float64", + "min_q_mvar": "float64", + "max_q_mvar": "float64", + "scaling": "float64", + "slack": "bool", + "in_service": "bool", + "type": "object" + } + }, + "switch": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"element\",\"et\",\"type\",\"closed\",\"name\",\"z_ohm\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "bus": "int64", + "element": "int64", + "et": "object", + "type": "object", + "closed": "bool", + "name": "object", + "z_ohm": "float64" + } + }, + "shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"bus\",\"name\",\"q_mvar\",\"p_mw\",\"vn_kv\",\"step\",\"max_step\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "bus": "uint32", + "name": "object", + "q_mvar": "float64", + "p_mw": "float64", + "vn_kv": "float64", + "step": "uint32", + "max_step": "uint32", + "in_service": "bool" + } + }, + "ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"vm_pu\",\"va_degree\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "vm_pu": "float64", + "va_degree": "float64", + "in_service": "bool" + } + }, + "line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"std_type\",\"from_bus\",\"to_bus\",\"length_km\",\"r_ohm_per_km\",\"x_ohm_per_km\",\"c_nf_per_km\",\"g_us_per_km\",\"max_i_ka\",\"df\",\"parallel\",\"type\",\"in_service\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[null,\"NAYY 4x50 SE\",0,1,4.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"0_2_2\",\"NAYY 4x50 SE\",0,2,4.47,0.642,0.083,210.0,0.0,0.22,1.0,1,\"cs\",true],[\"0_3_3\",\"NAYY 4x50 SE\",0,3,5.65,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"0_4_4\",\"NAYY 4x50 SE\",0,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true],[\"1_2_5\",\"NAYY 4x50 SE\",1,2,2.0,0.642,0.083,210.0,0.0,0.6,1.0,1,\"cs\",true],[\"2_3_6\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"2_3_7\",\"NAYY 4x50 SE\",2,3,2.0,0.642,0.083,210.0,0.0,0.3,1.0,1,\"cs\",true],[\"3_4_8\",\"NAYY 4x50 SE\",3,4,4.0,0.642,0.083,210.0,0.0,0.16,1.0,1,\"cs\",true]]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "length_km": "float64", + "r_ohm_per_km": "float64", + "x_ohm_per_km": "float64", + "c_nf_per_km": "float64", + "g_us_per_km": "float64", + "max_i_ka": "float64", + "df": "float64", + "parallel": "uint32", + "type": "object", + "in_service": "bool" + } + }, + "trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"lv_bus\",\"sn_mva\",\"vn_hv_kv\",\"vn_lv_kv\",\"vk_percent\",\"vkr_percent\",\"pfe_kw\",\"i0_percent\",\"shift_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_phase_shifter\",\"parallel\",\"df\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "lv_bus": "uint32", + "sn_mva": "float64", + "vn_hv_kv": "float64", + "vn_lv_kv": "float64", + "vk_percent": "float64", + "vkr_percent": "float64", + "pfe_kw": "float64", + 
"i0_percent": "float64", + "shift_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_phase_shifter": "bool", + "parallel": "uint32", + "df": "float64", + "in_service": "bool" + } + }, + "trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"std_type\",\"hv_bus\",\"mv_bus\",\"lv_bus\",\"sn_hv_mva\",\"sn_mv_mva\",\"sn_lv_mva\",\"vn_hv_kv\",\"vn_mv_kv\",\"vn_lv_kv\",\"vk_hv_percent\",\"vk_mv_percent\",\"vk_lv_percent\",\"vkr_hv_percent\",\"vkr_mv_percent\",\"vkr_lv_percent\",\"pfe_kw\",\"i0_percent\",\"shift_mv_degree\",\"shift_lv_degree\",\"tap_side\",\"tap_neutral\",\"tap_min\",\"tap_max\",\"tap_step_percent\",\"tap_step_degree\",\"tap_pos\",\"tap_at_star_point\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "std_type": "object", + "hv_bus": "uint32", + "mv_bus": "uint32", + "lv_bus": "uint32", + "sn_hv_mva": "float64", + "sn_mv_mva": "float64", + "sn_lv_mva": "float64", + "vn_hv_kv": "float64", + "vn_mv_kv": "float64", + "vn_lv_kv": "float64", + "vk_hv_percent": "float64", + "vk_mv_percent": "float64", + "vk_lv_percent": "float64", + "vkr_hv_percent": "float64", + "vkr_mv_percent": "float64", + "vkr_lv_percent": "float64", + "pfe_kw": "float64", + "i0_percent": "float64", + "shift_mv_degree": "float64", + "shift_lv_degree": "float64", + "tap_side": "object", + "tap_neutral": "int32", + "tap_min": "int32", + "tap_max": "int32", + "tap_step_percent": "float64", + "tap_step_degree": "float64", + "tap_pos": "int32", + "tap_at_star_point": "bool", + "in_service": "bool" + } + }, + "impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"rft_pu\",\"xft_pu\",\"rtf_pu\",\"xtf_pu\",\"sn_mva\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "rft_pu": "float64", + "xft_pu": "float64", + "rtf_pu": "float64", + "xtf_pu": "float64", + "sn_mva": "float64", + "in_service": "bool" + } + }, + "dcline": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"from_bus\",\"to_bus\",\"p_mw\",\"loss_percent\",\"loss_mw\",\"vm_from_pu\",\"vm_to_pu\",\"max_p_mw\",\"min_q_from_mvar\",\"min_q_to_mvar\",\"max_q_from_mvar\",\"max_q_to_mvar\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "from_bus": "uint32", + "to_bus": "uint32", + "p_mw": "float64", + "loss_percent": "float64", + "loss_mw": "float64", + "vm_from_pu": "float64", + "vm_to_pu": "float64", + "max_p_mw": "float64", + "min_q_from_mvar": "float64", + "min_q_to_mvar": "float64", + "max_q_from_mvar": "float64", + "max_q_to_mvar": "float64", + "in_service": "bool" + } + }, + "ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "in_service": "bool" + } + }, + "xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"bus\",\"ps_mw\",\"qs_mvar\",\"qz_mvar\",\"pz_mw\",\"r_ohm\",\"x_ohm\",\"vm_pu\",\"in_service\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "bus": "uint32", + "ps_mw": "float64", + "qs_mvar": "float64", + "qz_mvar": "float64", + "pz_mw": "float64", + "r_ohm": "float64", + "x_ohm": "float64", + "vm_pu": 
"float64", + "in_service": "bool" + } + }, + "measurement": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"name\",\"measurement_type\",\"element_type\",\"element\",\"value\",\"std_dev\",\"side\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "name": "object", + "measurement_type": "object", + "element_type": "object", + "element": "uint32", + "value": "float64", + "std_dev": "float64", + "side": "object" + } + }, + "pwl_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"power_type\",\"element\",\"et\",\"points\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "power_type": "object", + "element": "object", + "et": "object", + "points": "object" + } + }, + "poly_cost": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"element\",\"et\",\"cp0_eur\",\"cp1_eur_per_mw\",\"cp2_eur_per_mw2\",\"cq0_eur\",\"cq1_eur_per_mvar\",\"cq2_eur_per_mvar2\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "element": "object", + "et": "object", + "cp0_eur": "float64", + "cp1_eur_per_mw": "float64", + "cp2_eur_per_mw2": "float64", + "cq0_eur": "float64", + "cq1_eur_per_mvar": "float64", + "cq2_eur_per_mvar2": "float64" + } + }, + "controller": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"object\",\"in_service\",\"order\",\"level\",\"initial_run\",\"recycle\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "object": "object", + "in_service": "bool", + "order": "float64", + "level": "object", + "initial_run": "bool", + "recycle": "bool" + } + }, + "line_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"coords\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[[[0,0],[0,4]]],[[[0,0],[2,4]]],[[[0,0],[4,4]]],[[[0,0],[4,0]]],[[[0,4],[2,4]]],[[[2,4],[3,4.2],[4,4]]],[[[2,4],[3,3.8],[4,4]]],[[[4,4],[4,0]]]]}", + 
"orient": "split", + "dtype": { + "coords": "object" + } + }, + "bus_geodata": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"x\",\"y\",\"coords\"],\"index\":[0,1,2,3,4],\"data\":[[0.0,0.0,null],[0.0,4.0,null],[2.0,4.0,null],[4.0,4.0,null],[4.0,0.0,null]]}", + "orient": "split", + "dtype": { + "x": "float64", + "y": "float64", + "coords": "object" + } + }, + "version": "2.5.0", + "converged": true, + "name": "5bus", + "f_hz": 50.0, + "sn_mva": 1, + "std_types": { + "line": { + "NAYY 4x50 SE": { + "c_nf_per_km": 210, + "r_ohm_per_km": 0.642, + "x_ohm_per_km": 0.083, + "max_i_ka": 0.142, + "type": "cs", + "q_mm2": 50, + "alpha": 0.00403 + }, + "NAYY 4x120 SE": { + "c_nf_per_km": 264, + "r_ohm_per_km": 0.225, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.242, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NAYY 4x150 SE": { + "c_nf_per_km": 261, + "r_ohm_per_km": 0.208, + "x_ohm_per_km": 0.08, + "max_i_ka": 0.27, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 12/20 kV": { + "c_nf_per_km": 216, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.252, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 12/20 kV": { + "c_nf_per_km": 273, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.117, + "max_i_ka": 0.362, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 12/20 kV": { + "c_nf_per_km": 304, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.112, + "max_i_ka": 0.421, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x95 RM/25 6/10 kV": { + "c_nf_per_km": 315, + "r_ohm_per_km": 0.313, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.249, + "type": "cs", + "q_mm2": 95, + "alpha": 0.00403 + }, + "NA2XS2Y 1x185 RM/25 6/10 kV": { + "c_nf_per_km": 406, + "r_ohm_per_km": 0.161, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.358, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00403 + }, + "NA2XS2Y 1x240 RM/25 6/10 kV": { + "c_nf_per_km": 
456, + "r_ohm_per_km": 0.122, + "x_ohm_per_km": 0.105, + "max_i_ka": 0.416, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 12/20 kV": { + "c_nf_per_km": 250, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.116, + "max_i_ka": 0.319, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 12/20 kV": { + "c_nf_per_km": 230, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.119, + "max_i_ka": 0.283, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 12/20 kV": { + "c_nf_per_km": 190, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.132, + "max_i_ka": 0.22, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "NA2XS2Y 1x150 RM/25 6/10 kV": { + "c_nf_per_km": 360, + "r_ohm_per_km": 0.206, + "x_ohm_per_km": 0.11, + "max_i_ka": 0.315, + "type": "cs", + "q_mm2": 150, + "alpha": 0.00403 + }, + "NA2XS2Y 1x120 RM/25 6/10 kV": { + "c_nf_per_km": 340, + "r_ohm_per_km": 0.253, + "x_ohm_per_km": 0.113, + "max_i_ka": 0.28, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00403 + }, + "NA2XS2Y 1x70 RM/25 6/10 kV": { + "c_nf_per_km": 280, + "r_ohm_per_km": 0.443, + "x_ohm_per_km": 0.123, + "max_i_ka": 0.217, + "type": "cs", + "q_mm2": 70, + "alpha": 0.00403 + }, + "N2XS(FL)2Y 1x120 RM/35 64/110 kV": { + "c_nf_per_km": 112, + "r_ohm_per_km": 0.153, + "x_ohm_per_km": 0.166, + "max_i_ka": 0.366, + "type": "cs", + "q_mm2": 120, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x185 RM/35 64/110 kV": { + "c_nf_per_km": 125, + "r_ohm_per_km": 0.099, + "x_ohm_per_km": 0.156, + "max_i_ka": 0.457, + "type": "cs", + "q_mm2": 185, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x240 RM/35 64/110 kV": { + "c_nf_per_km": 135, + "r_ohm_per_km": 0.075, + "x_ohm_per_km": 0.149, + "max_i_ka": 0.526, + "type": "cs", + "q_mm2": 240, + "alpha": 0.00393 + }, + "N2XS(FL)2Y 1x300 RM/35 64/110 kV": { + "c_nf_per_km": 144, + "r_ohm_per_km": 0.06, + "x_ohm_per_km": 0.144, + "max_i_ka": 0.588, + "type": "cs", + "q_mm2": 300, + "alpha": 0.00393 + }, + 
"15-AL1/3-ST1A 0.4": { + "c_nf_per_km": 11, + "r_ohm_per_km": 1.8769, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.105, + "type": "ol", + "q_mm2": 16, + "alpha": 0.00403 + }, + "24-AL1/4-ST1A 0.4": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 1.2012, + "x_ohm_per_km": 0.335, + "max_i_ka": 0.14, + "type": "ol", + "q_mm2": 24, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 0.4": { + "c_nf_per_km": 12.2, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.3, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 0.4": { + "c_nf_per_km": 13.2, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.29, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 10.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 10.0": { + "c_nf_per_km": 10.1, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 10.0": { + "c_nf_per_km": 10.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.339, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 10.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 10.0": { + "c_nf_per_km": 11.1, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.323, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 10.0": { + "c_nf_per_km": 11.25, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.315, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "34-AL1/6-ST1A 20.0": { + "c_nf_per_km": 9.15, + "r_ohm_per_km": 0.8342, + "x_ohm_per_km": 0.382, + "max_i_ka": 0.17, + "type": "ol", + "q_mm2": 34, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 20.0": { + "c_nf_per_km": 9.5, + "r_ohm_per_km": 0.5939, + 
"x_ohm_per_km": 0.372, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 20.0": { + "c_nf_per_km": 9.7, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 20.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.35, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 20.0": { + "c_nf_per_km": 10.3, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.344, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 20.0": { + "c_nf_per_km": 10.5, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.337, + "max_i_ka": 0.47, + "type": "ol", + "q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 20.0": { + "c_nf_per_km": 10.75, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.33, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 20.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.32, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "48-AL1/8-ST1A 110.0": { + "c_nf_per_km": 8, + "r_ohm_per_km": 0.5939, + "x_ohm_per_km": 0.46, + "max_i_ka": 0.21, + "type": "ol", + "q_mm2": 48, + "alpha": 0.00403 + }, + "70-AL1/11-ST1A 110.0": { + "c_nf_per_km": 8.4, + "r_ohm_per_km": 0.4132, + "x_ohm_per_km": 0.45, + "max_i_ka": 0.29, + "type": "ol", + "q_mm2": 70, + "alpha": 0.00403 + }, + "94-AL1/15-ST1A 110.0": { + "c_nf_per_km": 8.65, + "r_ohm_per_km": 0.306, + "x_ohm_per_km": 0.44, + "max_i_ka": 0.35, + "type": "ol", + "q_mm2": 94, + "alpha": 0.00403 + }, + "122-AL1/20-ST1A 110.0": { + "c_nf_per_km": 8.5, + "r_ohm_per_km": 0.2376, + "x_ohm_per_km": 0.43, + "max_i_ka": 0.41, + "type": "ol", + "q_mm2": 122, + "alpha": 0.00403 + }, + "149-AL1/24-ST1A 110.0": { + "c_nf_per_km": 8.75, + "r_ohm_per_km": 0.194, + "x_ohm_per_km": 0.41, + "max_i_ka": 0.47, + "type": "ol", + 
"q_mm2": 149, + "alpha": 0.00403 + }, + "184-AL1/30-ST1A 110.0": { + "c_nf_per_km": 8.8, + "r_ohm_per_km": 0.1571, + "x_ohm_per_km": 0.4, + "max_i_ka": 0.535, + "type": "ol", + "q_mm2": 184, + "alpha": 0.00403 + }, + "243-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9, + "r_ohm_per_km": 0.1188, + "x_ohm_per_km": 0.39, + "max_i_ka": 0.645, + "type": "ol", + "q_mm2": 243, + "alpha": 0.00403 + }, + "305-AL1/39-ST1A 110.0": { + "c_nf_per_km": 9.2, + "r_ohm_per_km": 0.0949, + "x_ohm_per_km": 0.38, + "max_i_ka": 0.74, + "type": "ol", + "q_mm2": 305, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 110.0": { + "c_nf_per_km": 9.75, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.37, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 110.0": { + "c_nf_per_km": 9.95, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.36, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 220.0": { + "c_nf_per_km": 10, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.285, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 220.0": { + "c_nf_per_km": 11.7, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.275, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + }, + "490-AL1/64-ST1A 380.0": { + "c_nf_per_km": 11, + "r_ohm_per_km": 0.059, + "x_ohm_per_km": 0.253, + "max_i_ka": 0.96, + "type": "ol", + "q_mm2": 490, + "alpha": 0.00403 + }, + "679-AL1/86-ST1A 380.0": { + "c_nf_per_km": 14.6, + "r_ohm_per_km": 0.042, + "x_ohm_per_km": 0.25, + "max_i_ka": 0.115, + "type": "ol", + "q_mm2": 679, + "alpha": 0.00403 + } + }, + "trafo": { + "160 MVA 380/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 60, + "vkr_percent": 0.25, + "sn_mva": 160, + "vn_lv_kv": 110.0, + "vn_hv_kv": 380.0, + "vk_percent": 12.2, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + 
"tap_phase_shifter": false + }, + "100 MVA 220/110 kV": { + "i0_percent": 0.06, + "pfe_kw": 55, + "vkr_percent": 0.26, + "sn_mva": 100, + "vn_lv_kv": 110.0, + "vn_hv_kv": 220.0, + "vk_percent": 12.0, + "shift_degree": 0, + "vector_group": "Yy0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/20 kV": { + "i0_percent": 0.04, + "pfe_kw": 22, + "vkr_percent": 0.32, + "sn_mva": 63, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 18, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/20 kV": { + "i0_percent": 0.05, + "pfe_kw": 18, + "vkr_percent": 0.34, + "sn_mva": 40, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 16.2, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/20 kV": { + "i0_percent": 0.07, + "pfe_kw": 14, + "vkr_percent": 0.41, + "sn_mva": 25, + "vn_lv_kv": 20.0, + "vn_hv_kv": 110.0, + "vk_percent": 12, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "63 MVA 110/10 kV": { + "sn_mva": 63, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 18, + "vkr_percent": 0.32, + "pfe_kw": 22, + "i0_percent": 0.04, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "40 MVA 110/10 kV": { + "sn_mva": 40, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 16.2, + "vkr_percent": 0.34, 
+ "pfe_kw": 18, + "i0_percent": 0.05, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "25 MVA 110/10 kV": { + "sn_mva": 25, + "vn_hv_kv": 110, + "vn_lv_kv": 10, + "vk_percent": 12, + "vkr_percent": 0.41, + "pfe_kw": 14, + "i0_percent": 0.07, + "shift_degree": 150, + "vector_group": "YNd5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -9, + "tap_max": 9, + "tap_step_degree": 0, + "tap_step_percent": 1.5, + "tap_phase_shifter": false + }, + "0.25 MVA 20/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.44, + "pfe_kw": 0.8, + "i0_percent": 0.32, + "shift_degree": 150, + "vector_group": "Yzn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 20/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.425, + "pfe_kw": 1.35, + "i0_percent": 0.3375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 20/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 20, + "vn_lv_kv": 0.4, + "vk_percent": 6, + "vkr_percent": 1.206, + "pfe_kw": 1.65, + "i0_percent": 0.2619, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.25 MVA 10/0.4 kV": { + "sn_mva": 0.25, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.2, + "pfe_kw": 0.6, + "i0_percent": 0.24, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + 
"tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.4 MVA 10/0.4 kV": { + "sn_mva": 0.4, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.325, + "pfe_kw": 0.95, + "i0_percent": 0.2375, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + }, + "0.63 MVA 10/0.4 kV": { + "sn_mva": 0.63, + "vn_hv_kv": 10, + "vn_lv_kv": 0.4, + "vk_percent": 4, + "vkr_percent": 1.0794, + "pfe_kw": 1.18, + "i0_percent": 0.1873, + "shift_degree": 150, + "vector_group": "Dyn5", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -2, + "tap_max": 2, + "tap_step_degree": 0, + "tap_step_percent": 2.5, + "tap_phase_shifter": false + } + }, + "trafo3w": { + "63/25/38 MVA 110/20/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 20, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + }, + "63/25/38 MVA 110/10/10 kV": { + "sn_hv_mva": 63, + "sn_mv_mva": 25, + "sn_lv_mva": 38, + "vn_hv_kv": 110, + "vn_mv_kv": 10, + "vn_lv_kv": 10, + "vk_hv_percent": 10.4, + "vk_mv_percent": 10.4, + "vk_lv_percent": 10.4, + "vkr_hv_percent": 0.28, + "vkr_mv_percent": 0.32, + "vkr_lv_percent": 0.35, + "pfe_kw": 35, + "i0_percent": 0.89, + "shift_mv_degree": 0, + "shift_lv_degree": 0, + "vector_group": "YN0yn0yn0", + "tap_side": "hv", + "tap_neutral": 0, + "tap_min": -10, + "tap_max": 10, + "tap_step_percent": 1.2 + } + } + }, + "res_bus": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[0,1,2,3,4],\"data\":[[1.02,-0.845445168673926,0.0,-111.791243672370911],[1.02,0.0,-21.729831330858325,116.839935541152954],[1.019214100496144,-0.409103297622625,0.0,0.0],[1.018637116919488,-0.503470352662766,10.0,7.0],[1.017983079721402,-0.653497665026562,10.0,7.0]]}", + "orient": "split", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_line": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[0,1,2,3,4,5,6,7],\"data\":[[-7.167647147657727,57.480079867900443,8.03525639977348,-60.113463233922118,0.867609252115754,-2.633383366021676,0.327874112511858,0.343286326507116,0.343286326507116,1.02,-0.845445168673926,1.02,0.0,57.214387751185988],[-0.657313913963437,25.969126903729045,0.866078469150186,-29.007927174007612,0.208764555186749,-3.038800270278568,0.147040043868819,0.164393305610081,0.164393305610081,1.02,-0.845445168673926,1.019214100496144,-0.409103297622625,74.724229822763931],[1.64566972119938,15.370129751576128,-1.540268914180618,-19.229415550834709,0.105400807018762,-3.859285799258581,0.087496748884432,0.109338903896103,0.109338903896103,1.02,-0.845445168673926,1.018637116919488,-0.503470352662766,68.336814935064211],[6.179291340421495,12.971907266349552,-6.119076735247816,-15.70424981919658,0.060214605173678,-2.732342552847028,0.081330018729726,0.095589209712924,0.095589209712924,1.02,-0.845445168673926,1.017983079721402,-0.653497665026562,59.743256070577175],[13.694574931085771,-56.726472302863066,-13.283848894885464,55.407854241119566,0.410726036200307,-1.3186180617435,0.330312825878128,0.322760996590474,0.330312825878128,1.02,0.0,1.019214100496144,-0.409103297622625,55.0521
37646354595],[6.208885212872048,-13.199963533555254,-6.184761786109662,11.833197159642042,0.024123426762386,-1.366766373913212,0.082632108556076,0.075677384410291,0.082632108556076,1.019214100496144,-0.409103297622625,1.018637116919488,-0.503470352662766,27.544036185358689],[6.208885212872048,-13.199963533555254,-6.184761786109662,11.833197159642042,0.024123426762386,-1.366766373913212,0.082632108556076,0.075677384410291,0.082632108556076,1.019214100496144,-0.409103297622625,1.018637116919488,-0.503470352662766,27.544036185358689],[3.909792486391969,-11.436978768449999,-3.88092326475316,8.704249819196738,0.028869221638809,-2.732728949253261,0.068506463438984,0.054050881891821,0.068506463438984,1.018637116919488,-0.503470352662766,1.017983079721402,-0.653497665026562,42.816539649365005]]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo3w": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + } + }, + "res_impedance": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + } + }, + "res_ext_grid": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_load": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[0,1,2],\"data\":[[10.0,7.0],[10.0,7.0],[10.0,7.0]]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_motor": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_sgen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_storage": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_shunt": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + } + }, + "res_gen": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"va_degree\",\"vm_pu\"],\"index\":[0,1],\"data\":[[10.0,118.791243672370911,-0.845445168673926,1.02],[21.729831330858325,-116.839935541152954,0.0,1.02]]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "va_degree": "float64", + "vm_pu": "float64" + } + }, + "res_ward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64" + } + }, + "res_xward": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\",\"vm_pu\",\"va_internal_degree\",\"vm_internal_pu\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64", + "vm_pu": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64" + } + }, + "res_dcline": { + "_module": 
"pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64" + } + }, + "res_bus_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_pu\",\"va_degree\",\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_pu": "float64", + "va_degree": "float64", + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_line_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\",\"i_ka\",\"vm_from_pu\",\"va_from_degree\",\"vm_to_pu\",\"va_to_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64", + "i_ka": "float64", + "vm_from_pu": "float64", + "va_from_degree": "float64", + "vm_to_pu": "float64", + "va_to_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_lv_mw": "float64", + 
"q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "loading_percent": "float64" + } + }, + "res_trafo3w_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_hv_mw\",\"q_hv_mvar\",\"p_mv_mw\",\"q_mv_mvar\",\"p_lv_mw\",\"q_lv_mvar\",\"pl_mw\",\"ql_mvar\",\"i_hv_ka\",\"i_mv_ka\",\"i_lv_ka\",\"vm_hv_pu\",\"va_hv_degree\",\"vm_mv_pu\",\"va_mv_degree\",\"vm_lv_pu\",\"va_lv_degree\",\"va_internal_degree\",\"vm_internal_pu\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_hv_mw": "float64", + "q_hv_mvar": "float64", + "p_mv_mw": "float64", + "q_mv_mvar": "float64", + "p_lv_mw": "float64", + "q_lv_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_hv_ka": "float64", + "i_mv_ka": "float64", + "i_lv_ka": "float64", + "vm_hv_pu": "float64", + "va_hv_degree": "float64", + "vm_mv_pu": "float64", + "va_mv_degree": "float64", + "vm_lv_pu": "float64", + "va_lv_degree": "float64", + "va_internal_degree": "float64", + "vm_internal_pu": "float64", + "loading_percent": "float64" + } + }, + "res_impedance_est": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_from_mw\",\"q_from_mvar\",\"p_to_mw\",\"q_to_mvar\",\"pl_mw\",\"ql_mvar\",\"i_from_ka\",\"i_to_ka\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_from_mw": "float64", + "q_from_mvar": "float64", + "p_to_mw": "float64", + "q_to_mvar": "float64", + "pl_mw": "float64", + "ql_mvar": "float64", + "i_from_ka": "float64", + "i_to_ka": "float64" + } + }, + "res_bus_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_line_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + 
"_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_trafo_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_trafo3w_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_ext_grid_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_gen_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_sgen_sc": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_bus_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"vm_a_pu\",\"va_a_degree\",\"vm_b_pu\",\"va_b_degree\",\"vm_c_pu\",\"va_c_degree\",\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "vm_a_pu": "float64", + "va_a_degree": "float64", + "vm_b_pu": "float64", + "va_b_degree": "float64", + "vm_c_pu": "float64", + "va_c_degree": "float64", + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_line_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_a_from_mw\",\"q_a_from_mvar\",\"p_b_from_mw\",\"q_b_from_mvar\",\"q_c_from_mvar\",\"p_a_to_mw\",\"q_a_to_mvar\",\"p_b_to_mw\",\"q_b_to_mvar\",\"p_c_to_mw\",\"q_c_to_mvar\",\"p_a_l_mw\",\"q_a_l_mvar\",\"p_b_l_mw\",\"q_b_l_mvar\",\"p_c_l_mw\",\"q_c_l_mvar\",\"i_a_from_ka\",\"i_a_to_ka\",\"i_b_from_ka\",\"i_b_to_ka\",\"i_c_from_ka\",\"i_c_to_ka\",\"i_a_ka\",\"i_b_ka\",\"i_c_ka\",\"i_n_from_ka\",\"i_n_to_ka\",\"i_n_ka\",\"loading_a_percent\",\"loading_b_percent\",\"loading_c_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_from_mw": "float64", + "q_a_from_mvar": "float64", + "p_b_from_mw": "float64", + "q_b_from_mvar": "float64", + "q_c_from_mvar": "float64", + "p_a_to_mw": "float64", + "q_a_to_mvar": "float64", + "p_b_to_mw": "float64", + "q_b_to_mvar": "float64", + "p_c_to_mw": "float64", + "q_c_to_mvar": "float64", + "p_a_l_mw": "float64", + "q_a_l_mvar": "float64", + "p_b_l_mw": "float64", + "q_b_l_mvar": "float64", + "p_c_l_mw": "float64", + "q_c_l_mvar": "float64", + "i_a_from_ka": "float64", + "i_a_to_ka": "float64", + "i_b_from_ka": "float64", + "i_b_to_ka": "float64", + "i_c_from_ka": "float64", + "i_c_to_ka": "float64", + "i_a_ka": "float64", + "i_b_ka": "float64", + "i_c_ka": "float64", + "i_n_from_ka": "float64", + "i_n_to_ka": "float64", + "i_n_ka": "float64", + "loading_a_percent": "float64", + "loading_b_percent": "float64", + "loading_c_percent": "float64" + } + }, + "res_trafo_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_a_hv_mw\",\"q_a_hv_mvar\",\"p_b_hv_mw\",\"q_b_hv_mvar\",\"p_c_hv_mw\",\"q_c_hv_mvar\",\"p_a_lv_mw\",\"q_a_lv_mvar\",\"p_b_lv_mw\",\"q_b_lv_mvar\",\"p_c_lv_mw\",\"q_c_lv_mvar\",\"p_a_l_mw\",\"q_a_l_mvar\",\"p_b_l_mw\",\"q_b_l_mvar\",\"p_c_l_mw\",\"q_c_l_mvar\",\"i_a_hv_ka\",\"i_a_lv_ka\",\"i_b_hv_ka\",\"i_b_lv_ka\",\"i_c_hv_ka\",\"i_c_lv_ka\",\"loading_a_percent\",\"loading_b_percent\",\"loading_c_percent\",\"loading_percent\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_hv_mw": "float64", + "q_a_hv_mvar": "float64", + "p_b_hv_mw": "float64", + "q_b_hv_mvar": "float64", + "p_c_hv_mw": "float64", + "q_c_hv_mvar": "float64", + "p_a_lv_mw": "float64", + "q_a_lv_mvar": "float64", + "p_b_lv_mw": "float64", + "q_b_lv_mvar": "float64", + "p_c_lv_mw": "float64", + "q_c_lv_mvar": "float64", + "p_a_l_mw": "float64", + "q_a_l_mvar": "float64", + "p_b_l_mw": "float64", + "q_b_l_mvar": "float64", + "p_c_l_mw": "float64", + "q_c_l_mvar": "float64", + "i_a_hv_ka": "float64", + "i_a_lv_ka": "float64", + "i_b_hv_ka": "float64", + "i_b_lv_ka": "float64", + "i_c_hv_ka": "float64", + "i_c_lv_ka": "float64", + "loading_a_percent": "float64", + "loading_b_percent": "float64", + "loading_c_percent": "float64", + "loading_percent": "float64" + } + }, + "res_ext_grid_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_shunt_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[],\"index\":[],\"data\":[]}", + "orient": "split" + }, + "res_load_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": 
"{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_sgen_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_storage_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_mw\",\"q_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_mw": "float64", + "q_mvar": "float64" + } + }, + "res_asymmetric_load_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "res_asymmetric_sgen_3ph": { + "_module": "pandas.core.frame", + "_class": "DataFrame", + "_object": "{\"columns\":[\"p_a_mw\",\"q_a_mvar\",\"p_b_mw\",\"q_b_mvar\",\"p_c_mw\",\"q_c_mvar\"],\"index\":[],\"data\":[]}", + "orient": "split", + "dtype": { + "p_a_mw": "float64", + "q_a_mvar": "float64", + "p_b_mw": "float64", + "q_b_mvar": "float64", + "p_c_mw": "float64", + "q_c_mvar": "float64" + } + }, + "user_pf_options": {}, + "OPF_converged": false + } +} \ No newline at end of file diff --git a/grid2op/data_test/5bus_example_th_lim_dict/prods_charac.csv b/grid2op/data_test/5bus_example_th_lim_dict/prods_charac.csv new file mode 100644 index 000000000..d82fbbadf --- /dev/null +++ b/grid2op/data_test/5bus_example_th_lim_dict/prods_charac.csv @@ -0,0 +1,3 @@ +Pmax,Pmin,name,type,bus,max_ramp_up,max_ramp_down,min_up_time,min_down_time,marginal_cost,shut_down_cost,start_cost,x,y,V +10,0.0,gen_0_0,wind,5,0,0,0,0,0,0,0,0,0,102. 
+30,0.0,gen_1_1,thermal,0,10,10,4,4,70,1,2,0,400,102. \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.5.0/00/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.5.0/00/grid2op.info index a8fa482e3..d41f414a1 100644 --- a/grid2op/data_test/runner_data/res_agent_1.5.0/00/grid2op.info +++ b/grid2op/data_test/runner_data/res_agent_1.5.0/00/grid2op.info @@ -1,3 +1,3 @@ { - "version": "1.4.0" + "version": "1.5.0" } \ No newline at end of file diff --git a/grid2op/data_test/runner_data/res_agent_1.5.0/01/grid2op.info b/grid2op/data_test/runner_data/res_agent_1.5.0/01/grid2op.info index a8fa482e3..d41f414a1 100644 --- a/grid2op/data_test/runner_data/res_agent_1.5.0/01/grid2op.info +++ b/grid2op/data_test/runner_data/res_agent_1.5.0/01/grid2op.info @@ -1,3 +1,3 @@ { - "version": "1.4.0" + "version": "1.5.0" } \ No newline at end of file diff --git a/grid2op/gym_compat/box_gym_obsspace.py b/grid2op/gym_compat/box_gym_obsspace.py index cec0bbab2..9f739bb00 100644 --- a/grid2op/gym_compat/box_gym_obsspace.py +++ b/grid2op/gym_compat/box_gym_obsspace.py @@ -129,12 +129,18 @@ def __init__(self, ob_sp = grid2op_observation_space self.dict_properties = { - "year": (0, 2200, (1,), dt_int), - "month": (0, 12, (1,), dt_int), - "day": (0, 31, (1,), dt_int), - "hour_of_day": (0, 24, (1,), dt_int), - "minute_of_hour": (0, 60, (1,), dt_int), - "day_of_week": (0, 7, (1,), dt_int), + "year": (np.zeros(1, dtype=dt_int), + np.zeros(1, dtype=dt_int) + 2200, (1,), dt_int), + "month": (np.zeros(1, dtype=dt_int), + np.zeros(1, dtype=dt_int) + 12, (1,), dt_int), + "day": (np.zeros(1, dtype=dt_int), + np.zeros(1, dtype=dt_int) + 31, (1,), dt_int), + "hour_of_day": (np.zeros(1, dtype=dt_int), + np.zeros(1, dtype=dt_int) + 24, (1,), dt_int), + "minute_of_hour": (np.zeros(1, dtype=dt_int), + np.zeros(1, dtype=dt_int) + 60, (1,), dt_int), + "day_of_week": (np.zeros(1, dtype=dt_int), + np.zeros(1, dtype=dt_int) + 7, (1,), dt_int), "gen_p": 
(np.full(shape=(ob_sp.n_gen,), fill_value=0., dtype=dt_float), 1.2 * ob_sp.gen_pmax, (ob_sp.n_gen,), @@ -155,7 +161,7 @@ def __init__(self, np.full(shape=(ob_sp.n_load,), fill_value=+np.inf, dtype=dt_float), (ob_sp.n_load,), dt_float), - "laod_v": (np.full(shape=(ob_sp.n_load,), fill_value=0., dtype=dt_float), + "load_v": (np.full(shape=(ob_sp.n_load,), fill_value=0., dtype=dt_float), np.full(shape=(ob_sp.n_load,), fill_value=np.inf, dtype=dt_float), (ob_sp.n_load,), dt_float), diff --git a/grid2op/gym_compat/gym_act_space.py b/grid2op/gym_compat/gym_act_space.py index 2e105fc94..9cdf67df4 100644 --- a/grid2op/gym_compat/gym_act_space.py +++ b/grid2op/gym_compat/gym_act_space.py @@ -10,7 +10,7 @@ import warnings import numpy as np -from grid2op.Environment import Environment +from grid2op.Environment import Environment, MultiMixEnvironment, BaseMultiProcessEnvironment from grid2op.Action import BaseAction, ActionSpace from grid2op.dtypes import dt_int, dt_bool, dt_float from grid2op.Converter.Converters import Converter @@ -106,7 +106,7 @@ def __init__(self, env, converter=None, dict_variables=None): """ if dict_variables is None: dict_variables = {} - if isinstance(env, Environment): + if isinstance(env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment)): # action_space is an environment self.initial_act_space = env.action_space self._init_env = env diff --git a/grid2op/gym_compat/gym_obs_space.py b/grid2op/gym_compat/gym_obs_space.py index 5968edb84..ba03df7b3 100644 --- a/grid2op/gym_compat/gym_obs_space.py +++ b/grid2op/gym_compat/gym_obs_space.py @@ -5,10 +5,11 @@ # you can obtain one at http://mozilla.org/MPL/2.0/. # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
+ import numpy as np from gym import spaces - +from grid2op.Environment import Environment, MultiMixEnvironment, BaseMultiProcessEnvironment from grid2op.gym_compat.gym_space_converter import _BaseGymSpaceConverter from grid2op.Observation import BaseObservation from grid2op.dtypes import dt_int, dt_bool, dt_float @@ -49,6 +50,9 @@ class GymObservationSpace(_BaseGymSpaceConverter): """ def __init__(self, env, dict_variables=None): + if not isinstance(env, (Environment, MultiMixEnvironment, BaseMultiProcessEnvironment)): + raise RuntimeError("GymActionSpace must be created with an Environment of an ActionSpace (or a Converter)") + self._init_env = env self.initial_obs_space = self._init_env.observation_space dict_ = {} # will represent the gym.Dict space diff --git a/grid2op/gym_compat/multidiscrete_gym_actspace.py b/grid2op/gym_compat/multidiscrete_gym_actspace.py index 52fcbaeab..d7b00eebf 100644 --- a/grid2op/gym_compat/multidiscrete_gym_actspace.py +++ b/grid2op/gym_compat/multidiscrete_gym_actspace.py @@ -15,11 +15,6 @@ from grid2op.dtypes import dt_int, dt_bool, dt_float from grid2op.gym_compat.utils import ALL_ATTR, ATTR_DISCRETE -# TODO test that it works normally -# TODO test the casting in dt_int or dt_float depending on the data -# TODO test the scaling -# TODO doc -# TODO test the function part class MultiDiscreteActSpace(MultiDiscrete): diff --git a/grid2op/gym_compat/utils.py b/grid2op/gym_compat/utils.py index 9f3b31713..d5a1b56d2 100644 --- a/grid2op/gym_compat/utils.py +++ b/grid2op/gym_compat/utils.py @@ -6,9 +6,19 @@ # SPDX-License-Identifier: MPL-2.0 # This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. 
-ALL_ATTR = ("set_line_status", "change_line_status", "set_bus", "change_bus", "redispatch", - "set_storage", "curtail") +ALL_ATTR = ("set_line_status", + "change_line_status", + "set_bus", + "change_bus", + "redispatch", + "set_storage", + "curtail") -ATTR_DISCRETE = ("set_line_status", "change_line_status", "set_bus", "change_bus", - "sub_set_bus", "sub_change_bus", "one_sub_set", "one_sub_change" - ) +ATTR_DISCRETE = ("set_line_status", + "change_line_status", + "set_bus", + "change_bus", + "sub_set_bus", + "sub_change_bus", + "one_sub_set", + "one_sub_change") diff --git a/grid2op/platform.py b/grid2op/platform.py new file mode 100644 index 000000000..e28d667e9 --- /dev/null +++ b/grid2op/platform.py @@ -0,0 +1,15 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +# this module check on which platform grid2op is currently running. This is important for multiprocessing that +# is not handled the same way in all platform. 
+ +import sys +_IS_WINDOWS = sys.platform.startswith('win') +_IS_LINUX = sys.platform.startswith("linux") +_IS_MACOS = sys.platform.startswith("darwin") diff --git a/grid2op/tests/BaseBackendTest.py b/grid2op/tests/BaseBackendTest.py index 9b845a101..4b639941f 100644 --- a/grid2op/tests/BaseBackendTest.py +++ b/grid2op/tests/BaseBackendTest.py @@ -86,6 +86,7 @@ def test_properNames(self): warnings.filterwarnings("ignore") with make(os.path.join(path, "5bus_example_diff_name"), backend=backend, + _add_to_name="BaseTestNames" ) as env: obs = env.reset() assert np.all(obs.name_load == ["tutu", "toto", "tata"]) @@ -100,7 +101,7 @@ def test_load_file(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") backend.load_grid(path_matpower, case_file) - backend.set_env_name("TestLoadingCase_env") + type(backend).set_env_name("BaseTestLoadingCase") backend.assert_grid_correct() assert backend.n_line == 20 @@ -150,7 +151,7 @@ def test_assert_grid_correct(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") backend.load_grid(path_matpower, case_file) - backend.set_env_name("TestLoadingCase_env2") + type(backend).set_env_name("TestLoadingCase_env2_test_assert_grid_correct") backend.assert_grid_correct() conv = backend.runpf() assert conv, "powerflow diverge it is not supposed to!" 
@@ -165,7 +166,7 @@ def setUp(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.backend.load_grid(self.path_matpower, self.case_file) - self.backend.set_env_name("TestLoadingBackendFunc_env") + type(self.backend).set_env_name("TestLoadingBackendFunc_env") type(self.backend).set_no_storage() self.backend.assert_grid_correct() self.game_rules = RulesChecker() @@ -178,6 +179,26 @@ def setUp(self): def tearDown(self): pass + def test_theta_ok(self): + self.skip_if_needed() + if self.backend.can_output_theta: + theta_or, theta_ex, load_theta, gen_theta, storage_theta = self.backend.get_theta() + assert theta_or.shape[0] == self.backend.n_line + assert theta_ex.shape[0] == self.backend.n_line + assert load_theta.shape[0] == self.backend.n_load + assert gen_theta.shape[0] == self.backend.n_gen + assert storage_theta.shape[0] == self.backend.n_storage + assert np.all(np.isfinite(theta_or)) + assert np.all(np.isfinite(theta_ex)) + assert np.all(np.isfinite(load_theta)) + assert np.all(np.isfinite(gen_theta)) + assert np.all(np.isfinite(storage_theta)) + else: + with self.assertRaises(NotImplementedError): + # if the "can_output_theta" flag is set to false, then it means the backend + # should not implement the get_theta class + self.backend.get_theta() + def test_runpf_dc(self): self.skip_if_needed() conv = self.backend.runpf(is_dc=True) @@ -575,7 +596,7 @@ def setUp(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.backend.load_grid(self.path_matpower, self.case_file) - self.backend.set_env_name("TestTopoAction_env") + type(self.backend).set_env_name("BaseTestTopoAction") type(self.backend).set_no_storage() self.backend.assert_grid_correct() self.game_rules = RulesChecker() @@ -1064,12 +1085,13 @@ class BaseTestEnvPerformsCorrectCascadingFailures(MakeBackend): """ def setUp(self): self.backend = self.make_backend(detailed_infos_for_cascading_failures=True) + type(self.backend)._clear_class_attribute() 
self.path_matpower = self.get_path() self.case_file = self.get_casefile() with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.backend.load_grid(self.path_matpower, self.case_file) - self.backend.set_env_name("TestEnvPerformsCorrectCascadingFailures_env") + type(self.backend).set_env_name("TestEnvPerformsCorrectCascadingFailures_env") type(self.backend).set_no_storage() self.backend.assert_grid_correct() self.game_rules = RulesChecker() @@ -1333,6 +1355,7 @@ def test_set_bus(self): self.skip_if_needed() # print("test_set_bus") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = make(test=True, backend=backend) @@ -1347,6 +1370,7 @@ def test_change_bus(self): self.skip_if_needed() # print("test_change_bus") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = make(test=True, backend=backend) @@ -1360,6 +1384,7 @@ def test_change_bustwice(self): self.skip_if_needed() # print("test_change_bustwice") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = make(test=True, backend=backend) @@ -1380,6 +1405,7 @@ def test_isolate_load(self): self.skip_if_needed() # print("test_isolate_load") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = make(test=True, backend=backend) @@ -1391,6 +1417,7 @@ def test_reco_disco_bus(self): self.skip_if_needed() # print("test_reco_disco_bus") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env_case1 = make("rte_case5_example", test=True, gamerules_class=AlwaysLegal, backend=backend) @@ -1408,6 +1435,7 @@ def test_reco_disco_bus2(self): self.skip_if_needed() # 
print("test_reco_disco_bus2") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env_case2 = make("rte_case5_example", test=True, gamerules_class=AlwaysLegal, backend=backend) @@ -1425,6 +1453,7 @@ def test_reco_disco_bus3(self): self.skip_if_needed() # print("test_reco_disco_bus3") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env_case2 = make("rte_case5_example", test=True, gamerules_class=AlwaysLegal, backend=backend) @@ -1440,6 +1469,7 @@ def test_reco_disco_bus4(self): self.skip_if_needed() # print("test_reco_disco_bus4") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env_case2 = make("rte_case5_example", test=True, gamerules_class=AlwaysLegal, backend=backend) @@ -1455,6 +1485,7 @@ def test_reco_disco_bus5(self): self.skip_if_needed() # print("test_reco_disco_bus5") backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env_case2 = make("rte_case5_example", test=True, gamerules_class=AlwaysLegal, backend=backend) @@ -1470,6 +1501,7 @@ class BaseTestShuntAction(MakeBackend): def test_shunt_ambiguous_id_incorrect(self): self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") with make("rte_case5_example", test=True, gamerules_class=AlwaysLegal, @@ -1481,6 +1513,7 @@ def test_shunt_effect(self): self.skip_if_needed() backend1 = self.make_backend() backend2 = self.make_backend() + type(backend1)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env_ref = make("rte_case14_realistic", test=True, gamerules_class=AlwaysLegal, @@ -1507,6 +1540,7 @@ class 
BaseTestResetEqualsLoadGrid(MakeBackend): def setUp(self): backend1 = self.make_backend() backend2 = self.make_backend() + type(backend1)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env1 = make("rte_case5_example", test=True, backend=backend1) @@ -1625,6 +1659,7 @@ def test_obs_from_same_chronic(self): def test_combined_changes(self): # Unlimited sub changes backend = self.make_backend() + type(backend)._clear_class_attribute() params = grid2op.Parameters.Parameters() params.MAX_SUB_CHANGED = 999 @@ -1697,6 +1732,7 @@ class BaseTestVoltageOWhenDisco(MakeBackend): def test_this(self): self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") with make("rte_case14_realistic", test=True, backend=backend) as env: @@ -1710,6 +1746,7 @@ class BaseTestChangeBusSlack(MakeBackend): def test_change_slack_case14(self): self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = grid2op.make("rte_case14_realistic", test=True, backend=backend) @@ -1742,6 +1779,7 @@ def test_there_are_storage(self): """test the backend properly loaded the storage units""" self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env = grid2op.make("educ_case14_storage", test=True, backend=backend) @@ -1751,6 +1789,7 @@ def test_storage_action_mw(self): """test the actions are properly implemented in the backend""" self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") self.env = grid2op.make("educ_case14_storage", test=True, backend=backend) @@ -1817,17 +1856,22 @@ def test_storage_action_topo(self): param.NB_TIMESTEP_COOLDOWN_SUB = 0 
param.NB_TIMESTEP_COOLDOWN_LINE = 0 backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") - self.env = grid2op.make("educ_case14_storage", test=True, backend=backend, - param=param, action_class=CompleteAction) + self.env = grid2op.make("educ_case14_storage", + test=True, + backend=backend, + param=param, + action_class=CompleteAction) # test i can do a reset obs = self.env.reset() # test i can do a step obs, reward, done, info = self.env.step(self.env.action_space()) - assert not done + exc_ = info["exception"] + assert not done, f"i should be able to do a step with some storage units error is {exc_}" storage_p, storage_q, storage_v = self.env.backend.storages_info() assert np.all(np.abs(storage_p - 0.) <= self.tol_one) assert np.all(np.abs(storage_q - 0.) <= self.tol_one) @@ -1840,7 +1884,6 @@ def test_storage_action_topo(self): "generators_id": [(3, 2)]}} ) obs, reward, done, info = self.env.step(act) - assert not info["exception"] storage_p, storage_q, storage_v = self.env.backend.storages_info() assert np.all(np.abs(storage_p - array_modif) <= self.tol_one) @@ -1875,12 +1918,12 @@ def test_storage_action_topo(self): "generators_id": [(3, 1)]}} ) obs, reward, done, info = self.env.step(act) - assert not info["exception"] + assert not info["exception"], \ + "error when storage is disconnected with 0 production, throw an error, but should not" assert not done storage_p, storage_q, storage_v = self.env.backend.storages_info() assert np.all(np.abs(storage_p - [0., array_modif[1]]) <= self.tol_one), \ "storage is not disconnected, yet alone on its busbar" - assert np.all(np.abs(storage_q - 0.) 
<= self.tol_one) assert obs.storage_bus[0] == -1, "storage should be disconnected" assert storage_v[0] == 0., "storage 0 should be disconnected" assert obs.line_or_bus[8] == 1 @@ -1925,6 +1968,7 @@ def test_issue_125(self): # https://github.com/rte-france/Grid2Op/issues/125 self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env = grid2op.make("rte_case14_realistic", test=True, @@ -1947,6 +1991,7 @@ def test_issue_125(self): def test_issue_134(self): self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() param = Parameters() param.NB_TIMESTEP_COOLDOWN_LINE = 0 @@ -2010,6 +2055,7 @@ def test_issue_134(self): def test_issue_134_check_ambiguity(self): self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() param = Parameters() param.MAX_LINE_STATUS_CHANGED = 9999 @@ -2037,6 +2083,7 @@ def test_issue_134_check_ambiguity(self): def test_issue_134_withcooldown_forrules(self): self.skip_if_needed() backend = self.make_backend() + type(backend)._clear_class_attribute() param = Parameters() param.NB_TIMESTEP_COOLDOWN_LINE = 20 @@ -2133,6 +2180,7 @@ def test_issue_134_withcooldown_forrules(self): def test_issue_copyenv(self): # https://github.com/BDonnot/lightsim2grid/issues/10 backend = self.make_backend() + type(backend)._clear_class_attribute() with warnings.catch_warnings(): warnings.filterwarnings("ignore") env1 = grid2op.make("rte_case14_realistic", @@ -2147,6 +2195,7 @@ def test_issue_copyenv(self): class BaseStatusActions(MakeBackend): def _make_my_env(self): backend = self.make_backend() + type(backend)._clear_class_attribute() param = Parameters() param.NB_TIMESTEP_COOLDOWN_LINE = 0 param.NB_TIMESTEP_COOLDOWN_SUB = 0 diff --git a/grid2op/tests/BaseRedispTest.py b/grid2op/tests/BaseRedispTest.py index 2e78d9360..06548e8e9 100644 --- a/grid2op/tests/BaseRedispTest.py +++ 
b/grid2op/tests/BaseRedispTest.py @@ -368,8 +368,12 @@ def setUp(self) -> None: self.env = make("rte_case14_redisp", test=True, backend=backend) # i don't want to be bother by ramps in these test (note that is NOT recommended to change that) - self.env.gen_max_ramp_down[:] = 5000 - self.env.gen_max_ramp_up[:] = 5000 + type(self.env).gen_max_ramp_down[:] = 5000 + type(self.env).gen_max_ramp_up[:] = 5000 + act_cls = type(self.env.action_space()) + act_cls.gen_max_ramp_down[:] = 5000 + act_cls.gen_max_ramp_up[:] = 5000 + self.msg_ = 'Grid2OpException AmbiguousAction InvalidRedispatching NotEnoughGenerators "Attempt to use a ' \ 'redispatch action that does not sum to 0., but a' self.tol_one = self.env._tol_poly @@ -389,12 +393,12 @@ def test_redisp_toohigh_toolow(self): act = self.env.action_space({"redispatch": (0, -1)}) obs, reward, done, info = self.env.step(act) assert not done - assert info["is_dispatching_illegal"] is False + assert not info["is_dispatching_illegal"] assert np.all(self.env._target_dispatch == [-1., 0., 0., 0., 0.]) act = self.env.action_space({"redispatch": (0, 0)}) obs, reward, done, info = self.env.step(act) assert not done - assert info["is_dispatching_illegal"] is False + assert not info["is_dispatching_illegal"] assert np.all(self.env._target_dispatch == [-1., 0., 0., 0., 0.]) # this one is not correct: too high decrease diff --git a/grid2op/tests/test_Action.py b/grid2op/tests/test_Action.py index fe76e2324..0e8f5ceec 100644 --- a/grid2op/tests/test_Action.py +++ b/grid2op/tests/test_Action.py @@ -652,7 +652,7 @@ def test__eq__(self): action2 = self.helper_action({"change_bus": {"substations_id": [(id_1, arr1)]}, "set_bus": {"substations_id": [(id_2, arr2)]}}) action3 = self.helper_action() - test = action1.assert_grid_correct() + test = type(action1).assert_grid_correct_cls() assert action1 == action2 assert action1 != action3 diff --git a/grid2op/tests/test_Action_iadd.py b/grid2op/tests/test_Action_iadd.py index 7254e524f..39bff9e2a 
100644 --- a/grid2op/tests/test_Action_iadd.py +++ b/grid2op/tests/test_Action_iadd.py @@ -20,7 +20,7 @@ def _action_setup(self): def _skipMissingKey(self, key): if key not in self.action_t.authorized_keys: - skip_msg = "Skipped: Missing authorized_key {key}" + skip_msg = f"Skipped: Missing authorized_key {key}" unittest.TestCase.skipTest(self, skip_msg) @classmethod diff --git a/grid2op/tests/test_Environment.py b/grid2op/tests/test_Environment.py index 2ed6ef9a0..8f6467fe0 100644 --- a/grid2op/tests/test_Environment.py +++ b/grid2op/tests/test_Environment.py @@ -141,6 +141,16 @@ def test_copy_env(self): obs2 = env2.reset() assert obs1 == obs2 + def test_assign_action_space(self): + """test that i cannot change the action_space""" + with self.assertRaises(EnvError): + self.env.action_space = self.env._action_space + + def test_assign_obs_space(self): + """test that i cannot change the observation_space""" + with self.assertRaises(EnvError): + self.env.observation_space = self.env._observation_space + def test_step_doesnt_change_action(self): act = self.env.action_space() act_init = copy.deepcopy(act) @@ -729,18 +739,18 @@ def test_change_parameters_forecast(self): self.env1.change_forecast_parameters(param) # in these first checks, parameters are not modified self._check_env_param(self.env1, real_orig_param) - self._check_env_param(self.env1._helper_observation.obs_env, real_orig_param) + self._check_env_param(self.env1._observation_space.obs_env, real_orig_param) obs, *_ = self.env1.step(self.env1.action_space()) _ = obs.simulate(self.env1.action_space()) self._check_env_param(self.env1, real_orig_param) - self._check_env_param(self.env1._helper_observation.obs_env, real_orig_param) + self._check_env_param(self.env1._observation_space.obs_env, real_orig_param) # reset triggers the modification obs = self.env1.reset() _ = obs.simulate(self.env1.action_space()) self._check_env_param(self.env1, real_orig_param) - 
self._check_env_param(self.env1._helper_observation.obs_env, param) + self._check_env_param(self.env1._observation_space.obs_env, param) def test_change_parameters_forecast_fromissue_128(self): """ diff --git a/grid2op/tests/test_GridGraphObs.py b/grid2op/tests/test_GridGraphObs.py new file mode 100644 index 000000000..addeba7f8 --- /dev/null +++ b/grid2op/tests/test_GridGraphObs.py @@ -0,0 +1,53 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import unittest +import warnings +import numpy as np +import networkx + +import grid2op + +import pdb + + +class TestNetworkXGraph(unittest.TestCase): + """this class test the networkx representation of an observation.""" + def setUp(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + self.env = grid2op.make("l2rpn_neurips_2020_track1", test=True) + self.tol = 1e-5 + + def test_kirchhoff(self): + """ + test kirchhoff law + + in case of parallel lines + """ + obs = self.env.reset() + graph = obs.as_networkx() + assert isinstance(graph, networkx.Graph), "graph should be a networkx object" + ps = np.array([graph.nodes[el]["p"] for el in graph.nodes]) + qs = np.array([graph.nodes[el]["q"] for el in graph.nodes]) + + p_out = np.zeros(ps.shape[0]) + q_out = np.zeros(ps.shape[0]) + for or_, ex_ in graph.edges: + me = graph.edges[(or_, ex_)] + p_out[or_] += me["p_or"] + q_out[or_] += me["q_or"] + p_out[ex_] += me["p_ex"] + q_out[ex_] += me["q_ex"] + + assert np.max(np.abs(ps - p_out)) <= self.tol, "error for active flow" + assert np.max(np.abs(qs - q_out)) <= self.tol, "error for reactive 
flow" + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_GridObjects.py b/grid2op/tests/test_GridObjects.py index 3a9be9be0..b83d39004 100644 --- a/grid2op/tests/test_GridObjects.py +++ b/grid2op/tests/test_GridObjects.py @@ -15,6 +15,9 @@ import grid2op from grid2op.Backend.EducPandaPowerBackend import EducPandaPowerBackend +from grid2op.Exceptions import EnvError + +import pdb class TestAuxFunctions(unittest.TestCase): @@ -52,40 +55,47 @@ def test_auxilliary_func(self): backend.line_or_to_subid = self.envref.backend.line_or_to_subid backend.line_ex_to_subid = self.envref.backend.line_ex_to_subid + # now "hack" the class to check that the correct element are correctly implemented + bk_cls = type(backend) + # delete the attributes we want to test - backend.sub_info = None - backend.load_to_sub_pos = None - backend.gen_to_sub_pos = None - backend.line_or_to_sub_pos = None - backend.line_ex_to_sub_pos = None - backend.line_ex_to_sub_pos = None - backend.load_pos_topo_vect = None - backend.gen_pos_topo_vect = None - backend.line_or_pos_topo_vect = None - backend.line_ex_pos_topo_vect = None + bk_cls.sub_info = None + bk_cls.load_to_sub_pos = None + bk_cls.gen_to_sub_pos = None + bk_cls.line_or_to_sub_pos = None + bk_cls.line_ex_to_sub_pos = None + bk_cls.line_ex_to_sub_pos = None + bk_cls.load_pos_topo_vect = None + bk_cls.gen_pos_topo_vect = None + bk_cls.line_or_pos_topo_vect = None + bk_cls.line_ex_pos_topo_vect = None + + # test that the grid is not correct now + with self.assertRaises(EnvError): + bk_cls.assert_grid_correct_cls() # fill the _compute_sub_elements - backend._compute_sub_elements() - assert np.sum(backend.sub_info) == 56 - assert np.all(backend.sub_info == [3, 6, 4, 6, 5, 6, 3, 2, 5, 3, 3, 3, 4, 3]) + bk_cls._compute_sub_elements() + assert np.sum(bk_cls.sub_info) == 56 + assert np.all(bk_cls.sub_info == [3, 6, 4, 6, 5, 6, 3, 2, 5, 3, 3, 3, 4, 3]) # fill the *sub_pos - backend._compute_sub_pos() - assert 
np.all(backend.load_to_sub_pos == 0) - assert np.all(backend.gen_to_sub_pos == [1, 1, 1, 0, 0]) - assert np.all(backend.line_or_to_sub_pos == [1, 2, 2, 3, 4, 2, 1, 2, 3, 4, 1, 2, 1, 1, 1, 2, 3, 1, 0, 3]) - assert np.all(backend.line_ex_to_sub_pos == [5, 2, 3, 4, 3, 5, 4, 1, 2, 2, 2, 1, 2, 3, 2, 1, 4, 5, 1, 2]) + bk_cls._compute_sub_pos() + assert np.all(bk_cls.load_to_sub_pos == 0) + assert np.all(bk_cls.gen_to_sub_pos == [1, 1, 1, 0, 0]) + assert np.all(bk_cls.line_or_to_sub_pos == [1, 2, 2, 3, 4, 2, 1, 2, 3, 4, 1, 2, 1, 1, 1, 2, 3, 1, 0, 3]) + assert np.all(bk_cls.line_ex_to_sub_pos == [5, 2, 3, 4, 3, 5, 4, 1, 2, 2, 2, 1, 2, 3, 2, 1, 4, 5, 1, 2]) # fill the *pos_topo_vect - backend._compute_pos_big_topo() - assert np.all(backend.load_pos_topo_vect == [3, 9, 13, 19, 24, 35, 40, 43, 46, 49, 53]) - assert np.all(backend.gen_pos_topo_vect == [4, 10, 25, 33, 0]) - assert np.all(backend.line_or_pos_topo_vect == [1, 2, 5, 6, 7, 11, 14, 26, 27, 28, 36, 37, 41, 47, 50, 15, - 16, 20, 30, 38]) - assert np.all(backend.line_ex_pos_topo_vect == [8, 21, 12, 17, 22, 18, 23, 44, 48, 51, 42, 54, 45, 52, 55, 31, - 39, 29, 34, 32]) + backend._compute_pos_big_topo() # i test the object class here + assert np.all(bk_cls.load_pos_topo_vect == [3, 9, 13, 19, 24, 35, 40, 43, 46, 49, 53]) + assert np.all(bk_cls.gen_pos_topo_vect == [4, 10, 25, 33, 0]) + assert np.all(bk_cls.line_or_pos_topo_vect == [1, 2, 5, 6, 7, 11, 14, 26, 27, 28, 36, 37, 41, 47, 50, 15, + 16, 20, 30, 38]) + assert np.all(bk_cls.line_ex_pos_topo_vect == [8, 21, 12, 17, 22, 18, 23, 44, 48, 51, 42, 54, 45, 52, 55, 31, + 39, 29, 34, 32]) # this should pass - backend.assert_grid_correct() + bk_cls.assert_grid_correct_cls() if __name__ == "__main__": diff --git a/grid2op/tests/test_MakeEnv.py b/grid2op/tests/test_MakeEnv.py index 8db7fc960..21642f7e3 100644 --- a/grid2op/tests/test_MakeEnv.py +++ b/grid2op/tests/test_MakeEnv.py @@ -66,6 +66,16 @@ def test_blank(self): chronics_class=ChangeNothing, 
action_class=TopologyAndDispatchAction) + # test that it raises a warning because there is not layout + with self.assertRaises(UserWarning): + with warnings.catch_warnings(): + warnings.filterwarnings("error") + env = make("blank", + test=True, + grid_path=EXAMPLE_CASEFILE, + chronics_class=ChangeNothing, + action_class=TopologyAndDispatchAction) + def test_case14_fromfile(self): self.skipTest("deprecated test") with warnings.catch_warnings(): @@ -73,6 +83,13 @@ def test_case14_fromfile(self): env = make("rte_case14_fromfile", test=True) obs = env.reset() + def test_l2rpn_case14_sandbox_fromfile(self): + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env = make("l2rpn_case14_sandbox", test=True) + obs = env.reset() + assert env.grid_layout, "env.grid_layout is empty but should not be" + def test_case5_example(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") @@ -122,6 +139,25 @@ def test_case14redisp_test_thermals(self): obs = env.reset() assert np.all(env._thermal_limit_a == case14_redisp_TH_LIM) + env.set_thermal_limit({k: 200000. for k in env.name_line}) + assert np.all(env.get_thermal_limit() == 200000.) + + def test_init_thlim_from_dict(self): + """ + This tests that: + - can create an environment with thermal limit given as dictionary + - i can create an environment without chronics folder + - can create an environment without layout + + So lots of tests here ! 
+ + """ + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make(os.path.join(PATH_DATA_TEST, "5bus_example_th_lim_dict"), test=True) as env: + obs = env.reset() + assert np.all(env._thermal_limit_a == [200., 300., 500., 600., 700., 800., 900., 1000.]) + def test_case14_realistic(self): with warnings.catch_warnings(): warnings.filterwarnings("ignore") @@ -359,131 +395,164 @@ def test_opponent_init_budget(self): class TestMakeFromPathConfig(unittest.TestCase): def test_case5_config(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case5_example") - with make_from_dataset_path(dataset_path) as env: - # Check config is loaded from config.py - assert env._rewardClass == L2RPNReward - assert issubclass(env._actionClass, TopologyAction) - assert issubclass(env._observationClass, CompleteObservation) - assert isinstance(env.backend, PandaPowerBackend) - assert env._legalActClass == DefaultRules - assert isinstance(env._voltage_controler, ControlVoltageFromFile) - assert isinstance(env.chronics_handler.real_data, Multifolder) - assert env.action_space.grid_layout != None + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path) as env: + # Check config is loaded from config.py + assert env._rewardClass == L2RPNReward + assert issubclass(env._actionClass, TopologyAction) + assert issubclass(env._observationClass, CompleteObservation) + assert isinstance(env.backend, PandaPowerBackend) + assert env._legalActClass == DefaultRules + assert isinstance(env._voltage_controler, ControlVoltageFromFile) + assert isinstance(env.chronics_handler.real_data, Multifolder) + assert env.action_space.grid_layout != None def test_case5_runs(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case5_example") - with make_from_dataset_path(dataset_path) as env: - assert env.redispatching_unit_commitment_availble == True - obs = env.reset() - sim_obs, reward, done, info = 
obs.simulate(env.action_space()) - assert sim_obs != obs + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path) as env: + assert env.redispatching_unit_commitment_availble == True + obs = env.reset() + sim_obs, reward, done, info = obs.simulate(env.action_space()) + assert sim_obs != obs def test_case14_test_config(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_test") - with make_from_dataset_path(dataset_path) as env: - # Check config is loaded from config.py - assert env._rewardClass == RedispReward - assert issubclass(env._actionClass, TopologyAndDispatchAction) - assert issubclass(env._observationClass, CompleteObservation) - assert isinstance(env.backend, PandaPowerBackend) - assert env._legalActClass == DefaultRules - assert isinstance(env._voltage_controler, ControlVoltageFromFile) - assert isinstance(env.chronics_handler.real_data, Multifolder) - assert env.action_space.grid_layout != None + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path) as env: + # Check config is loaded from config.py + assert env._rewardClass == RedispReward + assert issubclass(env._actionClass, TopologyAndDispatchAction) + assert issubclass(env._observationClass, CompleteObservation) + assert isinstance(env.backend, PandaPowerBackend) + assert env._legalActClass == DefaultRules + assert isinstance(env._voltage_controler, ControlVoltageFromFile) + assert isinstance(env.chronics_handler.real_data, Multifolder) + assert env.action_space.grid_layout != None def test_case14_test_runs(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_test") - with make_from_dataset_path(dataset_path) as env: - assert env.redispatching_unit_commitment_availble == True - obs = env.reset() - sim_obs, reward, done, info = obs.simulate(env.action_space()) - assert sim_obs != obs - assert np.all(env._thermal_limit_a == case14_test_TH_LIM) + with 
warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path) as env: + assert env.redispatching_unit_commitment_availble == True + obs = env.reset() + sim_obs, reward, done, info = obs.simulate(env.action_space()) + assert sim_obs != obs + assert np.all(env._thermal_limit_a == case14_test_TH_LIM) def test_case14_redisp_config(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_redisp") - with make_from_dataset_path(dataset_path) as env: - # Check config is loaded from config.py - assert env._rewardClass == RedispReward - assert issubclass(env._actionClass, TopologyAndDispatchAction) - assert issubclass(env._observationClass, CompleteObservation) - assert isinstance(env.backend, PandaPowerBackend) - assert env._legalActClass == DefaultRules - assert isinstance(env._voltage_controler, ControlVoltageFromFile) - assert isinstance(env.chronics_handler.real_data, Multifolder) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path) as env: + # Check config is loaded from config.py + assert env._rewardClass == RedispReward + assert issubclass(env._actionClass, TopologyAndDispatchAction) + assert issubclass(env._observationClass, CompleteObservation) + assert isinstance(env.backend, PandaPowerBackend) + assert env._legalActClass == DefaultRules + assert isinstance(env._voltage_controler, ControlVoltageFromFile) + assert isinstance(env.chronics_handler.real_data, Multifolder) def test_case14_redisp_runs(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_redisp") - with make_from_dataset_path(dataset_path) as env: - assert env.redispatching_unit_commitment_availble == True - obs = env.reset() - sim_obs, reward, done, info = obs.simulate(env.action_space()) - assert sim_obs != obs - assert np.all(env._thermal_limit_a == case14_redisp_TH_LIM) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with 
make_from_dataset_path(dataset_path) as env: + assert env.redispatching_unit_commitment_availble == True + obs = env.reset() + sim_obs, reward, done, info = obs.simulate(env.action_space()) + assert sim_obs != obs + assert np.all(env._thermal_limit_a == case14_redisp_TH_LIM) def test_l2rpn19_test_config(self): self.skipTest("l2rpn has been removed") dataset_path = os.path.join(PATH_CHRONICS_Make2, "l2rpn_2019") - with make_from_dataset_path(dataset_path) as env: - # Check config is loaded from config.py - assert env._rewardClass == L2RPNReward - assert env._actionClass == TopologyAction - assert env._observationClass == CompleteObservation - assert isinstance(env.backend, PandaPowerBackend) - assert env._legalActClass == DefaultRules - assert isinstance(env._voltage_controler, ControlVoltageFromFile) - assert isinstance(env.chronics_handler.real_data, Multifolder) - assert env.action_space.grid_layout != None + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path) as env: + # Check config is loaded from config.py + assert env._rewardClass == L2RPNReward + assert env._actionClass == TopologyAction + assert env._observationClass == CompleteObservation + assert isinstance(env.backend, PandaPowerBackend) + assert env._legalActClass == DefaultRules + assert isinstance(env._voltage_controler, ControlVoltageFromFile) + assert isinstance(env.chronics_handler.real_data, Multifolder) + assert env.action_space.grid_layout != None class TestMakeFromPathConfigOverride(unittest.TestCase): def test_case5_override_reward(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case5_example") - with make_from_dataset_path(dataset_path, reward_class=FlatReward) as env: - assert env._rewardClass == FlatReward + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, reward_class=FlatReward) as env: + assert env._rewardClass == FlatReward def 
test_case14_test_override_reward(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_test") - with make_from_dataset_path(dataset_path, reward_class=FlatReward) as env: - assert env._rewardClass == FlatReward + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, reward_class=FlatReward) as env: + assert env._rewardClass == FlatReward def test_l2rpn19_override_reward(self): self.skipTest("l2rpn has been removed") dataset_path = os.path.join(PATH_CHRONICS_Make2, "l2rpn_2019") - with make_from_dataset_path(dataset_path, reward_class=FlatReward) as env: - assert env._rewardClass == FlatReward + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, reward_class=FlatReward) as env: + assert env._rewardClass == FlatReward def test_case5_override_action(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case5_example") - with make_from_dataset_path(dataset_path, action_class=VoltageOnlyAction) as env: - assert issubclass(env._actionClass, VoltageOnlyAction) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, action_class=VoltageOnlyAction) as env: + assert issubclass(env._actionClass, VoltageOnlyAction) def test_case14_test_override_action(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_test") - with make_from_dataset_path(dataset_path, action_class=VoltageOnlyAction) as env: - assert issubclass(env._actionClass, VoltageOnlyAction) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, action_class=VoltageOnlyAction) as env: + assert issubclass(env._actionClass, VoltageOnlyAction) def test_l2rpn19_override_action(self): self.skipTest("l2rpn has been removed") dataset_path = os.path.join(PATH_CHRONICS_Make2, "l2rpn_2019") - with make_from_dataset_path(dataset_path, 
action_class=VoltageOnlyAction) as env: - assert issubclass(env._actionClass, VoltageOnlyAction) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, action_class=VoltageOnlyAction) as env: + assert issubclass(env._actionClass, VoltageOnlyAction) def test_case5_override_chronics(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case5_example") - with make_from_dataset_path(dataset_path, chronics_class=ChangeNothing) as env: - assert isinstance(env.chronics_handler.real_data, ChangeNothing) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, chronics_class=ChangeNothing) as env: + assert isinstance(env.chronics_handler.real_data, ChangeNothing) def test_case14_test_override_chronics(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_test") - with make_from_dataset_path(dataset_path, chronics_class=ChangeNothing) as env: - assert isinstance(env.chronics_handler.real_data, ChangeNothing) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, chronics_class=ChangeNothing) as env: + assert isinstance(env.chronics_handler.real_data, ChangeNothing) def test_l2rpn19_override_chronics(self): self.skipTest("l2rpn has been removed") dataset_path = os.path.join(PATH_CHRONICS_Make2, "l2rpn_2019") - with make_from_dataset_path(dataset_path, chronics_class=ChangeNothing) as env: - assert isinstance(env.chronics_handler.real_data, ChangeNothing) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, chronics_class=ChangeNothing) as env: + assert isinstance(env.chronics_handler.real_data, ChangeNothing) def test_case5_override_feed_kwargs(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case5_example") @@ -493,8 +562,10 @@ def test_case5_override_feed_kwargs(self): "path": chronics_path, "gridvalueClass": 
GridStateFromFile } - with make_from_dataset_path(dataset_path, data_feeding_kwargs=dfk) as env: - assert isinstance(env.chronics_handler.real_data, ChangeNothing) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, data_feeding_kwargs=dfk) as env: + assert isinstance(env.chronics_handler.real_data, ChangeNothing) def test_case14_test_override_feed_kwargs(self): dataset_path = os.path.join(PATH_CHRONICS_Make2, "rte_case14_test") @@ -504,8 +575,10 @@ def test_case14_test_override_feed_kwargs(self): "path": chronics_path, "gridvalueClass": GridStateFromFile } - with make_from_dataset_path(dataset_path, data_feeding_kwargs=dfk) as env: - assert isinstance(env.chronics_handler.real_data, ChangeNothing) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, data_feeding_kwargs=dfk) as env: + assert isinstance(env.chronics_handler.real_data, ChangeNothing) def test_l2rpn19_override_feed_kwargs(self): self.skipTest("l2rpn has been removed") @@ -516,8 +589,10 @@ def test_l2rpn19_override_feed_kwargs(self): "path": chronics_path, "gridvalueClass": GridStateFromFile } - with make_from_dataset_path(dataset_path, data_feeding_kwargs=dfk) as env: - assert isinstance(env.chronics_handler.real_data, ChangeNothing) + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with make_from_dataset_path(dataset_path, data_feeding_kwargs=dfk) as env: + assert isinstance(env.chronics_handler.real_data, ChangeNothing) class TestMakeFromPathParameters(unittest.TestCase): diff --git a/grid2op/tests/test_MultiProcess.py b/grid2op/tests/test_MultiProcess.py index e5d88db86..d86cd8afc 100644 --- a/grid2op/tests/test_MultiProcess.py +++ b/grid2op/tests/test_MultiProcess.py @@ -9,6 +9,8 @@ import pdb import warnings from grid2op.tests.helper_path_test import * + +import grid2op from grid2op.Environment import BaseMultiProcessEnvironment from grid2op.Environment 
import SingleEnvMultiProcess from grid2op.Environment import MultiEnvMultiProcess @@ -16,9 +18,6 @@ from grid2op.Observation import CompleteObservation import pdb -import warnings -warnings.simplefilter("error") - class TestBaseMultiProcessEnvironment(unittest.TestCase): def test_creation_multienv(self): @@ -28,6 +27,7 @@ def test_creation_multienv(self): with make("rte_case5_example", test=True) as env: envs = [env for _ in range(nb_env)] multi_envs = BaseMultiProcessEnvironment(envs) + obss, rewards, dones, infos = multi_envs.step([env.action_space() for _ in range(multi_envs.nb_env)]) for ob in obss: assert isinstance(ob, CompleteObservation) @@ -63,6 +63,24 @@ def test_seeding(self): assert np.all(seeds_1 == seeds_3) assert np.any(seeds_1 != seeds_2) + def test_simulate(self): + env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + env1 = grid2op.make(env_name) + env2 = grid2op.make(env_name) + + multi_env = BaseMultiProcessEnvironment([env1, env2]) + obss = multi_env.reset() + + # simulate + actions = [env1.action_space(), env2.action_space()] + sim_obss, sim_rs, sim_ds, sim_is = multi_env.simulate(actions) + multi_env.close() + env1.close() + env2.close() + + class TestSingleEnvMultiProcess(unittest.TestCase): def test_creation_multienv(self): nb_env = 2 diff --git a/grid2op/tests/test_Observation.py b/grid2op/tests/test_Observation.py index 284e49f80..66c52c5b5 100644 --- a/grid2op/tests/test_Observation.py +++ b/grid2op/tests/test_Observation.py @@ -191,7 +191,31 @@ def setUp(self): 'storage_power': [], "gen_p_before_curtail": [0.0, 0.0, 0.0, 7.599999904632568, 0.0], "curtailment": [0.0, 0.0, 0.0, 0.0, 0.0], - "curtailment_limit": [1.0, 1.0, 1.0, 1.0, 1.0] + "curtailment_limit": [1.0, 1.0, 1.0, 1.0, 1.0], + "theta_ex": [-1.3276801109313965, -4.100967884063721, -10.311812400817871, -11.245238304138184, + -10.119081497192383, -10.50421142578125, -11.245238304138184, -4.473601341247559, + -4.824699401855469, 
-4.100967884063721, -4.824699401855469, -4.100967884063721, + -10.119081497192383, -10.39695930480957, -10.50421142578125, -7.88769006729126, + -10.060456275939941, -9.613715171813965, -7.1114115715026855, -10.060456275939941], + "theta_or": [0.0, 0.0, -10.060456275939941, -10.060456275939941, -10.311812400817871, + -10.39695930480957, -10.50421142578125, -1.3276801109313965, + -1.3276801109313965, -1.3276801109313965, -4.473601341247559, + -4.824699401855469, -9.613715171813965, -9.613715171813965, + -9.613715171813965, -4.824699401855469, -4.824699401855469, + -4.100967884063721, -7.88769006729126, -7.88769006729126], + "gen_theta": [-1.3276801109313965, -4.473601341247559, -9.613715171813965, + -7.1114115715026855, 0.0], + "load_theta": [-1.3276801109313965, -4.473601341247559, -11.245238304138184, + -4.824699401855469, -4.100967884063721, -9.613715171813965, + -10.060456275939941, -10.311812400817871, -10.119081497192383, + -10.39695930480957, -10.50421142578125], + "storage_theta": [], + "_thermal_limit": [352.8251647949219, 352.8251647949219, 183197.6875, 183197.6875, 183197.6875, + 12213.1787109375, 183197.6875, 352.8251647949219, 352.8251647949219, + 352.8251647949219, 352.8251647949219, 352.8251647949219, 183197.6875, + 183197.6875, 183197.6875, 352.8251647949219, 352.8251647949219, + 352.8251647949219, 2721.794189453125, 2721.794189453125], + "support_theta": [True] } self.dtypes = np.array([dt_int, dt_int, dt_int, dt_int, dt_int, dt_int, dt_float, dt_float, @@ -1042,6 +1066,9 @@ def test_change_bus(self): # Simulate & Step self.sim_obs, _, _, _ = self.obs.simulate(change_act) self.step_obs, _, _, _ = self.env.step(change_act) + assert isinstance(self.sim_obs, type(self.step_obs)), "sim_obs is not the same type as the step" + assert isinstance(self.step_obs, type(self.sim_obs)), "step is not the same type as the simulation" + # Test observations are the same if self.sim_obs != self.step_obs: diff_, attr_diff = self.sim_obs.where_different(self.step_obs) 
diff --git a/grid2op/tests/test_Runner.py b/grid2op/tests/test_Runner.py index c4b9b3a8d..a85aea3de 100644 --- a/grid2op/tests/test_Runner.py +++ b/grid2op/tests/test_Runner.py @@ -59,15 +59,17 @@ def setUp(self): } self.gridStateclass = Multifolder self.backendClass = PandaPowerBackend - self.runner = Runner(init_grid_path=self.init_grid_path, - path_chron=self.path_chron, - parameters_path=self.parameters_path, - names_chronics_to_backend=self.names_chronics_to_backend, - gridStateclass=self.gridStateclass, - backendClass=self.backendClass, - rewardClass=L2RPNReward, - max_iter=self.max_iter, - name_env="test_runner_env") + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") # silence the warning about missing layout + self.runner = Runner(init_grid_path=self.init_grid_path, + path_chron=self.path_chron, + parameters_path=self.parameters_path, + names_chronics_to_backend=self.names_chronics_to_backend, + gridStateclass=self.gridStateclass, + backendClass=self.backendClass, + rewardClass=L2RPNReward, + max_iter=self.max_iter, + name_env="test_runner_env") # def test_one_episode(self): # tested in the runner fast # def test_one_episode_detailed(self): # tested in the runner fast @@ -262,8 +264,8 @@ def _aux_backward(self, base_path, g2op_version_txt, g2op_version): assert len(this_episode.env_actions) == nb_ts, f"wrong number of elements for env_actions for " \ f"version {g2op_version_txt}: " \ f"{len(this_episode.env_actions)} vs {nb_ts}" - except: - raise + except Exception as exc_: + raise exc_ if g2op_version <= "1.4.0": assert EpisodeData.get_grid2op_version(full_episode_path) == "<=1.4.0", \ @@ -277,7 +279,7 @@ def _aux_backward(self, base_path, g2op_version_txt, g2op_version): def test_backward_compatibility(self): backward_comp_version = ["1.0.0", "1.1.0", "1.1.1", "1.2.0", "1.2.1", "1.2.2", "1.2.3", "1.3.0", "1.3.1", - "1.4.0"] + "1.4.0", "1.5.0"] curr_version = "test_version" assert 'curtailment' in CompleteObservation.attr_list_vect, 
"error at the beginning" with warnings.catch_warnings(): diff --git a/grid2op/tests/test_RunnerFast.py b/grid2op/tests/test_RunnerFast.py index 2c9645418..bf06c19ef 100644 --- a/grid2op/tests/test_RunnerFast.py +++ b/grid2op/tests/test_RunnerFast.py @@ -40,8 +40,8 @@ def setUp(self): ] with warnings.catch_warnings(): warnings.filterwarnings("ignore") - env = grid2op.make("l2rpn_case14_sandbox", test=True) - self.runner = Runner(**env.get_params_for_runner()) + self.env = grid2op.make("l2rpn_case14_sandbox", test=True) + self.runner = Runner(**self.env.get_params_for_runner()) def test_one_episode(self): _, cum_reward, timestep, episode_data = self.runner.run_one_episode(max_iter=self.max_iter) diff --git a/grid2op/tests/test_issue_185.py b/grid2op/tests/test_issue_185.py new file mode 100644 index 000000000..47317cbd5 --- /dev/null +++ b/grid2op/tests/test_issue_185.py @@ -0,0 +1,109 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import unittest +import warnings + +import grid2op +from grid2op.gym_compat import GymEnv, BoxGymActSpace, BoxGymObsSpace, MultiDiscreteActSpace, DiscreteActSpace + + +class Issue185Tester(unittest.TestCase): + """ + this test ensure that every "test" environment can be converted to gym + + this test suit goes beyond the simple error raised in the github issue. 
+ """ + def test_issue_185(self): + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True) as env: + gym_env = GymEnv(env) + obs_gym = gym_env.reset() + assert obs_gym["a_ex"].shape[0] == env.n_line, f"error for {env_name}" + + def test_issue_185_act_box_space(self): + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True) as env: + gym_env = GymEnv(env) + gym_env.action_space = BoxGymActSpace(gym_env.init_env.action_space) + gym_env.action_space.seed(0) + obs_gym = gym_env.reset() + act = gym_env.action_space.sample() + obs, reward, done, info = gym_env.step(act) + + def test_issue_185_obs_box_space(self): + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True) as env: + gym_env = GymEnv(env) + gym_env.observation_space = BoxGymObsSpace(gym_env.init_env.observation_space) + gym_env.action_space.seed(0) + obs_gym = gym_env.reset() + act = gym_env.action_space.sample() + obs, reward, done, info = gym_env.step(act) + + def test_issue_185_act_multidiscrete_space(self): + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + elif env_name == "l2rpn_neurips_2020_track1": + # takes too much time + continue + elif env_name == "l2rpn_neurips_2020_track2": + # takes too much time + continue + elif env_name == "rte_case118_example": + # takes too much time + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True) as env: + gym_env = GymEnv(env) + gym_env.action_space = MultiDiscreteActSpace(gym_env.init_env.action_space) + gym_env.action_space.seed(0) 
+ obs_gym = gym_env.reset() + act = gym_env.action_space.sample() + obs, reward, done, info = gym_env.step(act) + + def test_issue_185_act_discrete_space(self): + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + elif env_name == "l2rpn_neurips_2020_track1": + # takes too much time + continue + elif env_name == "l2rpn_neurips_2020_track2": + # takes too much time + continue + elif env_name == "rte_case118_example": + # takes too much time + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True) as env: + gym_env = GymEnv(env) + gym_env.action_space = DiscreteActSpace(gym_env.init_env.action_space) + gym_env.action_space.seed(0) + obs_gym = gym_env.reset() + act = gym_env.action_space.sample() + obs, reward, done, info = gym_env.step(act) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/tests/test_issue_187.py b/grid2op/tests/test_issue_187.py new file mode 100644 index 000000000..aab75dc28 --- /dev/null +++ b/grid2op/tests/test_issue_187.py @@ -0,0 +1,109 @@ +# Copyright (c) 2019-2020, RTE (https://www.rte-france.com) +# See AUTHORS.txt +# This Source Code Form is subject to the terms of the Mozilla Public License, version 2.0. +# If a copy of the Mozilla Public License, version 2.0 was not distributed with this file, +# you can obtain one at http://mozilla.org/MPL/2.0/. +# SPDX-License-Identifier: MPL-2.0 +# This file is part of Grid2Op, Grid2Op a testbed platform to model sequential decision making in power systems. + +import unittest +import warnings +import numpy as np + +import grid2op +from grid2op.dtypes import dt_float +from grid2op.Reward import RedispReward +from grid2op.Runner import Runner + + +class Issue187Tester(unittest.TestCase): + """ + this test ensure that every "test" environment can be converted to gym + + this test suit goes beyond the simple error raised in the github issue. 
+ """ + def setUp(self) -> None: + self.tol = 1e-6 + + def test_issue_187(self): + """test the range of the reward class""" + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True, reward_class=RedispReward) as env: + obs = env.reset() + obs, reward, done, info = env.step(env.action_space()) + assert reward <= env.reward_range[1], f"error for reward_max for {env_name}" + assert reward >= env.reward_range[0], f"error for reward_min for {env_name}" + + def test_custom_reward(self): + """test i can generate the reward and use it in the envs""" + reward_cls = RedispReward.generate_class_custom_params(alpha_redisph=2, + min_load_ratio=0.15, + worst_losses_ratio=0.05, + min_reward=-10., + reward_illegal_ambiguous=0., + least_losses_ratio=0.015) + + for env_name in grid2op.list_available_test_env(): + if env_name == "blank": + continue + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True, reward_class=reward_cls) as env: + obs = env.reset() + obs, reward, done, info = env.step(env.action_space()) + # test that reward is in the correct range + assert reward <= env.reward_range[1], f"error reward > reward_max for {env_name}" + assert reward >= env.reward_range[0], f"error reward < reward_min for {env_name}" + + # test the parameters are effectively changed + + # what should be computed + _alpha_redisph = dt_float(2) + _min_load_ratio = dt_float(0.15) + _worst_losses_ratio = dt_float(0.05) + _min_reward = dt_float(-10.) + _reward_illegal_ambiguous = dt_float(0.) 
+ _least_losses_ratio = dt_float(0.015) + + worst_marginal_cost = np.max(env.gen_cost_per_MW) + worst_load = dt_float(np.sum(env.gen_pmax)) + # it's not the worst, but definitely an upper bound + worst_losses = dt_float(_worst_losses_ratio) * worst_load + worst_redisp = _alpha_redisph * np.sum(env.gen_pmax) # not realistic, but an upper bound + max_regret = (worst_losses + worst_redisp) * worst_marginal_cost + reward_min = dt_float(_min_reward) + + least_loads = dt_float(worst_load * _min_load_ratio) # half the capacity of the grid + least_losses = dt_float(_least_losses_ratio * least_loads) # 1.5% of losses + least_redisp = dt_float(0.0) # lower_bound is 0 + base_marginal_cost = np.min(env.gen_cost_per_MW[env.gen_cost_per_MW > 0.]) + min_regret = (least_losses + least_redisp) * base_marginal_cost + reward_max = dt_float((max_regret - min_regret) / least_loads) + assert abs(env.reward_range[1] - reward_max) <= self.tol, \ + f"wrong reward max computed for {env_name}" + assert abs(env.reward_range[0] - reward_min) <= self.tol, \ + f"wrong reward min computed for {env_name}" + + def test_custom_reward_runner(self): + """test i can generate the reward and use it in the envs""" + reward_cls = RedispReward.generate_class_custom_params(alpha_redisph=2, + min_load_ratio=0.15, + worst_losses_ratio=0.05, + min_reward=-10., + reward_illegal_ambiguous=0., + least_losses_ratio=0.015) + env_name = "l2rpn_case14_sandbox" + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + with grid2op.make(env_name, test=True, reward_class=reward_cls) as env: + obs = env.reset() + runner = Runner(**env.get_params_for_runner()) + res = runner.run(nb_episode=2, nb_process=2) + + +if __name__ == "__main__": + unittest.main() diff --git a/grid2op/utils/__init__.py b/grid2op/utils/__init__.py index 1dc179bb6..5f4fb5988 100644 --- a/grid2op/utils/__init__.py +++ b/grid2op/utils/__init__.py @@ -1,4 +1,4 @@ __all__ = ["EpisodeStatistics", "ScoreL2RPN2020"] from 
grid2op.utils.underlying_statistics import EpisodeStatistics -from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020 \ No newline at end of file +from grid2op.utils.l2rpn_2020_scores import ScoreL2RPN2020 diff --git a/utils/make_release.py b/utils/make_release.py index 443e11b1f..8857fd120 100644 --- a/utils/make_release.py +++ b/utils/make_release.py @@ -66,21 +66,24 @@ def modify_and_push_docker(version, # grid2op version try: maj_, min_, minmin_, *post = version.split(".") - except: + except Exception as exc_: raise RuntimeError( - "script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1). Please modify \"--version\" argument") + "script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1). " + "Please modify \"--version\" argument") regex_version = "[0-9]+\.[0-9]+\.[0-9]+(.post[0-9]+){0,1}" if re.match("^{}$".format(regex_version), version) is None: raise RuntimeError( - "script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1) and not {}. Please modify \"--version\" argument".format( + "script \"update_version\": version should be formated as XX.YY.ZZ (eg 0.3.1) and not {}. " + "Please modify \"--version\" argument".format( version)) # setup.py setup_path = os.path.join(path, "setup.py") if not os.path.exists(setup_path): raise RuntimeError( - "script \"update_version\" cannot find the root path of Grid2op. Please provide a valid \"--path\" argument.") + "script \"update_version\" cannot find the root path of Grid2op. 
" + "Please provide a valid \"--path\" argument.") with open(setup_path, "r") as f: new_setup = f.read() try: @@ -154,6 +157,8 @@ def modify_and_push_docker(version, # grid2op version with warnings.catch_warnings(): warnings.filterwarnings("ignore") PATH_PREVIOUS_RUNNER = os.path.join(path, "grid2op", "data_test", "runner_data") + # set the right grid2op version (instead of reloading the stuff, ugly, but working) + grid2op.__version__ = version env = grid2op.make("rte_case5_example", test=True) runner = Runner(**env.get_params_for_runner(), agentClass=RandomAgent) runner.run(nb_episode=2,