diff --git a/.github/workflows/linux-tutorials-test.yml b/.github/workflows/linux-tutorials-test.yml
index 69d1fcc1a..3c78f5db3 100644
--- a/.github/workflows/linux-tutorials-test.yml
+++ b/.github/workflows/linux-tutorials-test.yml
@@ -18,7 +18,7 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-        python-version: ['3.7', '3.8', '3.9', '3.10', '3.11']
+        python-version: ['3.7', '3.8', '3.9', '3.10']  # '3.11' - broken due to numba
         tutorial: ['CleanRL', 'Tianshou', 'EnvironmentCreation']
     steps:
       - uses: actions/checkout@v3
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 67bac5621..d483f550d 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -1,18 +1,18 @@
 ---
 repos:
   - repo: https://github.com/python/black
-    rev: 22.8.0
+    rev: 23.1.0
     hooks:
       - id: black
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.1
+    rev: v2.2.2
     hooks:
       - id: codespell
         args:
           - --skip=*.css,*.js,*.map,*.scss,*svg
           - --ignore-words-list=magent
   - repo: https://github.com/PyCQA/flake8
-    rev: 5.0.4
+    rev: 6.0.0
     hooks:
       - id: flake8
         args:
@@ -23,23 +23,23 @@ repos:
           - --show-source
           - --statistics
   - repo: https://github.com/PyCQA/isort
-    rev: 5.10.1
+    rev: 5.12.0
     hooks:
       - id: isort
         args: ["--profile", "black"]
   - repo: https://github.com/asottile/pyupgrade
-    rev: v2.38.0
+    rev: v3.3.1
     hooks:
       - id: pyupgrade
        # TODO: remove `--keep-runtime-typing` option
         args: ["--py37-plus", "--keep-runtime-typing"]
   - repo: https://github.com/pre-commit/pre-commit-hooks
-    rev: v4.3.0
+    rev: v4.4.0
     hooks:
       - id: mixed-line-ending
         args: ["--fix=lf"]
   - repo: https://github.com/pycqa/pydocstyle
-    rev: 6.1.1
+    rev: 6.3.0
     hooks:
       - id: pydocstyle
         args:
diff --git a/pettingzoo/__init__.py b/pettingzoo/__init__.py
index f386c914a..e2a56276c 100644
--- a/pettingzoo/__init__.py
+++ b/pettingzoo/__init__.py
@@ -8,7 +8,6 @@
 # DSP is far more benign (and should probably be the default in SDL anyways)
 if sys.platform.startswith("linux"):
-    os.environ["SDL_AUDIODRIVER"] = "dsp"
     os.environ["PYGAME_HIDE_SUPPORT_PROMPT"] = "hide"
diff --git a/pettingzoo/atari/space_invaders/space_invaders.py b/pettingzoo/atari/space_invaders/space_invaders.py
index 3d7942556..83891720b 100644
--- a/pettingzoo/atari/space_invaders/space_invaders.py
+++ b/pettingzoo/atari/space_invaders/space_invaders.py
@@ -87,7 +87,7 @@ def raw_env(
     zigzaging_bombs=False,
     fast_bomb=False,
     invisible_invaders=False,
-    **kwargs
+    **kwargs,
 ):
     mode = 33 + (
         moving_shields * 1
diff --git a/pettingzoo/butterfly/cooperative_pong/manual_policy.py b/pettingzoo/butterfly/cooperative_pong/manual_policy.py
index 0995fc8ec..f5ed69c36 100644
--- a/pettingzoo/butterfly/cooperative_pong/manual_policy.py
+++ b/pettingzoo/butterfly/cooperative_pong/manual_policy.py
@@ -3,7 +3,6 @@
 class ManualPolicy:
     def __init__(self, env, agent_id: int = 0, show_obs: bool = False):
-
         self.env = env
         self.agent_id = agent_id
         self.agent = self.env.agents[self.agent_id]
diff --git a/pettingzoo/butterfly/knights_archers_zombies/knights_archers_zombies.py b/pettingzoo/butterfly/knights_archers_zombies/knights_archers_zombies.py
index f158b9e82..4b4ce3ef6 100644
--- a/pettingzoo/butterfly/knights_archers_zombies/knights_archers_zombies.py
+++ b/pettingzoo/butterfly/knights_archers_zombies/knights_archers_zombies.py
@@ -211,7 +211,6 @@ def env(**kwargs):
 class raw_env(AECEnv, EzPickle):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "knights_archers_zombies_v10",
@@ -700,7 +699,6 @@ def step(self, action):
         # Do these things once per cycle
         if self._agent_selector.is_last():
-
             # Update the weapons
             self.update_weapons()
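Note: the repeated one-line blank-line deletions throughout this patch appear to follow from the black bump above; black 23.1.0 removes empty lines sitting directly beneath a `class` or `def` header. A minimal before/after sketch (hypothetical class, not from this repo):

    # accepted by black 22.8.0
    class Example:

        metadata = {"render_modes": ["human"]}

    # reformatted by black 23.1.0
    class Example:
        metadata = {"render_modes": ["human"]}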
diff --git a/pettingzoo/butterfly/knights_archers_zombies/manual_policy.py b/pettingzoo/butterfly/knights_archers_zombies/manual_policy.py
index 184ace86d..0d33f3c30 100644
--- a/pettingzoo/butterfly/knights_archers_zombies/manual_policy.py
+++ b/pettingzoo/butterfly/knights_archers_zombies/manual_policy.py
@@ -3,7 +3,6 @@
 class ManualPolicy:
     def __init__(self, env, agent_id: int = 0, show_obs: bool = False):
-
         self.env = env
         self.agent_id = agent_id
         self.agent = self.env.agents[self.agent_id]
diff --git a/pettingzoo/butterfly/pistonball/manual_policy.py b/pettingzoo/butterfly/pistonball/manual_policy.py
index 8c664cd4e..1e26d9fd1 100644
--- a/pettingzoo/butterfly/pistonball/manual_policy.py
+++ b/pettingzoo/butterfly/pistonball/manual_policy.py
@@ -4,7 +4,6 @@
 class ManualPolicy:
     def __init__(self, env, agent_id: int = 0, show_obs: bool = False):
-
         self.env = env
         self.agent_id = agent_id
         self.agent = self.env.agents[self.agent_id]
diff --git a/pettingzoo/butterfly/pistonball/pistonball.py b/pettingzoo/butterfly/pistonball/pistonball.py
index 9f2d0cdb7..ad6cf3d9e 100644
--- a/pettingzoo/butterfly/pistonball/pistonball.py
+++ b/pettingzoo/butterfly/pistonball/pistonball.py
@@ -122,7 +122,6 @@ def env(**kwargs):
 class raw_env(AECEnv, EzPickle):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "pistonball_v6",
diff --git a/pettingzoo/classic/chess/chess.py b/pettingzoo/classic/chess/chess.py
index 0878af24a..700f41ebd 100644
--- a/pettingzoo/classic/chess/chess.py
+++ b/pettingzoo/classic/chess/chess.py
@@ -107,7 +107,6 @@ def env(render_mode=None):
 class raw_env(AECEnv):
-
     metadata = {
         "render_modes": ["human", "ansi", "rgb_array"],
         "name": "chess_v5",
diff --git a/pettingzoo/classic/go/go.py b/pettingzoo/classic/go/go.py
index 908eb7259..e0fe8454b 100644
--- a/pettingzoo/classic/go/go.py
+++ b/pettingzoo/classic/go/go.py
@@ -143,7 +143,6 @@ def env(**kwargs):
 class raw_env(AECEnv):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "go_v5",
diff --git a/pettingzoo/classic/hanabi/hanabi.py b/pettingzoo/classic/hanabi/hanabi.py
index 2552aaf6c..d1f2bce3f 100644
--- a/pettingzoo/classic/hanabi/hanabi.py
+++ b/pettingzoo/classic/hanabi/hanabi.py
@@ -379,7 +379,6 @@ def _raise_error_if_config_values_out_of_range(
     observation_type,
     random_start_player,
 ):
-
     if not (2 <= colors <= 5):
         raise ValueError(
             f"Config parameter {colors} is out of bounds. See description in hanabi.py."
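A quick aside on the hanabi context above: `not (2 <= colors <= 5)` relies on Python's chained comparisons and is equivalent to `colors < 2 or colors > 5`. For instance:

    colors = 6
    assert not (2 <= colors <= 5)
    assert (colors < 2) or (colors > 5)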
diff --git a/pettingzoo/classic/rlcard_envs/gin_rummy.py b/pettingzoo/classic/rlcard_envs/gin_rummy.py
index b9efbbb77..c97eb5406 100644
--- a/pettingzoo/classic/rlcard_envs/gin_rummy.py
+++ b/pettingzoo/classic/rlcard_envs/gin_rummy.py
@@ -138,7 +138,6 @@ def env(**kwargs):
 class raw_env(RLCardBase, EzPickle):
-
     metadata = {
         "render_modes": ["human"],
         "name": "gin_rummy_v4",
diff --git a/pettingzoo/classic/rlcard_envs/leduc_holdem.py b/pettingzoo/classic/rlcard_envs/leduc_holdem.py
index 01d0d9fd8..a0100d273 100644
--- a/pettingzoo/classic/rlcard_envs/leduc_holdem.py
+++ b/pettingzoo/classic/rlcard_envs/leduc_holdem.py
@@ -104,7 +104,6 @@ def env(**kwargs):
 class raw_env(RLCardBase):
-
     metadata = {
         "render_modes": ["human"],
         "name": "leduc_holdem_v4",
diff --git a/pettingzoo/classic/rlcard_envs/texas_holdem.py b/pettingzoo/classic/rlcard_envs/texas_holdem.py
index 658930f41..06c844ef4 100644
--- a/pettingzoo/classic/rlcard_envs/texas_holdem.py
+++ b/pettingzoo/classic/rlcard_envs/texas_holdem.py
@@ -115,7 +115,6 @@ def env(**kwargs):
 class raw_env(RLCardBase):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "texas_holdem_v4",
diff --git a/pettingzoo/classic/rlcard_envs/texas_holdem_no_limit.py b/pettingzoo/classic/rlcard_envs/texas_holdem_no_limit.py
index eac88f454..e7a700761 100644
--- a/pettingzoo/classic/rlcard_envs/texas_holdem_no_limit.py
+++ b/pettingzoo/classic/rlcard_envs/texas_holdem_no_limit.py
@@ -128,7 +128,6 @@ def env(**kwargs):
 class raw_env(RLCardBase):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "texas_holdem_no_limit_v6",
diff --git a/pettingzoo/classic/rps/rps.py b/pettingzoo/classic/rps/rps.py
index 75a3579c1..156415291 100644
--- a/pettingzoo/classic/rps/rps.py
+++ b/pettingzoo/classic/rps/rps.py
@@ -451,7 +451,6 @@ def step(self, action):
         # collect reward if it is the last agent to act
         if self._agent_selector.is_last():
-
             # same action => 0 reward each agent
             if self.state[self.agents[0]] == self.state[self.agents[1]]:
                 rewards = (0, 0)
diff --git a/pettingzoo/sisl/multiwalker/multiwalker.py b/pettingzoo/sisl/multiwalker/multiwalker.py
index a958bb6b0..255a41244 100755
--- a/pettingzoo/sisl/multiwalker/multiwalker.py
+++ b/pettingzoo/sisl/multiwalker/multiwalker.py
@@ -141,7 +141,6 @@ def env(**kwargs):
 class raw_env(AECEnv, EzPickle):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "multiwalker_v9",
diff --git a/pettingzoo/sisl/multiwalker/multiwalker_base.py b/pettingzoo/sisl/multiwalker/multiwalker_base.py
index e364cf599..ec21d6d77 100644
--- a/pettingzoo/sisl/multiwalker/multiwalker_base.py
+++ b/pettingzoo/sisl/multiwalker/multiwalker_base.py
@@ -214,7 +214,6 @@ def ReportFixture(self, fixture, point, normal, fraction):
         self.lidar = [LidarCallback() for _ in range(10)]

     def apply_action(self, action):
-
         self.joints[0].motorSpeed = float(SPEED_HIP * np.sign(action[0]))
         self.joints[0].maxMotorTorque = float(
             MOTORS_TORQUE * np.clip(np.abs(action[0]), 0, 1)
@@ -288,7 +287,6 @@ def action_space(self):

 class MultiWalkerEnv:
-
     metadata = {"render_modes": ["human", "rgb_array"], "render_fps": FPS}

     hardcore = False
diff --git a/pettingzoo/sisl/pursuit/manual_policy.py b/pettingzoo/sisl/pursuit/manual_policy.py
index 42604f99a..83617ee03 100644
--- a/pettingzoo/sisl/pursuit/manual_policy.py
+++ b/pettingzoo/sisl/pursuit/manual_policy.py
@@ -3,7 +3,6 @@
 class ManualPolicy:
     def __init__(self, env, agent_id: int = 0, show_obs: bool = False):
-
         self.env = env
         self.agent_id = agent_id
         self.agent = self.env.agents[self.agent_id]
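Several of the env classes touched above inherit from both AECEnv and EzPickle. For readers unfamiliar with the pattern: EzPickle records the constructor arguments so a pickled env can be rebuilt by re-calling `__init__`. A minimal sketch, assuming gymnasium's EzPickle; the signature is illustrative, not multiwalker's real one:

    from gymnasium.utils import EzPickle

    from pettingzoo import AECEnv


    class raw_env(AECEnv, EzPickle):
        def __init__(self, n_walkers=3, **kwargs):
            # EzPickle stores these args; unpickling re-invokes __init__ with them
            EzPickle.__init__(self, n_walkers=n_walkers, **kwargs)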
diff --git a/pettingzoo/sisl/pursuit/pursuit.py b/pettingzoo/sisl/pursuit/pursuit.py
index 6474d88e5..76f0efe07 100755
--- a/pettingzoo/sisl/pursuit/pursuit.py
+++ b/pettingzoo/sisl/pursuit/pursuit.py
@@ -101,7 +101,6 @@ def env(**kwargs):
 class raw_env(AECEnv, EzPickle):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "pursuit_v4",
diff --git a/pettingzoo/sisl/pursuit/pursuit_base.py b/pettingzoo/sisl/pursuit/pursuit_base.py
index 4e59153c0..ed0fc4d2e 100755
--- a/pettingzoo/sisl/pursuit/pursuit_base.py
+++ b/pettingzoo/sisl/pursuit/pursuit_base.py
@@ -347,7 +347,7 @@ def draw_agent_counts(self):
             x, y = self.pursuer_layer.get_position(i)
             agent_positions[(x, y)] += 1

-        for (x, y) in evader_positions:
+        for x, y in evader_positions:
             (pos_x, pos_y) = (
                 self.pixel_scale * x + self.pixel_scale // 2,
                 self.pixel_scale * y + self.pixel_scale // 2,
@@ -366,7 +366,7 @@
             self.screen.blit(text, (pos_x, pos_y))

-        for (x, y) in agent_positions:
+        for x, y in agent_positions:
             (pos_x, pos_y) = (
                 self.pixel_scale * x + self.pixel_scale // 2,
                 self.pixel_scale * y + self.pixel_scale // 2,
diff --git a/pettingzoo/sisl/pursuit/utils/controllers.py b/pettingzoo/sisl/pursuit/utils/controllers.py
index 2c2327be6..fec9636a4 100644
--- a/pettingzoo/sisl/pursuit/utils/controllers.py
+++ b/pettingzoo/sisl/pursuit/utils/controllers.py
@@ -14,7 +14,6 @@ def act(self, state: np.ndarray) -> int:
 class RandomPolicy(PursuitPolicy):
-
     # constructor
     def __init__(self, n_actions, rng):
         self.rng = rng
diff --git a/pettingzoo/sisl/pursuit/utils/discrete_agent.py b/pettingzoo/sisl/pursuit/utils/discrete_agent.py
index 25f92165d..b22e805e5 100644
--- a/pettingzoo/sisl/pursuit/utils/discrete_agent.py
+++ b/pettingzoo/sisl/pursuit/utils/discrete_agent.py
@@ -9,7 +9,6 @@
 class DiscreteAgent(Agent):
-
     # constructor
     def __init__(
         self,
diff --git a/pettingzoo/sisl/waterworld/waterworld.py b/pettingzoo/sisl/waterworld/waterworld.py
index e7ff7cd48..e160b7ca6 100755
--- a/pettingzoo/sisl/waterworld/waterworld.py
+++ b/pettingzoo/sisl/waterworld/waterworld.py
@@ -155,7 +155,6 @@ def env(**kwargs):
 class raw_env(AECEnv):
-
     metadata = {
         "render_modes": ["human", "rgb_array"],
         "name": "waterworld_v4",
diff --git a/pettingzoo/test/example_envs/generated_agents_env_v0.py b/pettingzoo/test/example_envs/generated_agents_env_v0.py
index ff7875126..ac0b03e9c 100644
--- a/pettingzoo/test/example_envs/generated_agents_env_v0.py
+++ b/pettingzoo/test/example_envs/generated_agents_env_v0.py
@@ -17,7 +17,6 @@ def get_type(agent):
 class raw_env(AECEnv):
-
     metadata = {"render_modes": ["human"], "name": "generated_agents_env_v0"}

     def __init__(self, max_cycles=100, render_mode=None):
diff --git a/pettingzoo/test/example_envs/generated_agents_parallel_v0.py b/pettingzoo/test/example_envs/generated_agents_parallel_v0.py
index 43655ace7..922abb3fe 100644
--- a/pettingzoo/test/example_envs/generated_agents_parallel_v0.py
+++ b/pettingzoo/test/example_envs/generated_agents_parallel_v0.py
@@ -21,7 +21,6 @@ def get_type(agent):
 class parallel_env(ParallelEnv):
-
     metadata = {"render_modes": ["human"], "name": "generated_agents_parallel_v0"}

     def __init__(self, max_cycles=100, render_mode=None):
diff --git a/pettingzoo/test/state_test.py b/pettingzoo/test/state_test.py
index 62cdd95dd..b36636ebc 100644
--- a/pettingzoo/test/state_test.py
+++ b/pettingzoo/test/state_test.py
@@ -96,7 +96,6 @@ def test_state(env, num_cycles):

 def test_parallel_env(parallel_env):
-
     parallel_env.reset()

     assert isinstance(
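The two pursuit_base.py hunks above are pure style: iterating a dict keyed by coordinate tuples already yields those tuples, so the parentheses around the loop target are redundant. A self-contained illustration of the same pattern (hypothetical data):

    from collections import defaultdict

    agent_positions = defaultdict(int)
    agent_positions[(0, 1)] += 1
    agent_positions[(2, 3)] += 1

    # unpacks each (x, y) key; behaves identically to `for (x, y) in agent_positions:`
    for x, y in agent_positions:
        print(x, y)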
diff --git a/test/unwrapped_test.py b/test/unwrapped_test.py
index 92ab557c4..d6597cb37 100644
--- a/test/unwrapped_test.py
+++ b/test/unwrapped_test.py
@@ -31,7 +31,6 @@ def discrete_observation(env, agents):
 @pytest.mark.parametrize(("name", "env_module"), list(all_environments.items()))
 def test_unwrapped(name, env_module):
-
     env = env_module.env(render_mode="human")
     base_env = env.unwrapped
diff --git a/tutorials/CleanRL/cleanrl.py b/tutorials/CleanRL/cleanrl.py
index 1fee3a545..d1d8df5ff 100644
--- a/tutorials/CleanRL/cleanrl.py
+++ b/tutorials/CleanRL/cleanrl.py
@@ -85,7 +85,6 @@ def unbatchify(x, env):
 if __name__ == "__main__":
-
     """ALGO PARAMS"""
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     ent_coef = 0.1
@@ -126,10 +125,8 @@ def unbatchify(x, env):
     """ TRAINING LOGIC """
     # train for n number of episodes
     for episode in range(total_episodes):
-
         # collect an episode
         with torch.no_grad():
-
             # collect observations and convert to batch of torch tensors
             next_obs = env.reset(seed=None)
             # reset the episodic return
@@ -137,7 +134,6 @@
         # each episode has num_steps
         for step in range(0, max_cycles):
-
             # rollover the observation
             obs = batchify_obs(next_obs, device)
diff --git a/tutorials/Ray/rllib_pistonball.py b/tutorials/Ray/rllib_pistonball.py
index 173bd1dca..eee373b6a 100644
--- a/tutorials/Ray/rllib_pistonball.py
+++ b/tutorials/Ray/rllib_pistonball.py
@@ -69,7 +69,6 @@ def env_creator(args):
 if __name__ == "__main__":
-
     env_name = "pistonball_v6"

     register_env(env_name, lambda config: ParallelPettingZooEnv(env_creator(config)))
diff --git a/tutorials/Tianshou/2_training_agents.py b/tutorials/Tianshou/2_training_agents.py
index 640d55b52..966c9808f 100644
--- a/tutorials/Tianshou/2_training_agents.py
+++ b/tutorials/Tianshou/2_training_agents.py
@@ -69,7 +69,6 @@ def _get_env():
 if __name__ == "__main__":
-
     # ======== Step 1: Environment setup =========
     train_envs = DummyVectorEnv([_get_env for _ in range(10)])
     test_envs = DummyVectorEnv([_get_env for _ in range(10)])
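For context on the CleanRL hunks: `batchify_obs` is the tutorial's helper that turns PettingZoo's per-agent observation dict into a single torch batch. A rough sketch of what such a helper does, under the assumption of flat array observations (the tutorial's real code also handles image axis ordering):

    import numpy as np
    import torch

    def batchify_obs(obs, device):
        # stack per-agent observations into one array, agent axis first
        stacked = np.stack([obs[agent] for agent in obs], axis=0)
        return torch.tensor(stacked, dtype=torch.float32).to(device)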