diff --git a/docs/misc/changelog.rst b/docs/misc/changelog.rst
index c1d9461e2..f5cf0b6e9 100644
--- a/docs/misc/changelog.rst
+++ b/docs/misc/changelog.rst
@@ -36,6 +36,7 @@ Deprecations:
 Others:
 ^^^^^^^
 - Used issue forms instead of issue templates
+- Fixed flake8 config to be compatible with flake8 6+
 
 Documentation:
 ^^^^^^^^^^^^^^
diff --git a/setup.cfg b/setup.cfg
index b79e63a5d..5e307264f 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -80,7 +80,8 @@ exclude = (?x)(
 )
 
 [flake8]
-ignore = W503,W504,E203,E231 # line breaks before and after binary operators
+# line breaks before and after binary operators
+ignore = W503,W504,E203,E231
 # Ignore import not used when aliases are defined
 per-file-ignores =
     ./stable_baselines3/__init__.py:F401
diff --git a/stable_baselines3/common/base_class.py b/stable_baselines3/common/base_class.py
index bb14f6ac9..9351bfb12 100644
--- a/stable_baselines3/common/base_class.py
+++ b/stable_baselines3/common/base_class.py
@@ -125,7 +125,7 @@ def __init__(
         # Used for computing fps, it is updated at each call of learn()
         self._num_timesteps_at_start = 0
         self.seed = seed
-        self.action_noise = None  # type: Optional[ActionNoise]
+        self.action_noise: Optional[ActionNoise] = None
         self.start_time = None
         self.policy = None
         self.learning_rate = learning_rate
diff --git a/tests/test_utils.py b/tests/test_utils.py
index e9281c363..e74b1d09e 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -298,13 +298,13 @@ def make_eval_env(with_monitor, wrapper_class=gym.Wrapper):
     episode_rewards, episode_lengths = evaluate_policy(
         model, eval_env, n_eval_episodes, return_episode_rewards=True, warn=False
     )
-    assert all(map(lambda l: l == 1, episode_lengths)), "AlwaysDoneWrapper did not fix episode lengths to one"
+    assert all(map(lambda length: length == 1, episode_lengths)), "AlwaysDoneWrapper did not fix episode lengths to one"
     eval_env.close()
 
     # Should get longer episodes with with Monitor (true episodes)
     eval_env = make_eval_env(with_monitor=True, wrapper_class=AlwaysDoneWrapper)
     episode_rewards, episode_lengths = evaluate_policy(model, eval_env, n_eval_episodes, return_episode_rewards=True)
-    assert all(map(lambda l: l > 1, episode_lengths)), "evaluate_policy did not get episode lengths from Monitor"
+    assert all(map(lambda length: length > 1, episode_lengths)), "evaluate_policy did not get episode lengths from Monitor"
     eval_env.close()
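
For context on the hunks above: the changelog entry records that the flake8 config was fixed for flake8 6+, and the setup.cfg hunk does this by moving the trailing comment off the `ignore =` line, since newer flake8 releases no longer strip inline comments from option values and would otherwise read the comment as part of the code list. The `tests/test_utils.py` hunks rename `lambda l` to `lambda length`, which also avoids pycodestyle's E741 ("ambiguous variable name") warning. Finally, the `base_class.py` hunk replaces a legacy type comment with a PEP 526 variable annotation. A minimal sketch of that last pattern, using a hypothetical `ActionNoise` stub rather than the real `stable_baselines3.common.noise.ActionNoise`:

from typing import Optional


class ActionNoise:
    """Hypothetical stub standing in for stable_baselines3.common.noise.ActionNoise."""


class Model:
    def __init__(self) -> None:
        # Legacy form: the type lives in a comment, visible only to tools
        # that special-case "# type:" comments:
        # self.action_noise = None  # type: Optional[ActionNoise]

        # PEP 526 form: the annotation is real syntax (Python 3.6+), so
        # mypy, IDEs, and AST-based linters read it directly.
        self.action_noise: Optional[ActionNoise] = None

Both spellings mean the same thing to a type checker; the annotated form is preferred in modern code because it requires no comment parsing.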