From dc35369818aecc158300d69352bbdbb12cba3992 Mon Sep 17 00:00:00 2001
From: Xu Zhao
Date: Fri, 29 Sep 2023 15:46:39 -0700
Subject: [PATCH] Fix models on python 3.11 (#1950)

Summary:
The CI is broken due to the recent default Python version upgrade. To fix it, we:

- Fix the soft_actor_critic breakage caused by the gym upgrade
- Upgrade the omegaconf version for compatibility with the new Python and numpy versions
- Upgrade the pinned detectron2 model version
- Move fambench_xlmr to canary_models

We are moving fambench_xlmr to canary_models because it depends on fairseq, and:

1. fairseq depends on omegaconf < 2.1, which is not compatible with Python 3.11 (https://github.com/facebookresearch/fairseq/blob/main/setup.py#L183)
2. detectron2, on the other hand, requires omegaconf >= 2.1, < 2.4, so the two cannot co-exist (https://github.com/facebookresearch/detectron2/blob/main/setup.py#L189)

We should suggest that the fairseq owners update their code to work with omegaconf >= 2.1.
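
For context, the gym upgrade changed the return signatures of reset() and
step() that soft_actor_critic relies on. A minimal sketch of that change,
assuming gym >= 0.26 (the CartPole environment below is illustrative only
and is not used by this benchmark):

    import gym

    env = gym.make("CartPole-v1")  # hypothetical example environment

    # Old API (gym < 0.26):
    #   state = env.reset()                            # observation only
    #   next_state, reward, done, info = env.step(a)   # 4-tuple
    # New API: reset() returns (observation, info), and step() returns a
    # 5-tuple that splits the old `done` flag into terminated/truncated.
    state, _info = env.reset()
    action = env.action_space.sample()
    next_state, reward, terminated, truncated, info = env.step(action)
    done = terminated or truncated  # recover the old-style flag

The diff below unpacks the extra return values positionally; the names in
the sketch follow the upstream gym documentation.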

Pull Request resolved: https://github.com/pytorch/benchmark/pull/1950

Reviewed By: davidberard98, msaroufim

Differential Revision: D49785274

Pulled By: xuzhao9

fbshipit-source-id: e46f56c28359f8a38e09e2b90d8e07d19b68d58a
---
 .../fambench_xlmr/__init__.py                        |  0
 .../fambench_xlmr/install.py                         |  0
 .../fambench_xlmr/metadata.yaml                      |  0
 .../fambench_xlmr/requirements.txt                   |  0
 torchbenchmark/models/soft_actor_critic/__init__.py  | 12 ++++++------
 .../util/framework/detectron2/requirements.txt       |  4 ++--
 6 files changed, 8 insertions(+), 8 deletions(-)
 rename torchbenchmark/{models => canary_models}/fambench_xlmr/__init__.py (100%)
 rename torchbenchmark/{models => canary_models}/fambench_xlmr/install.py (100%)
 rename torchbenchmark/{models => canary_models}/fambench_xlmr/metadata.yaml (100%)
 rename torchbenchmark/{models => canary_models}/fambench_xlmr/requirements.txt (100%)

diff --git a/torchbenchmark/models/fambench_xlmr/__init__.py b/torchbenchmark/canary_models/fambench_xlmr/__init__.py
similarity index 100%
rename from torchbenchmark/models/fambench_xlmr/__init__.py
rename to torchbenchmark/canary_models/fambench_xlmr/__init__.py
diff --git a/torchbenchmark/models/fambench_xlmr/install.py b/torchbenchmark/canary_models/fambench_xlmr/install.py
similarity index 100%
rename from torchbenchmark/models/fambench_xlmr/install.py
rename to torchbenchmark/canary_models/fambench_xlmr/install.py
diff --git a/torchbenchmark/models/fambench_xlmr/metadata.yaml b/torchbenchmark/canary_models/fambench_xlmr/metadata.yaml
similarity index 100%
rename from torchbenchmark/models/fambench_xlmr/metadata.yaml
rename to torchbenchmark/canary_models/fambench_xlmr/metadata.yaml
diff --git a/torchbenchmark/models/fambench_xlmr/requirements.txt b/torchbenchmark/canary_models/fambench_xlmr/requirements.txt
similarity index 100%
rename from torchbenchmark/models/fambench_xlmr/requirements.txt
rename to torchbenchmark/canary_models/fambench_xlmr/requirements.txt
diff --git a/torchbenchmark/models/soft_actor_critic/__init__.py b/torchbenchmark/models/soft_actor_critic/__init__.py
index c2d3b32c7d..5e4b0653c4 100644
--- a/torchbenchmark/models/soft_actor_critic/__init__.py
+++ b/torchbenchmark/models/soft_actor_critic/__init__.py
@@ -185,9 +185,9 @@ def __init__(self, test, device, batch_size=None, extra_args=[]):
 
     def get_module(self):
         model = self.agent.actor
-        state = self.train_env.reset()
+        state, _info = self.train_env.reset()
         action = self.agent.sample_action(state)
-        next_state, reward, done, info = self.train_env.step(action)
+        next_state, reward, done, info, _unused = self.train_env.step(action)
         self.buffer.push(state, action, reward, next_state, done)
         batch = self.buffer.sample(self.args.batch_size)
         state_batch, action_batch, reward_batch, next_state_batch, done_batch = batch
@@ -204,11 +204,11 @@ def train(self):
         niter = 1
         for step in range(niter):
             if done:
-                state = self.train_env.reset()
+                state, _info = self.train_env.reset()
                 steps_this_ep = 0
                 done = False
             action = self.agent.sample_action(state)
-            next_state, reward, done, info = self.train_env.step(action)
+            next_state, reward, done, info, _unused = self.train_env.step(action)
             self.buffer.push(state, action, reward, next_state, done)
             state = next_state
             steps_this_ep += 1
@@ -244,13 +244,13 @@ def eval(self) -> Tuple[torch.Tensor]:
         episode_return_history = []
         for episode in range(niter):
             episode_return = 0.0
-            state = self.test_env.reset()
+            state, _info = self.test_env.reset()
             done, info = False, {}
             for step_num in range(self.args.max_episode_steps):
                 if done:
                     break
                 action = self.agent.forward(state)
-                state, reward, done, info = self.test_env.step(action)
+                state, reward, done, info, _unused = self.test_env.step(action)
                 episode_return += reward * (discount ** step_num)
             episode_return_history.append(episode_return)
         retval = torch.tensor(episode_return_history)
diff --git a/torchbenchmark/util/framework/detectron2/requirements.txt b/torchbenchmark/util/framework/detectron2/requirements.txt
index 486f75ab2d..b484339e94 100644
--- a/torchbenchmark/util/framework/detectron2/requirements.txt
+++ b/torchbenchmark/util/framework/detectron2/requirements.txt
@@ -1,3 +1,3 @@
-git+https://github.com/facebookresearch/detectron2.git@57bdb21
-omegaconf==2.1.1
+git+https://github.com/facebookresearch/detectron2.git@1a4df4d
+omegaconf==2.3.0
 numpy