Add Mistral-7B-Instruct-v0.1 from huggingface. #2010

Closed · wants to merge 7 commits
3 changes: 2 additions & 1 deletion requirements.txt
@@ -10,7 +10,8 @@ pytest-benchmark
 requests
 tabulate
 git+https://github.com/huggingface/pytorch-image-models.git@730b907
-git+https://github.com/huggingface/transformers.git@6c26faa
+# this version of transformers is required per the model card at https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1
+transformers==4.34.1
 MonkeyType
 psutil
 pyyaml
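For anyone reproducing this locally, a minimal runtime guard for the pin above, a sketch only: `packaging` is assumed to be available (it ships with pip), and the 4.34 floor is an assumption based on the requirements pin rather than anything stated in this PR.

    # Sketch: fail fast if the installed transformers predates Mistral support.
    # The 4.34.0 floor is inferred from the transformers==4.34.1 pin above.
    import transformers
    from packaging import version

    if version.parse(transformers.__version__) < version.parse("4.34.0"):
        raise RuntimeError(
            f"Mistral-7B-Instruct-v0.1 needs transformers>=4.34, "
            f"found {transformers.__version__}"
        )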
19 changes: 19 additions & 0 deletions torchbenchmark/canary_models/mistral_7b_instruct/__init__.py
@@ -0,0 +1,19 @@
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel

class Model(HuggingFaceModel):
    task = NLP.LANGUAGE_MODELING
    # DEFAULT_TRAIN_BSIZE not specified since we're not implementing a train test
    # DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="mistral_7b_instruct", test=test, device=device, batch_size=batch_size, extra_args=extra_args)

    def train(self):
        # Raise (not return) the exception so callers actually see the failure.
        raise NotImplementedError("Not implemented")

    def eval(self):
        if self.device == "cpu":
            raise NotImplementedError("mistral_7b_instruct model is too slow on CPU - skip CPU test.")
        return super().eval()
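A quick way to exercise the class above outside the benchmark harness, a sketch assuming the repo root is on PYTHONPATH and a CUDA device is available:

    # Hypothetical smoke test; not part of this PR.
    from torchbenchmark.canary_models.mistral_7b_instruct import Model

    m = Model(test="eval", device="cuda")
    m.eval()  # raises NotImplementedError when device == "cpu"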
9 changes: 9 additions & 0 deletions torchbenchmark/canary_models/mistral_7b_instruct/install.py
@@ -0,0 +1,9 @@
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model

if __name__ == '__main__':
    patch_transformers()
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
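The directory-name trick above is what keys the download: the script derives the model name from its own location. A standalone illustration (the path below is made up for the example):

    import os

    # Given .../canary_models/mistral_7b_instruct/install.py, dirname drops
    # the file name and basename keeps the final directory, i.e. the model key.
    path = "/repo/torchbenchmark/canary_models/mistral_7b_instruct/install.py"
    print(os.path.basename(os.path.dirname(path)))  # -> mistral_7b_instruct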
11 changes: 11 additions & 0 deletions torchbenchmark/canary_models/mistral_7b_instruct/metadata.yaml
@@ -0,0 +1,11 @@
devices:
  - device: NVIDIA A10G
  - device: NVIDIA A100-SXM4-40GB
eval_batch_size: 1
eval_benchmark: false
eval_deterministic: false
eval_nograd: true
train_benchmark: false
train_deterministic: false
not_implemented:
  - device: cpu
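A sketch of how this file parses, assuming PyYAML (already in requirements.txt) and the in-repo path of the new file:

    import yaml

    with open("torchbenchmark/canary_models/mistral_7b_instruct/metadata.yaml") as f:
        meta = yaml.safe_load(f)

    print(meta["eval_batch_size"])  # 1
    print(meta["not_implemented"])  # [{'device': 'cpu'}]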
3 changes: 2 additions & 1 deletion torchbenchmark/models/phi_1_5/requirements.txt
@@ -1 +1,2 @@
-einops
+einops
+flash_attn
6 changes: 4 additions & 2 deletions torchbenchmark/util/framework/huggingface/model_factory.py
@@ -35,6 +35,8 @@
     'llama_v2_13b' : (512, 512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-13b-hf")', 'AutoModelForCausalLM'),
     'llama_v2_70b' : (512, 512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-70b-hf")', 'AutoModelForMaskedLM'),
     'phi_1_5' : (512, 512, 'AutoConfig.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)', 'AutoModelForCausalLM'),
+    # per the model card at https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1, trust_remote_code=True is not required
+    'mistral_7b_instruct' : (128, 128, 'AutoConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")', 'AutoModelForCausalLM'),
     'hf_Yi' : (512, 512, 'AutoConfig.from_pretrained("01-ai/Yi-6B", trust_remote_code=True)', 'AutoModelForCausalLM'),
 }
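For context, the tuple added above encodes the plain Hugging Face pattern the factory later evaluates: build a config from the hub id, then instantiate the model class from that config. A minimal sketch outside torchbench (random weights, no checkpoint download; transformers>=4.34 assumed per the requirements pin):

    from transformers import AutoConfig, AutoModelForCausalLM

    # Mirrors the class_models entry: config from the hub id, model from config.
    config = AutoConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
    model = AutoModelForCausalLM.from_config(config)  # no trust_remote_code needed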

@@ -87,8 +89,8 @@ def __init__(self, name, test, device, batch_size=None, extra_args=[]):
             config.num_buckets = 128
         class_ctor = getattr(transformers, class_models[name][3])
         kwargs = {}
-        remote_code_required = ['hf_Falcon_7b', 'hf_MPT_7b_instruct', 'phi_1_5', 'hf_Yi']
-        if name in remote_code_required:
+        hugging_face_models_requiring_trust_remote_code = ["hf_Falcon_7b", "hf_MPT_7b_instruct", "phi_1_5", "hf_Yi"]
+        if name in hugging_face_models_requiring_trust_remote_code:
             kwargs["trust_remote_code"] = True
         self.model = class_ctor.from_config(config, **kwargs).to(device)
         self.optimizer = optim.Adam(