Added moondream and llava. #2154

Closed
wants to merge 5 commits into from
Changes from all commits
17 changes: 17 additions & 0 deletions torchbenchmark/models/llava/__init__.py
@@ -0,0 +1,17 @@
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel

class Model(HuggingFaceModel):
    task = NLP.LANGUAGE_MODELING
    # DEFAULT_TRAIN_BSIZE not specified since we're not implementing a train test
    # DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="llava", test=test, device=device, batch_size=batch_size, extra_args=extra_args)

    def train(self):
        raise NotImplementedError("Not implemented")

    def eval(self):
        super().eval()
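
A minimal usage sketch of the wrapper above, assuming the benchmark has been set up via its install.py, a CUDA device is available, and the model weights/config can be resolved; only the eval test is wired up.

from torchbenchmark.models.llava import Model

m = Model(test="eval", device="cuda")  # uses DEFAULT_EVAL_BSIZE=1
m.eval()  # delegates to HuggingFaceModel.eval()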
13 changes: 13 additions & 0 deletions torchbenchmark/models/llava/install.py
@@ -0,0 +1,13 @@
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model

def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])

if __name__ == '__main__':
    pip_install_requirements()
    patch_transformers()
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
10 changes: 10 additions & 0 deletions torchbenchmark/models/llava/metadata.yaml
@@ -0,0 +1,10 @@
devices:
  NVIDIA A100-SXM4-40GB:
    eval_batch_size: 1
eval_benchmark: false
eval_deterministic: false
eval_nograd: true
train_benchmark: false
train_deterministic: false
not_implemented:
- device: NVIDIA A10G
1 change: 1 addition & 0 deletions torchbenchmark/models/llava/requirements.txt
@@ -0,0 +1 @@
einops
17 changes: 17 additions & 0 deletions torchbenchmark/models/moondream/__init__.py
@@ -0,0 +1,17 @@
from torchbenchmark.tasks import NLP
from torchbenchmark.util.framework.huggingface.model_factory import HuggingFaceModel

class Model(HuggingFaceModel):
    task = NLP.LANGUAGE_MODELING
    # DEFAULT_TRAIN_BSIZE not specified since we're not implementing a train test
    # DEFAULT_TRAIN_BSIZE = 1
    DEFAULT_EVAL_BSIZE = 1

    def __init__(self, test, device, batch_size=None, extra_args=[]):
        super().__init__(name="moondream", test=test, device=device, batch_size=batch_size, extra_args=extra_args)

    def train(self):
        raise NotImplementedError("Not implemented")

    def eval(self):
        super().eval()
13 changes: 13 additions & 0 deletions torchbenchmark/models/moondream/install.py
@@ -0,0 +1,13 @@
import subprocess
import sys
import os
from torchbenchmark.util.framework.huggingface.patch_hf import patch_transformers, cache_model

def pip_install_requirements():
    subprocess.check_call([sys.executable, '-m', 'pip', 'install', '-q', '-r', 'requirements.txt'])

if __name__ == '__main__':
    pip_install_requirements()
    patch_transformers()
    model_name = os.path.basename(os.path.dirname(os.path.abspath(__file__)))
    cache_model(model_name)
10 changes: 10 additions & 0 deletions torchbenchmark/models/moondream/metadata.yaml
@@ -0,0 +1,10 @@
devices:
  NVIDIA A100-SXM4-40GB:
    eval_batch_size: 1
eval_benchmark: false
eval_deterministic: false
eval_nograd: true
train_benchmark: false
train_deterministic: false
not_implemented:
- device: NVIDIA A10G
1 change: 1 addition & 0 deletions torchbenchmark/models/moondream/requirements.txt
@@ -0,0 +1 @@
einops
9 changes: 7 additions & 2 deletions torchbenchmark/util/framework/huggingface/model_factory.py
@@ -8,7 +8,7 @@
from torchbenchmark.util.model import BenchmarkModel
from torchbenchmark.tasks import NLP
import transformers
-from transformers import AutoConfig, ReformerConfig, BertConfig, GenerationConfig, WhisperConfig, LlamaConfig
+from transformers import AutoConfig, ReformerConfig, BertConfig, GenerationConfig, WhisperConfig, LlamaConfig, PhiConfig
from typing import Tuple

class_models = {
@@ -33,11 +33,13 @@
    # default num_hidden_layers=32 but that OOMs, feel free to change this config to something more real
    'llama_v2_7b_16h' : (128,512, 'LlamaConfig(num_hidden_layers=16)', 'AutoModelForCausalLM'),
    'hf_MPT_7b_instruct': (512, 512, 'AutoConfig.from_pretrained("mosaicml/mpt-7b-instruct", trust_remote_code=True)', 'AutoModelForCausalLM'),
+   'llava' : (512,512, 'AutoConfig.from_pretrained("liuhaotian/llava-v1.5-13b")', 'LlavaForConditionalGeneration'),
    'llama_v2_7b' : (512,512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-7b-hf")', 'AutoModelForCausalLM'),
    'llama_v2_13b' : (512,512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-13b-hf")', 'AutoModelForCausalLM'),
    'llama_v2_70b' : (512, 512, 'AutoConfig.from_pretrained("meta-llama/Llama-2-70b-hf")', 'AutoModelForMaskedLM'),
    'phi_1_5' : (512, 512, 'AutoConfig.from_pretrained("microsoft/phi-1_5", trust_remote_code=True)', 'AutoModelForCausalLM'),
    'phi_2' : (512, 512, 'AutoConfig.from_pretrained("microsoft/phi-2", trust_remote_code=True)', 'AutoModelForCausalLM'),
+   'moondream' : (512, 512, 'PhiConfig.from_pretrained("vikhyatk/moondream1")', 'PhiForCausalLM'),
    # as per this page https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1 trust_remote_code=True is not required
    'mistral_7b_instruct' : (128, 128, 'AutoConfig.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")', 'AutoModelForCausalLM'),
    'hf_Yi' : (512, 512, 'AutoConfig.from_pretrained("01-ai/Yi-6B", trust_remote_code=True)', 'AutoModelForCausalLM'),
@@ -96,7 +98,10 @@ def __init__(self, name, test, device, batch_size=None, extra_args=[]):
        hugging_face_models_requiring_trust_remote_code = ["hf_Falcon_7b", "hf_MPT_7b_instruct", "phi_1_5", "phi_2", "hf_Yi"]
        if name in hugging_face_models_requiring_trust_remote_code:
            kwargs["trust_remote_code"] = True
-       self.model = class_ctor.from_config(config, **kwargs).to(device)
+       if hasattr(class_ctor, "from_config"):
+           self.model = class_ctor.from_config(config, **kwargs).to(device)
+       else:
+           self.model = class_ctor(config, **kwargs).to(device)
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=0.001,
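
A minimal sketch of the dispatch added above, shown outside of torchbenchmark. It assumes a transformers version that ships PhiConfig and PhiForCausalLM (used here only as an example of a concrete class without from_config) and builds a tiny randomly initialized model either way.

import transformers
from transformers import PhiConfig

config = PhiConfig(num_hidden_layers=2)  # tiny config so the sketch stays cheap
class_ctor = getattr(transformers, "PhiForCausalLM")

if hasattr(class_ctor, "from_config"):
    # Auto* classes expose from_config for building a model from a config object.
    model = class_ctor.from_config(config)
else:
    # Concrete model classes such as PhiForCausalLM take the config directly.
    model = class_ctor(config)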
10 changes: 7 additions & 3 deletions torchbenchmark/util/framework/huggingface/patch_hf.py
@@ -5,16 +5,20 @@
import subprocess
import sys
from .model_factory import class_models
-from transformers import AutoConfig, ReformerConfig, BigBirdConfig, BertConfig, WhisperConfig, LlamaConfig

+from transformers import AutoConfig, ReformerConfig, BigBirdConfig, BertConfig, WhisperConfig, LlamaConfig, PhiConfig
import inspect

PATCH_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "patches")

def cache_model(name: str, **kwargs):
    import transformers
    model_config = eval(class_models[name][2])
    model_ctor = getattr(transformers, class_models[name][3])
-   model_ctor.from_config(model_config, **kwargs)
+   if model_config.__class__.__name__ in ("PhiConfig", "LlavaConfig"):
+       model_ctor(model_config, **kwargs)
+   else:
+       model_ctor.from_config(model_config, **kwargs)

Contributor commented: I am wondering why we can't use the same code from model_factory.py to determine whether from_config should be used?

Contributor Author replied: Causes errors w/ runs for other HF models (feel free to test & replicate)


def patch_transformers():
    import transformers
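
For reference, a hedged sketch of the alternative raised in the review thread above: reusing the same hasattr-based dispatch from model_factory.py inside cache_model instead of checking the config class name. The author reports this caused errors for other HF models, so it illustrates the suggestion rather than a verified fix; it assumes the surrounding patch_hf.py imports (class_models, transformers) are in scope.

def cache_model(name: str, **kwargs):
    import transformers
    model_config = eval(class_models[name][2])
    model_ctor = getattr(transformers, class_models[name][3])
    if hasattr(model_ctor, "from_config"):
        # Auto* classes: build the model from the config.
        model_ctor.from_config(model_config, **kwargs)
    else:
        # Concrete classes such as PhiForCausalLM or LlavaForConditionalGeneration
        # take the config positionally.
        model_ctor(model_config, **kwargs)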