add npu_group_size for transformers_int4_npu_win in all-in-one benchmark api #12316

Merged: 2 commits, Nov 1, 2024
python/llm/dev/benchmark/all-in-one/config.yaml (1 addition, 0 deletions)
@@ -43,3 +43,4 @@ optimize_model: False # whether apply further optimization on NPU (only available …)
use_fp16_torch_dtype: True # whether use fp16 for non-linear layer (only available now for "pipeline_parallel_gpu" test_api)
task: 'continuation' # task can be 'continuation', 'QA' and 'summarize'
transpose_value_cache: True # whether apply transposed v_cache optimization on NPU (only available now for transformers_int4_npu_win test_api)
+ npu_group_size: 128 # This can only be either 0 or 128, and only works for `transformers_int4_npu_win` / `transformers_int4_npu_pipline_win`
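In run.py the new key is read as `npu_group_size` and forwarded to the NPU model load as `quantization_group_size` (see the hunks below). A minimal sketch of how the key could be consumed, assuming a fallback of 0 when it is absent; the fallback value and the explicit {0, 128} check are illustrative assumptions, not code from this PR:

```python
# Illustrative sketch only: the key lookup mirrors the pattern added to run.py below,
# while the fallback value and the membership check are assumptions.
import yaml

with open("config.yaml") as f:
    conf = yaml.safe_load(f)

group_size = 0  # assumed fallback; the benchmark's real default is not shown in this diff
if 'npu_group_size' in conf:
    group_size = conf['npu_group_size']

# config.yaml documents that only 0 or 128 are valid values
assert group_size in (0, 128), f"unsupported npu_group_size: {group_size}"
```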
python/llm/dev/benchmark/all-in-one/run.py (16 additions, 11 deletions)
@@ -190,7 +190,7 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
elif test_api == 'pipeline_parallel_gpu':
result = run_pipeline_parallel_gpu(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, cpu_embedding, fp16=use_fp16_torch_dtype)
elif test_api == 'transformers_int4_npu_win':
- result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model, transpose_value_cache)
+ result = transformers_int4_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model, transpose_value_cache, group_size)
elif test_api == 'transformers_int4_loadlowbit_npu_win':
result = run_transformer_int4_loadlowbit_npu_win(repo_id, local_model_hub, in_out_pairs, warm_up, num_trials, num_beams, low_bit, batch_size, optimize_model, transpose_value_cache)
elif test_api == 'transformers_openvino':
@@ -214,7 +214,8 @@ def run_model(repo_id, test_api, in_out_pairs, local_model_hub=None, warm_up=1,
round(result[in_out_pair][-1][5], 2),
result[in_out_pair][-1][6] if any(keyword in test_api for keyword in ['int4_gpu', 'int4_fp16_gpu_win', 'int4_loadlowbit_gpu', 'int4_fp16_loadlowbit_gpu', 'fp16_gpu', 'deepspeed_optimize_model_gpu']) and not lookahead else 'N/A',
streaming if 'win' in test_api else 'N/A',
- use_fp16_torch_dtype if 'pipeline_parallel_gpu' in test_api else 'N/A'],
+ use_fp16_torch_dtype if 'pipeline_parallel_gpu' in test_api else 'N/A',
+ group_size],
Review comment (Contributor): Should we make the group_size value "N/A" for other test_api? It may be confusing otherwise.

Reply (Contributor): Good point, we will fix this in the next PR. 😊

)
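The review thread above asks for "N/A" to be reported when the test API does not use a group size. A rough sketch of what that follow-up could look like, assuming it keys off the test_api name the same way the neighbouring `'win' in test_api` and `'pipeline_parallel_gpu' in test_api` checks do (not part of this PR):

```python
# Possible follow-up (assumption, deferred to a later PR per the reply above):
# report the group size only for NPU test APIs and fall back to 'N/A' like the
# other API-specific columns in this result row.
reported_group_size = group_size if 'npu' in test_api else 'N/A'
```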


@@ -589,9 +590,9 @@ def run_transformer_int4_gpu(repo_id,
file.seek(0, os.SEEK_END)
global line_counter
if file.tell() == 0:
- csv_writer.writerow(["", "model", "1st token avg latency (ms)", "2+ avg latency (ms/token)", "encoder time (ms)", "input/output tokens", "batch_size", "actual input/output tokens", "num_beams", "low_bit", "cpu_embedding", "model loading time (s)", "peak mem (GB)", "streaming", "use_fp16_torch_dtype"])
+ csv_writer.writerow(["", "model", "1st token avg latency (ms)", "2+ avg latency (ms/token)", "encoder time (ms)", "input/output tokens", "batch_size", "actual input/output tokens", "num_beams", "low_bit", "cpu_embedding", "model loading time (s)", "peak mem (GB)", "streaming", "use_fp16_torch_dtype", "npu_group_size"])
line_counter +=1
- csv_writer.writerow([line_counter-1, repo_id, first_token_latency, rest_token_latency, encoder_time, input_output_tokens, batch_size, actual_input_output_tokens, num_beams, low_bit, '', load_time, peak_mem, streaming, use_fp16_torch_dtype])
+ csv_writer.writerow([line_counter-1, repo_id, first_token_latency, rest_token_latency, encoder_time, input_output_tokens, batch_size, actual_input_output_tokens, num_beams, low_bit, '', load_time, peak_mem, streaming, use_fp16_torch_dtype, group_size])
line_counter += 1

model.to('cpu')
@@ -611,7 +612,8 @@ def transformers_int4_npu_win(repo_id,
low_bit,
batch_size,
optimize_model,
- transpose_value_cache):
+ transpose_value_cache,
+ npu_group_size):
from ipex_llm.transformers.npu_model import AutoModel, AutoModelForCausalLM
from transformers import AutoTokenizer, LlamaTokenizer

@@ -623,17 +625,20 @@ def transformers_int4_npu_win(repo_id,
st = time.perf_counter()
if repo_id in CHATGLM_IDS:
model = AutoModel.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True,
- optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=transpose_value_cache,
+ optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]),
+ quantization_group_size=npu_group_size, transpose_value_cache=transpose_value_cache,
torch_dtype=torch.float16, attn_implementation="eager").eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
elif repo_id in LLAMA_IDS:
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
- optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=transpose_value_cache,
+ optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]),
+ quantization_group_size=npu_group_size, transpose_value_cache=transpose_value_cache,
use_cache=True, attn_implementation="eager").eval()
tokenizer = LlamaTokenizer.from_pretrained(model_path, trust_remote_code=True)
else:
model = AutoModelForCausalLM.from_pretrained(model_path, load_in_low_bit=low_bit, trust_remote_code=True, torch_dtype=torch.float16,
- optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]), transpose_value_cache=transpose_value_cache,
+ optimize_model=optimize_model, max_context_len=max_context_len, max_prompt_len=int(in_out_len[0]),
+ quantization_group_size=npu_group_size, transpose_value_cache=transpose_value_cache,
use_cache=True, attn_implementation="eager").eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
end = time.perf_counter()
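For context, `quantization_group_size` is the keyword this hunk adds to each `from_pretrained` call. A standalone sketch of the same load path outside the benchmark harness; the model path, low-bit format, and length values are placeholders, and only the keyword arguments visible in the diff above are assumed:

```python
# Standalone sketch of the NPU load path exercised above; values are placeholders.
import torch
from ipex_llm.transformers.npu_model import AutoModelForCausalLM
from transformers import AutoTokenizer

model_path = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model
model = AutoModelForCausalLM.from_pretrained(
    model_path,
    load_in_low_bit="sym_int4",          # placeholder low-bit format
    trust_remote_code=True,
    torch_dtype=torch.float16,
    optimize_model=True,
    max_context_len=1024,                # placeholder lengths
    max_prompt_len=512,
    quantization_group_size=128,         # 0 or 128, matching npu_group_size in config.yaml
    transpose_value_cache=True,
    use_cache=True,
    attn_implementation="eager",
).eval()
tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
```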
@@ -2191,8 +2196,8 @@ def run_pipeline_parallel_gpu(repo_id,
task = conf['task']
if 'optimize_model' in conf:
optimize_model = conf['optimize_model']
- if 'group_size' in conf:
-     group_size = conf['group_size']
+ if 'npu_group_size' in conf:
+     group_size = conf['npu_group_size']
lookahead = False
transpose_value_cache = True
if 'transpose_value_cache' in conf:
@@ -2225,7 +2230,7 @@ def run_pipeline_parallel_gpu(repo_id,
conf['low_bit'], conf['cpu_embedding'], batch_size, streaming, use_fp16_torch_dtype, lookahead, task, optimize_model, transpose_value_cache, group_size)
df = pd.DataFrame(results, columns=['model', '1st token avg latency (ms)', '2+ avg latency (ms/token)', 'encoder time (ms)',
'input/output tokens', 'batch_size', 'actual input/output tokens', 'num_beams', 'low_bit', 'cpu_embedding',
- 'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype'])
+ 'model loading time (s)', 'peak mem (GB)', 'streaming', 'use_fp16_torch_dtype', 'npu_group_size'])
if "pipeline" in api or "deepspeed" in api:
if torch.distributed.get_rank() == 0:
df.index += max(line_counter - 1, 0)
Another changed file:
@@ -868,7 +868,7 @@ def forward(
seq_len <= self.max_prompt_len,
(
f"seq_len: {seq_len} should be less than or equal"
- " to max_prompt_len {self.max_prompt_len}"
+ f" to max_prompt_len {self.max_prompt_len}"
),
)
pad_len = self.max_prompt_len - seq_len
Another changed file:
@@ -907,7 +907,7 @@ def forward(
seq_len <= self.max_prompt_len,
(
f"seq_len: {seq_len} should be less than or equal"
- " to max_prompt_len {self.max_prompt_len}"
+ f" to max_prompt_len {self.max_prompt_len}"
),
)
pad_len = self.max_prompt_len - seq_len
Another changed file:
@@ -829,7 +829,7 @@ def forward(
seq_len <= self.max_prompt_len,
(
f"seq_len: {seq_len} should be less than or equal"
- " to max_prompt_len {self.max_prompt_len}"
+ f" to max_prompt_len {self.max_prompt_len}"
),
)
pad_len = self.max_prompt_len - seq_len
Another changed file:
@@ -971,7 +971,7 @@ def forward(
seq_len <= self.max_prompt_len,
(
f"seq_len: {seq_len} should be less than or equal"
- " to max_prompt_len {self.max_prompt_len}"
+ f" to max_prompt_len {self.max_prompt_len}"
),
)
pad_len = self.max_prompt_len - seq_len
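The four hunks above apply the same one-character fix: in an implicitly concatenated pair of string literals, only the first carried an `f` prefix, so `{self.max_prompt_len}` was printed literally instead of being interpolated. A minimal illustration of the difference:

```python
# Adjacent string literals are concatenated, but each one interpolates only if it
# carries its own f prefix.
seq_len, max_prompt_len = 1024, 512
before = (f"seq_len: {seq_len} should be less than or equal"
          " to max_prompt_len {max_prompt_len}")   # placeholder left as-is
after = (f"seq_len: {seq_len} should be less than or equal"
         f" to max_prompt_len {max_prompt_len}")   # placeholder interpolated
print(before)  # ...equal to max_prompt_len {max_prompt_len}
print(after)   # ...equal to max_prompt_len 512
```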