[Feature] Add mistral pretrain (#204)
* [Feature] Add mistral pretrain
* [feat] rename pretrain_map_fn
* [feat] add custom hook
* [feat] change mistral config name
* Update chat.py
* Update xtuner/utils/templates.py
* Update xtuner/configs/mistral/mistral_7b_qlora_skypile_pretrain_e1.py (x3)
* fix pre-commit

Co-authored-by: Zhihao Lin <36994684+LZHgrla@users.noreply.github.com>
Co-authored-by: LZHgrla <linzhihao@pjlab.org.cn>
1 parent 0badead, commit 8ce2569. Showing 4 changed files with 227 additions and 35 deletions.
xtuner/configs/mistral/mistral_7b_qlora_skypile_pretrain_e1.py (173 changes: 173 additions & 0 deletions)
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from bitsandbytes.optim import PagedAdamW32bit
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
from peft import LoraConfig
from transformers import BitsAndBytesConfig, LlamaTokenizer, MistralForCausalLM

from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import pretrain_map_fn
from xtuner.engine import DatasetInfoHook, EvaluateChatHook
from xtuner.model import SupervisedFinetune

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
pretrained_model_name_or_path = 'mistralai/Mistral-7B-v0.1'

# Data
data_path = 'Skywork/SkyPile-150B'
max_length = 2048
pack_to_max_length = True

# Scheduler & Optimizer
batch_size = 1  # per_device
accumulative_counts = 16
dataloader_num_workers = 0
max_epochs = 1
optim_type = PagedAdamW32bit
lr = 2e-4
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip

# Evaluate the generation performance during training
evaluation_freq = 500
evaluation_inputs = ['上海的景点有']  # 'Attractions in Shanghai include'

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
tokenizer = dict(
    type=LlamaTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=SupervisedFinetune,
    llm=dict(
        type=MistralForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16,
        quantization_config=dict(
            type=BitsAndBytesConfig,
            load_in_4bit=True,
            load_in_8bit=False,
            llm_int8_threshold=6.0,
            llm_int8_has_fp16_weight=False,
            bnb_4bit_compute_dtype=torch.float16,
            bnb_4bit_use_double_quant=True,
            bnb_4bit_quant_type='nf4')),
    lora=dict(
        type=LoraConfig,
        r=64,
        lora_alpha=16,
        lora_dropout=0.05,
        bias='none',
        task_type='CAUSAL_LM'))

#######################################################################
#                    PART 3  Dataset & Dataloader                     #
#######################################################################
train_dataset = dict(
    type=process_hf_dataset,
    dataset=dict(type=load_dataset, path=data_path),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=pretrain_map_fn,
    template_map_fn=None,
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length)

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(type=DefaultSampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = dict(
    type=CosineAnnealingLR,
    eta_min=lr * 0.1,
    by_epoch=True,
    T_max=max_epochs,
    convert_to_iter_based=True)

# train, val, test setting
train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        max_new_tokens=100)
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per epoch.
    checkpoint=dict(type=CheckpointHook, interval=1),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)
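As a quick usage note (not part of the diff): this is a plain mmengine-style Python config, so it can be loaded and inspected before training. The sketch below assumes it is run from the xtuner repository root with the project's dependencies installed; training itself is typically launched through the xtuner CLI, e.g. `xtuner train <config>`, which consumes the same file.

```python
# Minimal sketch: load and inspect the new config with mmengine.
# Assumes the xtuner repo root as the working directory and that the
# packages imported at the top of the config are available.
from mmengine.config import Config

cfg = Config.fromfile(
    'xtuner/configs/mistral/mistral_7b_qlora_skypile_pretrain_e1.py')
print(cfg.model.lora.r)                 # 64
print(cfg.train_dataloader.batch_size)  # 1
```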
@@ -0,0 +1,14 @@
# Copyright (c) OpenMMLab. All rights reserved.
def pretrain_map_fn(example):
    r"""Example before preprocessing:
        example['text'] = 'xxx'
    Example after preprocessing:
        example['conversation'] = [
            {
                'input': '',
                'output': 'xxx'
            },
        ]
    """
    return {'conversation': [{'input': '', 'output': example['text'].strip()}]}
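As an aside (not part of the committed file), a minimal sketch of what this map function does to a raw record; the sample text below is invented, mimicking a row of a text-only corpus such as SkyPile with a single 'text' field:

```python
# Sketch only: the sample record is made up for illustration.
from xtuner.dataset.map_fns import pretrain_map_fn

sample = {'text': '上海的景点有外滩和豫园。\n'}  # "Shanghai's attractions include the Bund and Yu Garden."

print(pretrain_map_fn(sample))
# {'conversation': [{'input': '', 'output': '上海的景点有外滩和豫园。'}]}
```

Leaving 'input' empty puts the whole passage on the 'output' side, so the language-modeling loss covers the entire text rather than only a response, which is what plain pretraining needs.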