* Added MiniCPM support to the README
* Adapted minicpm3 and verified that it runs
* Standardized the formatting and removed overly long lines
* Delete the error file
* Fix lint
* Fix the file name error
---------
Co-authored-by: liudan <liudan@MacBook-Pro.local>
1 parent 4a1b201, commit 697bc77
Showing 5 changed files with 873 additions and 0 deletions.
xtuner/configs/custom_dataset/pretrain/minicpm/minicpm3_4b_full_custom_pretrain_e1.py (216 additions, 0 deletions)
@@ -0,0 +1,216 @@
# Copyright (c) OpenMMLab. All rights reserved.
"""Data format:
[
    {
        "text": "xxx"
    },
    {
        "text": "xxx"
    },
    ...
]
"""  # noqa: E501

from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (
    CheckpointHook,
    DistSamplerSeedHook,
    IterTimerHook,
    LoggerHook,
    ParamSchedulerHook,
)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from torch.optim import AdamW
from transformers import AutoModelForCausalLM, AutoTokenizer

from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import pretrain_map_fn
from xtuner.engine.hooks import (
    DatasetInfoHook,
    EvaluateChatHook,
    VarlenAttnArgsToMessageHubHook,
)
from xtuner.engine.runner import TrainLoop
from xtuner.model import SupervisedFinetune

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
pretrained_model_name_or_path = "openbmb/MiniCPM3-4B"
use_varlen_attn = False

# Data
data_files = ["/path/to/your.json"]
max_length = 1024
pack_to_max_length = True

# Scheduler & Optimizer
batch_size = 1  # per_device
accumulative_counts = 1  # bs = 1 GPU * 1 batch_size_per_device * 1 acc
dataloader_num_workers = 0
max_steps = 10000
optim_type = AdamW
lr = 2e-5
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip
warmup_ratio = 0.03

# Save
save_steps = 500
save_total_limit = 2  # Maximum checkpoints to keep (-1 means unlimited)

# Evaluate the generation performance during training
evaluation_freq = 500
SYSTEM = ""
evaluation_inputs = ["上海是", "Shanghai is"]

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side="right",
    eos_token="<|im_end|>",
)
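# Optional sanity check (a sketch, not part of the original config): the dict
# above can be materialized through xtuner's registry, assuming xtuner and the
# MiniCPM3 tokenizer files are available locally, e.g.
#     from xtuner.registry import BUILDER
#     tok = BUILDER.build(tokenizer)
#     print(tok.eos_token, tok("上海是")["input_ids"])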

model = dict(
    type=SupervisedFinetune,
    use_varlen_attn=use_varlen_attn,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
    ),
)

#######################################################################
#                    PART 3  Dataset & Dataloader                     #
#######################################################################
train_dataset = dict(
    type=process_hf_dataset,
    dataset=dict(type=load_dataset, path="json", data_files=data_files),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=pretrain_map_fn,
    template_map_fn=None,
    remove_unused_columns=True,
    shuffle_before_pack=False,
    pack_to_max_length=pack_to_max_length,
    use_varlen_attn=use_varlen_attn,
)
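# With pack_to_max_length=True, multiple short pretraining samples are packed
# into fixed `max_length`-token sequences so that little compute is spent on
# padding; shuffle_before_pack=False keeps documents in their original order
# before packing.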

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=train_dataset,
    sampler=dict(type=DefaultSampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn, use_varlen_attn=use_varlen_attn),
)

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale="dynamic",
    dtype="float16",
)

# learning policy
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=max_steps * warmup_ratio,
        convert_to_iter_based=True,
    ),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=max_steps * warmup_ratio,
        end=max_steps,
        convert_to_iter_based=True,
    ),
]
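# With max_steps = 10000 and warmup_ratio = 0.03, the LinearLR warmup covers
# iterations 0-300, after which CosineAnnealingLR decays the learning rate to
# eta_min=0.0 between iterations 300 and 10000 (both schedulers are converted
# to iter-based).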

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_iters=max_steps)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    dict(
        type=EvaluateChatHook,
        tokenizer=tokenizer,
        every_n_iters=evaluation_freq,
        evaluation_inputs=evaluation_inputs,
        system=SYSTEM,
    ),
]

if use_varlen_attn:
    custom_hooks += [dict(type=VarlenAttnArgsToMessageHubHook)]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per `save_steps`.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit,
    ),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method="fork", opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend="nccl"),
)

# set visualizer
visualizer = None

# set log level
log_level = "INFO"

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
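As a quick sanity check (a sketch that assumes xtuner and mmengine are installed; it is not part of the commit itself), the new config can be loaded with mmengine to confirm it assembles correctly before launching training with xtuner's CLI (e.g. "xtuner train <path-to-config>"):

from mmengine.config import Config

# Load the newly added MiniCPM3 pretraining config and inspect a few of
# the assembled settings.
cfg = Config.fromfile(
    "xtuner/configs/custom_dataset/pretrain/minicpm/"
    "minicpm3_4b_full_custom_pretrain_e1.py"
)
print(cfg.model.llm.pretrained_model_name_or_path)  # openbmb/MiniCPM3-4B
print(cfg.train_cfg.max_iters)  # 10000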