Commit 58f7ad4 (parent: b6457fc)
Showing 11 changed files with 720 additions and 258 deletions.
170 changes: 170 additions & 0 deletions
xtuner/configs/internvl/v1_5/internvl_v1_5_internlm2_1_8b_finetune.py
@@ -0,0 +1,170 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from torch.optim import AdamW
from transformers import AutoTokenizer

from xtuner.dataset import InternVL_V1_5_Dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.samplers import LengthGroupedSampler
from xtuner.engine.hooks import DatasetInfoHook
from xtuner.engine.runner import TrainLoop
from xtuner.model import InternVL_V1_5
from xtuner.utils import PROMPT_TEMPLATE

#######################################################################
#                           PART 1  Settings                          #
#######################################################################
# Model
path = '/mnt/hwfile/xtuner/huanghaian/model/Mini-InternVL-Chat-2B-V1-5'
prompt_template = PROMPT_TEMPLATE.internlm2_chat

# Data
data_root = '/mnt/hwfile/xtuner/linzhihao/dataset/llava_data/'
data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json'
image_folder = data_root + 'llava_images'
max_length = 8192

# Scheduler & Optimizer
batch_size = 4  # per_device
accumulative_counts = 4
dataloader_num_workers = 4
max_epochs = 1
optim_type = AdamW
# 1024 -> 4e-5
# 128 -> 5e-6
lr = 1e-6
betas = (0.9, 0.999)
weight_decay = 0.05
max_norm = 1  # grad clip
warmup_ratio = 0.03

# Save
save_steps = 1000
save_total_limit = 1  # Maximum checkpoints to keep (-1 means unlimited)

#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
model = dict(
    type=InternVL_V1_5,
    model_path=path,
    freeze_llm=False,
    freeze_visual_encoder=True  # or False
)

#######################################################################
#                     PART 3  Dataset & Dataloader                    #
#######################################################################
llava_dataset = dict(
    type=InternVL_V1_5_Dataset,
    model_path=path,
    data_path=data_path,
    image_folder=image_folder,
    template=prompt_template,
    max_length=max_length)

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=llava_dataset,
    sampler=dict(
        type=LengthGroupedSampler,
        length_property='modality_length',
        per_device_batch_size=batch_size * accumulative_counts),
    collate_fn=dict(type=default_collate_fn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=path,
    trust_remote_code=True)

custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per `save_steps`.
    checkpoint=dict(
        type=CheckpointHook,
        save_optimizer=False,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Default to use a random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
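
Usage note (not part of this commit): a config like the one above is normally launched with the xtuner CLI, e.g. `xtuner train xtuner/configs/internvl/v1_5/internvl_v1_5_internlm2_1_8b_finetune.py --deepspeed deepspeed_zero2`, but it can also be loaded directly through mmengine. The sketch below assumes that second route and is only roughly what the CLI does internally; the work_dir value is illustrative, not something defined in the diff.

# Minimal sketch (assumption): build and run the config above via mmengine.
from mmengine.config import Config
from mmengine.runner import Runner

cfg = Config.fromfile(
    'xtuner/configs/internvl/v1_5/internvl_v1_5_internlm2_1_8b_finetune.py')
cfg.work_dir = './work_dirs/internvl_v1_5_internlm2_1_8b_finetune'  # illustrative output dir

runner = Runner.from_cfg(cfg)  # instantiates model, dataloaders and hooks from the dicts above
runner.train()
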
183 changes: 183 additions & 0 deletions
xtuner/configs/internvl/v1_5/internvl_v1_5_internlm2_1_8b_lora_finetune.py
@@ -0,0 +1,183 @@
# Copyright (c) OpenMMLab. All rights reserved.
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from peft import LoraConfig
from torch.optim import AdamW
from transformers import AutoTokenizer

from xtuner.dataset import InternVL_V1_5_Dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.samplers import LengthGroupedSampler
from xtuner.engine.hooks import DatasetInfoHook
from xtuner.engine.runner import TrainLoop
from xtuner.model import InternVL_V1_5
from xtuner.utils import PROMPT_TEMPLATE

#######################################################################
#                           PART 1  Settings                          #
#######################################################################
# Model
path = '/mnt/hwfile/xtuner/huanghaian/model/Mini-InternVL-Chat-2B-V1-5'
prompt_template = PROMPT_TEMPLATE.internlm2_chat

# Data
data_root = '/mnt/hwfile/xtuner/linzhihao/dataset/llava_data/'
data_path = data_root + 'LLaVA-Instruct-150K/llava_v1_5_mix665k.json'
image_folder = data_root + 'llava_images'
max_length = 8192

# Scheduler & Optimizer
batch_size = 8  # per_device
accumulative_counts = 2
dataloader_num_workers = 4
max_epochs = 1
optim_type = AdamW
# 1024 -> 4e-5
# 128 -> 5e-6
lr = 1e-6
betas = (0.9, 0.999)
weight_decay = 0.05
max_norm = 1  # grad clip
warmup_ratio = 0.03

# Save
save_steps = 1000
save_total_limit = 1  # Maximum checkpoints to keep (-1 means unlimited)

#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
model = dict(
    type=InternVL_V1_5,
    model_path=path,
    freeze_llm=True,
    freeze_visual_encoder=True,
    # comment the following lines if you don't want to use LoRA in the LLM
    llm_lora=dict(
        type=LoraConfig,
        r=128,
        lora_alpha=256,
        lora_dropout=0.05,
        target_modules=None,
        task_type='CAUSAL_LM'),
    # uncomment the following lines if you want to use LoRA in the visual encoder
    # visual_encoder_lora=dict(
    #     type=LoraConfig, r=64, lora_alpha=16, lora_dropout=0.05,
    #     target_modules=['attn.qkv', 'attn.proj', 'mlp.fc1', 'mlp.fc2'])
)

#######################################################################
#                     PART 3  Dataset & Dataloader                    #
#######################################################################
llava_dataset = dict(
    type=InternVL_V1_5_Dataset,
    model_path=path,
    data_path=data_path,
    image_folder=image_folder,
    template=prompt_template,
    max_length=max_length)

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=llava_dataset,
    sampler=dict(
        type=LengthGroupedSampler,
        length_property='modality_length',
        per_device_batch_size=batch_size * accumulative_counts),
    collate_fn=dict(type=default_collate_fn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=path,
    trust_remote_code=True)

custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per `save_steps`.
    checkpoint=dict(
        type=CheckpointHook,
        save_optimizer=False,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Default to use a random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
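
Note on the LoRA variant (not part of this commit): relative to the full fine-tune config, this file freezes the LLM and adds adapters through the `llm_lora` entry, which mmengine builds lazily into a peft.LoraConfig. The sketch below spells out the equivalent object; the comment on `target_modules=None` describes PEFT's usual default behaviour and is an assumption, not something stated in the diff.

# Minimal sketch (assumption): the llm_lora dict above resolves to roughly this
# peft.LoraConfig once the model wrapper applies it to the language model.
from peft import LoraConfig

llm_lora = LoraConfig(
    r=128,                  # adapter rank
    lora_alpha=256,         # LoRA scaling = lora_alpha / r = 2.0
    lora_dropout=0.05,
    target_modules=None,    # typically defers target-layer selection to per-architecture defaults
    task_type='CAUSAL_LM',
)

After training, the saved .pth adapter is usually converted to a Hugging Face format checkpoint (e.g. with `xtuner convert pth_to_hf <config> <pth> <save_dir>`) before merging or deployment.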