From 1d0259b921395b8e541def087b476dee10f5399f Mon Sep 17 00:00:00 2001
From: Zhihao Lin <36994684+LZHgrla@users.noreply.github.com>
Date: Wed, 6 Sep 2023 17:41:12 +0800
Subject: [PATCH] [Feature] Support Baichuan2 models (#102)
* add baichuan2_7b_base
* fix lm_head bug for Baichuan2
* Update README.md
* Update README_zh-CN.md
* Update README.md
* remove infrequent configs
* Update README.md
* Update README_zh-CN.md
* add baichuan2 chat template
* Update README_zh-CN.md
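
The new configs can be launched with `xtuner train baichuan2_7b_base_qlora_oasst1_e3`
(see the README update below). For reference, a minimal inference sketch for a
resulting QLoRA adapter, assuming it has already been converted into a standard
HuggingFace PEFT folder (`./work_dirs/adapter` is a placeholder path):

    import torch
    from peft import PeftModel
    from transformers import AutoModelForCausalLM, AutoTokenizer

    base = 'baichuan-inc/Baichuan2-7B-Base'
    tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(
        base, trust_remote_code=True, torch_dtype=torch.float16, device_map='auto')
    # Attach the fine-tuned LoRA adapter (placeholder path).
    model = PeftModel.from_pretrained(model, './work_dirs/adapter')
    inputs = tokenizer('Please tell me five scenic spots in Shanghai',
                       return_tensors='pt').to(model.device)
    print(tokenizer.decode(model.generate(**inputs, max_new_tokens=128)[0],
                           skip_special_tokens=True))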
---
README.md | 24 +-
README_zh-CN.md | 24 +-
.../baichuan2_7b_base_qlora_alpaca_e3.py | 180 +++++++++++++++
.../baichuan2_7b_base_qlora_alpaca_enzh_e3.py | 198 ++++++++++++++++
...an2_7b_base_qlora_alpaca_enzh_oasst1_e3.py | 211 +++++++++++++++++
.../baichuan2_7b_base_qlora_alpaca_zh_e3.py | 180 +++++++++++++++
...ichuan2_7b_base_qlora_arxiv_gentitle_e3.py | 215 ++++++++++++++++++
.../baichuan2_7b_base_qlora_code_alpaca_e3.py | 184 +++++++++++++++
.../baichuan2_7b_base_qlora_colorist_e5.py | 180 +++++++++++++++
.../baichuan2_7b_base_qlora_lawyer_e3.py | 206 +++++++++++++++++
.../baichuan2_7b_base_qlora_oasst1_512_e3.py | 180 +++++++++++++++
.../baichuan2_7b_base_qlora_oasst1_e3.py | 180 +++++++++++++++
...aichuan2_7b_base_qlora_open_platypus_e3.py | 180 +++++++++++++++
.../baichuan2_7b_base_qlora_sql_e3.py | 184 +++++++++++++++
xtuner/model/sft.py | 7 +
xtuner/utils/templates.py | 3 +
16 files changed, 2314 insertions(+), 22 deletions(-)
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_oasst1_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_zh_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_arxiv_gentitle_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_code_alpaca_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_colorist_e5.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_lawyer_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_512_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_open_platypus_e3.py
create mode 100644 xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_sql_e3.py
diff --git a/README.md b/README.md
index d28730113..c08a1eb3c 100644
--- a/README.md
+++ b/README.md
@@ -14,6 +14,7 @@ English | [简体中文](README_zh-CN.md)
## 🎉 News
+- **\[2023.09.06\]** Support the training of [Baichuan2](https://huggingface.co/baichuan-inc) models! Try it out with `xtuner train baichuan2_7b_base_qlora_oasst1_e3`!
- **\[2023.08.30\]** XTuner is released, with multiple fine-tuned adapters on [HuggingFace](https://huggingface.co/xtuner).
## 📖 Introduction
@@ -21,7 +22,7 @@ English | [简体中文](README_zh-CN.md)
XTuner is a toolkit for efficiently fine-tuning LLM, developed by the [MMRazor](https://github.com/open-mmlab/mmrazor) and [MMDeploy](https://github.com/open-mmlab/mmdeploy) teams.
- **Efficiency**: Support LLM fine-tuning on consumer-grade GPUs. The minimum GPU memory required for 7B LLM fine-tuning is only **8GB**, indicating that users can use nearly any GPU (even the free resource, *e.g.*, Colab) to fine-tune custom LLMs.
-- **Versatile**: Support various **LLMs** ([InternLM](https://github.com/InternLM/InternLM), [Llama2](https://github.com/facebookresearch/llama), [ChatGLM2](https://huggingface.co/THUDM/chatglm2-6b), [Qwen](https://github.com/QwenLM/Qwen-7B), [Baichuan](https://github.com/baichuan-inc), ...), **datasets** ([MOSS_003_SFT](https://huggingface.co/datasets/fnlp/moss-003-sft-data), [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca), [WizardLM](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k), [oasst1](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Open-Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus), [Code Alpaca](https://huggingface.co/datasets/HuggingFaceH4/CodeAlpaca_20K), [Colorist](https://huggingface.co/datasets/burkelibbey/colors), ...) and **algorithms** ([QLoRA](http://arxiv.org/abs/2305.14314), [LoRA](http://arxiv.org/abs/2106.09685)), allowing users to choose the most suitable solution for their requirements.
+- **Versatile**: Support various **LLMs** ([InternLM](https://huggingface.co/internlm), [Llama2](https://huggingface.co/meta-llama), [ChatGLM2](https://huggingface.co/THUDM/chatglm2-6b), [Qwen](https://huggingface.co/Qwen), [Baichuan2](https://huggingface.co/baichuan-inc), ...), **datasets** ([MOSS_003_SFT](https://huggingface.co/datasets/fnlp/moss-003-sft-data), [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca), [WizardLM](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k), [oasst1](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Open-Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus), [Code Alpaca](https://huggingface.co/datasets/HuggingFaceH4/CodeAlpaca_20K), [Colorist](https://huggingface.co/datasets/burkelibbey/colors), ...) and **algorithms** ([QLoRA](http://arxiv.org/abs/2305.14314), [LoRA](http://arxiv.org/abs/2106.09685)), allowing users to choose the most suitable solution for their requirements.
- **Compatibility**: Compatible with [DeepSpeed](https://github.com/microsoft/DeepSpeed) 🚀 and [HuggingFace](https://huggingface.co) 🤗 training pipeline, enabling effortless integration and utilization.
## 🌟 Demos
@@ -70,17 +71,18 @@ XTuner is a toolkit for efficiently fine-tuning LLM, developed by the [MMRazor](
|
diff --git a/README_zh-CN.md b/README_zh-CN.md
index 0ca037b7a..2bba9ad54 100644
--- a/README_zh-CN.md
+++ b/README_zh-CN.md
@@ -14,6 +14,7 @@
## 🎉 更新
+- **\[2023.09.06\]** 支持 [Baichuan2](https://huggingface.co/baichuan-inc) 系列模型训练!快速体验:`xtuner train baichuan2_7b_base_qlora_oasst1_e3`!
- **\[2023.08.30\]** XTuner 正式发布!众多微调模型已上传至 [HuggingFace](https://huggingface.co/xtuner)!
## 📖 介绍
@@ -21,7 +22,7 @@
XTuner 是一个轻量级微调大语言模型的工具库,由 [MMRazor](https://github.com/open-mmlab/mmrazor) 和 [MMDeploy](https://github.com/open-mmlab/mmdeploy) 团队联合开发。
- **轻量级**: 支持在消费级显卡上微调大语言模型。对于 7B 参数量,微调所需的最小显存仅为 **8GB**,这使得用户可以使用几乎任何显卡(甚至免费资源,例如Colab)来微调获得自定义大语言模型助手。
-- **多样性**: 支持多种**大语言模型**([InternLM](https://github.com/InternLM/InternLM)、[Llama2](https://github.com/facebookresearch/llama)、[ChatGLM2](https://huggingface.co/THUDM/chatglm2-6b)、[Qwen](https://github.com/QwenLM/Qwen-7B)、[Baichuan](https://github.com/baichuan-inc), ...),**数据集**([MOSS_003_SFT](https://huggingface.co/datasets/fnlp/moss-003-sft-data), [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca), [WizardLM](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k), [oasst1](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Open-Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus), [Code Alpaca](https://huggingface.co/datasets/HuggingFaceH4/CodeAlpaca_20K), [Colorist](https://huggingface.co/datasets/burkelibbey/colors), ...)和**微调算法**([QLoRA](http://arxiv.org/abs/2305.14314)、[LoRA](http://arxiv.org/abs/2106.09685)),支撑用户根据自身具体需求选择合适的解决方案。
+- **多样性**: 支持多种**大语言模型**([InternLM](https://huggingface.co/internlm)、[Llama2](https://huggingface.co/meta-llama)、[ChatGLM2](https://huggingface.co/THUDM/chatglm2-6b)、[Qwen](https://huggingface.co/Qwen)、[Baichuan2](https://huggingface.co/baichuan-inc), ...),**数据集**([MOSS_003_SFT](https://huggingface.co/datasets/fnlp/moss-003-sft-data), [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca), [WizardLM](https://huggingface.co/datasets/WizardLM/WizardLM_evol_instruct_V2_196k), [oasst1](https://huggingface.co/datasets/timdettmers/openassistant-guanaco), [Open-Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus), [Code Alpaca](https://huggingface.co/datasets/HuggingFaceH4/CodeAlpaca_20K), [Colorist](https://huggingface.co/datasets/burkelibbey/colors), ...)和**微调算法**([QLoRA](http://arxiv.org/abs/2305.14314)、[LoRA](http://arxiv.org/abs/2106.09685)),支撑用户根据自身具体需求选择合适的解决方案。
- **兼容性**: 兼容 [DeepSpeed](https://github.com/microsoft/DeepSpeed) 🚀 和 [HuggingFace](https://huggingface.co) 🤗 的训练流程,支撑用户无感式集成与使用。
## 🌟 示例
@@ -70,17 +71,18 @@ XTuner 是一个轻量级微调大语言模型的工具库,由 [MMRazor](https
|
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_e3.py
new file mode 100644
index 000000000..3500806e3
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_e3.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import alpaca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+alpaca_en_path = 'tatsu-lab/alpaca'
+prompt_template = PROMPT_TEMPLATE.alpaca
+max_length = 2048
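+# When packing is enabled below, multiple shuffled samples are concatenated
+# into a single max_length-token sequence to improve training throughput.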
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
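+# Gradient accumulation: the effective per-GPU batch size is
+# batch_size * accumulative_counts (1 * 16 here).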
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
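+        # QLoRA: the base model is loaded in 4-bit NF4 (with double
+        # quantization) and kept frozen; only the LoRA adapter below is trained.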
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
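+# dataset_map_fn converts the raw Alpaca records into XTuner's instruction
+# format; template_map_fn then renders them with the chosen prompt template.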
+alpaca_en = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_en_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=alpaca_en,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_e3.py
new file mode 100644
index 000000000..2ec4464d3
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_e3.py
@@ -0,0 +1,198 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import ConcatDataset, process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import (alpaca_map_fn, alpaca_zh_map_fn,
+ template_map_fn_factory)
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+alpaca_en_path = 'tatsu-lab/alpaca'
+prompt_template = PROMPT_TEMPLATE.alpaca
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_en = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_en_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+alpaca_zh = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_zh_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_zh_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataset = dict(
+ type=ConcatDataset,
+ datasets_cfg=dict(alpaca_en=alpaca_en, alpaca_zh=alpaca_zh))
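+# The English and Chinese Alpaca datasets are simply concatenated; no
+# re-balancing is applied between them.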
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_oasst1_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_oasst1_e3.py
new file mode 100644
index 000000000..e18bf308b
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_enzh_oasst1_e3.py
@@ -0,0 +1,211 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import ConcatDataset, process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import (alpaca_map_fn, alpaca_zh_map_fn,
+ oasst1_map_fn, template_map_fn_factory)
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+alpaca_en_path = 'tatsu-lab/alpaca'
+oasst1_path = 'timdettmers/openassistant-guanaco'
+prompt_template = PROMPT_TEMPLATE.alpaca
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_en = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_en_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+alpaca_zh = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_zh_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_zh_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+oasst1 = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=oasst1_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=oasst1_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataset = dict(
+ type=ConcatDataset,
+ datasets_cfg=dict(alpaca_en=alpaca_en, alpaca_zh=alpaca_zh, oasst1=oasst1))
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_zh_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_zh_e3.py
new file mode 100644
index 000000000..988f75b90
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_alpaca_zh_e3.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import alpaca_zh_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+alpaca_zh_path = 'silk-road/alpaca-data-gpt4-chinese'
+prompt_template = PROMPT_TEMPLATE.alpaca
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+alpaca_zh = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=alpaca_zh_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_zh_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=alpaca_zh,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_arxiv_gentitle_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_arxiv_gentitle_e3.py
new file mode 100644
index 000000000..1e1ad6924
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_arxiv_gentitle_e3.py
@@ -0,0 +1,215 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import arxiv_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+# 1. Download data from https://kaggle.com/datasets/Cornell-University/arxiv
+# 2. Process data by `xtuner preprocess arxiv ${DOWNLOADED_DATA} ./data/arxiv_data.json [optional arguments]` # noqa: E501
+data_path = './data/arxiv_data.json'
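+# The preprocessed file is loaded below as a HuggingFace 'json' dataset.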
+prompt_template = PROMPT_TEMPLATE.title
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ ('We present InternLM, a multilingual foundational language '
+ 'model with 104B parameters. InternLM is pre-trained on a large '
+ 'corpora with 1.6T tokens with a multi-phase progressive '
+ 'process, and then fine-tuned to align with human preferences. '
+ 'We also developed a training system called Uniscale-LLM for '
+ 'efficient large language model training. The evaluation on a '
+ 'number of benchmarks shows that InternLM achieves '
+ 'state-of-the-art performance in multiple aspects, including '
+ 'knowledge understanding, reading comprehension, mathematics, '
+ 'and coding. With such well-rounded capabilities, InternLM '
+ 'achieves outstanding performances on comprehensive exams, '
+ 'including MMLU, AGIEval, C-Eval and GAOKAO-Bench, without '
+ 'resorting to external tools. On these benchmarks, InternLM '
+ 'not only significantly outperforms open-source models, but '
+ 'also obtains superior performance compared to ChatGPT. Also, '
+ 'InternLM demonstrates excellent capability of understanding '
+ 'Chinese language and Chinese culture, which makes it a '
+ 'suitable foundation model to support Chinese-oriented language '
+ 'applications. This manuscript gives a detailed study of '
+ 'our results, with benchmarks and examples across a diverse '
+ 'set of knowledge domains and tasks.'),
+ ('In this work, we develop and release Llama 2, a collection of '
+ 'pretrained and fine-tuned large language models (LLMs) ranging '
+ 'in scale from 7 billion to 70 billion parameters.\nOur '
+ 'fine-tuned LLMs, called LLAMA 2-CHAT, are optimized for '
+ 'dialogue use cases. Our models outperform open-source chat '
+ 'models on most benchmarks we tested, and based on our human '
+ 'evaluations for helpfulness and safety, may be a suitable '
+ 'substitute for closedsource models. We provide a detailed '
+ 'description of our approach to fine-tuning and safety '
+ 'improvements of LLAMA 2-CHAT in order to enable the community '
+ 'to build on our work and contribute to the responsible '
+ 'development of LLMs.')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(
+ type=load_dataset, path='json', data_files=dict(train=data_path)),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=arxiv_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_code_alpaca_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_code_alpaca_e3.py
new file mode 100644
index 000000000..8c1d31968
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_code_alpaca_e3.py
@@ -0,0 +1,184 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import code_alpaca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+data_path = 'HuggingFaceH4/CodeAlpaca_20K'
+prompt_template = PROMPT_TEMPLATE.coder
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 100
+evaluation_inputs = [
+ ('写一个Python函数,将十六进制颜色代码(如#0066ee)转换为对应的'
+ '红、绿、蓝(RGB)三个颜色分量值,并以元组的形式返回。'),
+ ('Write a Python function that takes a hexadecimal color code '
+ '(e.g., #0066ee) as input and converts it into the corresponding '
+ 'red, green, and blue (RGB) color component values.')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=code_alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_colorist_e5.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_colorist_e5.py
new file mode 100644
index 000000000..11075ad8b
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_colorist_e5.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import colors_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+data_path = 'burkelibbey/colors'
+prompt_template = PROMPT_TEMPLATE.colorist
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 5
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 200
+evaluation_inputs = [
+ '请给我一个像天空一样清澈透明的蓝色。', 'Please give me a clear blue like the sky.'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=colors_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Use a random seed by default and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_lawyer_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_lawyer_e3.py
new file mode 100644
index 000000000..589747804
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_lawyer_e3.py
@@ -0,0 +1,206 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import ConcatDataset, process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import (crime_kg_assitant_map_fn,
+ law_reference_map_fn,
+ template_map_fn_factory)
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+# download data from https://github.com/LiuHC0428/LAW-GPT
+crime_kg_assitant_path = './data/CrimeKgAssitant清洗后_52k.json'
+law_reference_data_path = './data/训练数据_带法律依据_92k.json'
+prompt_template = PROMPT_TEMPLATE.lawyer
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = ['请问离婚需要准备什么材料?', '销售鳄鱼皮包违法吗?']
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+crime_kg_assitant = dict(
+ type=process_hf_dataset,
+ dataset=dict(
+ type=load_dataset,
+ path='json',
+ data_files=dict(train=crime_kg_assitant_path)),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=crime_kg_assitant_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+law_reference_data = dict(
+ type=process_hf_dataset,
+ dataset=dict(
+ type=load_dataset,
+ path='json',
+ data_files=dict(train=law_reference_data_path)),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=law_reference_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataset = dict(
+ type=ConcatDataset,
+ datasets_cfg=dict(
+ crime_kg_assitant=crime_kg_assitant,
+ law_reference_data=law_reference_data))
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
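Note on the config above: the lawyer recipe is the only one in this series that mixes two data sources, and the ConcatDataset entry simply chains the two processed datasets declared in datasets_cfg. Purely as an illustration of that idea (using torch's generic ConcatDataset rather than xtuner's own class, with toy in-memory datasets standing in for the real ones):

    # Illustration only: chaining two prepared datasets back to back,
    # analogous to what the ConcatDataset block in the lawyer config declares.
    from torch.utils.data import ConcatDataset, Dataset

    class TinyListDataset(Dataset):
        """A minimal map-style dataset over an in-memory list."""

        def __init__(self, items):
            self.items = items

        def __len__(self):
            return len(self.items)

        def __getitem__(self, idx):
            return self.items[idx]

    crime_samples = TinyListDataset(['crime_kg sample'] * 3)
    law_samples = TinyListDataset(['law_reference sample'] * 2)
    combined = ConcatDataset([crime_samples, law_samples])
    assert len(combined) == 5  # samples from both sources, indexed end to end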
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_512_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_512_e3.py
new file mode 100644
index 000000000..2efef99c4
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_512_e3.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import oasst1_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+data_path = 'timdettmers/openassistant-guanaco'
+prompt_template = PROMPT_TEMPLATE.openassistant
+max_length = 512
+pack_to_max_length = False
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=oasst1_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
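Compared with the 2048-token packed variant that follows, this _512 config keeps each OASST1 sample as its own sequence of at most 512 tokens by setting pack_to_max_length = False, trading tokens per step for a smaller per-sample footprint. In both variants the number of samples consumed per optimizer step is batch_size * accumulative_counts on each GPU; the tiny helper below (hypothetical, not part of xtuner) only makes that arithmetic explicit:

    # Hypothetical helper, not part of xtuner: samples per optimizer step.
    def effective_batch(batch_size=1, accumulative_counts=16, num_gpus=1):
        return batch_size * accumulative_counts * num_gpus

    print(effective_batch())            # 16 on a single GPU
    print(effective_batch(num_gpus=8))  # 128 across 8 GPUs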
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_e3.py
new file mode 100644
index 000000000..f1458f2b6
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_oasst1_e3.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import oasst1_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+data_path = 'timdettmers/openassistant-guanaco'
+prompt_template = PROMPT_TEMPLATE.openassistant
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=oasst1_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_open_platypus_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_open_platypus_e3.py
new file mode 100644
index 000000000..7a9471bbd
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_open_platypus_e3.py
@@ -0,0 +1,180 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import alpaca_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+data_path = 'garage-bAInd/Open-Platypus'
+prompt_template = PROMPT_TEMPLATE.alpaca
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=alpaca_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_sql_e3.py b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_sql_e3.py
new file mode 100644
index 000000000..24862b83d
--- /dev/null
+++ b/xtuner/configs/baichuan/baichuan2_7b_base/baichuan2_7b_base_qlora_sql_e3.py
@@ -0,0 +1,184 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+import torch
+from bitsandbytes.optim import PagedAdamW32bit
+from datasets import load_dataset
+from mmengine.dataset import DefaultSampler
+from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+ LoggerHook, ParamSchedulerHook)
+from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+from peft import LoraConfig
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+ BitsAndBytesConfig)
+
+from xtuner.dataset import process_hf_dataset
+from xtuner.dataset.collate_fns import default_collate_fn
+from xtuner.dataset.map_fns import sql_map_fn, template_map_fn_factory
+from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+from xtuner.model import SupervisedFinetune
+from xtuner.utils import PROMPT_TEMPLATE
+
+#######################################################################
+# PART 1 Settings #
+#######################################################################
+# Model
+pretrained_model_name_or_path = 'baichuan-inc/Baichuan2-7B-Base'
+
+# Data
+data_path = 'b-mc2/sql-create-context'
+prompt_template = PROMPT_TEMPLATE.sql
+max_length = 2048
+pack_to_max_length = True
+
+# Scheduler & Optimizer
+batch_size = 1 # per_device
+accumulative_counts = 16
+dataloader_num_workers = 0
+max_epochs = 3
+optim_type = PagedAdamW32bit
+lr = 2e-4
+betas = (0.9, 0.999)
+weight_decay = 0
+max_norm = 1 # grad clip
+
+# Evaluate the generation performance during training
+evaluation_freq = 500
+evaluation_inputs = [
+ ('CREATE TABLE station (name VARCHAR, lat VARCHAR, city VARCHAR)\n'
+ 'Find the name, latitude, and city of stations with latitude '
+ 'above 50.'),
+ ('CREATE TABLE weather (zip_code VARCHAR, mean_visibility_miles '
+ 'INTEGER)\n找到mean_visibility_miles最大的zip_code。')
+]
+
+#######################################################################
+# PART 2 Model & Tokenizer #
+#######################################################################
+tokenizer = dict(
+ type=AutoTokenizer.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ padding_side='right')
+
+model = dict(
+ type=SupervisedFinetune,
+ llm=dict(
+ type=AutoModelForCausalLM.from_pretrained,
+ pretrained_model_name_or_path=pretrained_model_name_or_path,
+ trust_remote_code=True,
+ torch_dtype=torch.float16,
+ quantization_config=dict(
+ type=BitsAndBytesConfig,
+ load_in_4bit=True,
+ load_in_8bit=False,
+ llm_int8_threshold=6.0,
+ llm_int8_has_fp16_weight=False,
+ bnb_4bit_compute_dtype=torch.float16,
+ bnb_4bit_use_double_quant=True,
+ bnb_4bit_quant_type='nf4')),
+ lora=dict(
+ type=LoraConfig,
+ r=64,
+ lora_alpha=16,
+ lora_dropout=0.1,
+ bias='none',
+ task_type='CAUSAL_LM'))
+
+#######################################################################
+# PART 3 Dataset & Dataloader #
+#######################################################################
+train_dataset = dict(
+ type=process_hf_dataset,
+ dataset=dict(type=load_dataset, path=data_path),
+ tokenizer=tokenizer,
+ max_length=max_length,
+ dataset_map_fn=sql_map_fn,
+ template_map_fn=dict(
+ type=template_map_fn_factory, template=prompt_template),
+ remove_unused_columns=True,
+ shuffle_before_pack=True,
+ pack_to_max_length=pack_to_max_length)
+
+train_dataloader = dict(
+ batch_size=batch_size,
+ num_workers=dataloader_num_workers,
+ dataset=train_dataset,
+ sampler=dict(type=DefaultSampler, shuffle=True),
+ collate_fn=dict(type=default_collate_fn))
+
+#######################################################################
+# PART 4 Scheduler & Optimizer #
+#######################################################################
+# optimizer
+optim_wrapper = dict(
+ type=AmpOptimWrapper,
+ optimizer=dict(
+ type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+ clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+ accumulative_counts=accumulative_counts,
+ loss_scale='dynamic',
+ dtype='float16')
+
+# learning policy
+# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md # noqa: E501
+param_scheduler = dict(
+ type=CosineAnnealingLR,
+ eta_min=lr * 0.1,
+ by_epoch=True,
+ T_max=max_epochs,
+ convert_to_iter_based=True)
+
+# train, val, test setting
+train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+#######################################################################
+# PART 5 Runtime #
+#######################################################################
+# Log the dialogue periodically during the training process, optional
+custom_hooks = [
+ dict(type=DatasetInfoHook, tokenizer=tokenizer),
+ dict(
+ type=EvaluateChatHook,
+ tokenizer=tokenizer,
+ every_n_iters=evaluation_freq,
+ evaluation_inputs=evaluation_inputs,
+ instruction=prompt_template.INSTRUCTION_START)
+]
+
+# configure default hooks
+default_hooks = dict(
+ # record the time of every iteration.
+ timer=dict(type=IterTimerHook),
+    # print log every 10 iterations.
+ logger=dict(type=LoggerHook, interval=10),
+ # enable the parameter scheduler.
+ param_scheduler=dict(type=ParamSchedulerHook),
+ # save checkpoint per epoch.
+ checkpoint=dict(type=CheckpointHook, interval=1),
+    # set sampler seed in distributed environment.
+ sampler_seed=dict(type=DistSamplerSeedHook),
+)
+
+# configure environment
+env_cfg = dict(
+ # whether to enable cudnn benchmark
+ cudnn_benchmark=False,
+ # set multi process parameters
+ mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+ # set distributed parameters
+ dist_cfg=dict(backend='nccl'),
+)
+
+# set visualizer
+visualizer = None
+
+# set log level
+log_level = 'INFO'
+
+# load from which checkpoint
+load_from = None
+
+# whether to resume training from the loaded checkpoint
+resume = False
+
+# Default to a random seed and disable `deterministic`
+randomness = dict(seed=None, deterministic=False)
diff --git a/xtuner/model/sft.py b/xtuner/model/sft.py
index c15721fed..ea3a95679 100644
--- a/xtuner/model/sft.py
+++ b/xtuner/model/sft.py
@@ -33,6 +33,13 @@ def __init__(self,
self.use_lora = lora is not None
if self.use_lora:
self._prepare_for_lora(peft_model, use_gradient_checkpointing)
+ try:
+            # For Baichuan2, set first_flag to False to disable weight init
+ if self.llm.base_model.model.__class__.__name__.lower(
+ ) == 'BaichuanForCausalLM'.lower():
+ self.llm.base_model.model.lm_head.first_flag = False
+ except Exception:
+ pass
elif use_gradient_checkpointing:
# For backward compatibility
if hasattr(self.llm, 'enable_input_require_grads'):
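Note on the sft.py hunk above: Baichuan2's remote modeling code implements lm_head as a NormHead that carries a first_flag; the first time the module runs in eval mode it normalizes and re-assigns its weight. During QLoRA training the periodic chat evaluation switches the model to eval mode, so leaving the flag set could rewrite the head weight mid-run, which appears to be what the first_flag = False assignment guards against. The sketch below is a rough paraphrase of that upstream behavior, for illustration only; the authoritative version lives in the model's trust_remote_code files and may differ in detail:

    import math

    import torch
    from torch import nn

    class NormHeadSketch(nn.Module):
        """Rough paraphrase of Baichuan2's NormHead, for illustration only."""

        def __init__(self, hidden_size, vocab_size):
            super().__init__()
            self.weight = nn.Parameter(torch.empty(vocab_size, hidden_size))
            nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
            self.first_flag = True

        def forward(self, hidden_states):
            if self.training:
                # Training always uses a freshly normalized view of the weight.
                norm_weight = nn.functional.normalize(self.weight)
            elif self.first_flag:
                # First eval-mode forward: the stored weight is normalized and
                # kept. Setting first_flag = False up front (as sft.py now does
                # for Baichuan2) skips this branch, so the weight is never
                # rewritten while a QLoRA run is still training.
                self.first_flag = False
                self.weight.data = nn.functional.normalize(self.weight)
                norm_weight = self.weight
            else:
                norm_weight = self.weight
            return nn.functional.linear(hidden_states, norm_weight)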
diff --git a/xtuner/utils/templates.py b/xtuner/utils/templates.py
index 318cd38cc..21c3a1eee 100644
--- a/xtuner/utils/templates.py
+++ b/xtuner/utils/templates.py
@@ -82,6 +82,9 @@
baichuan_chat=dict(
        INSTRUCTION_START='<reserved_102>{input}<reserved_103>',
        INSTRUCTION='<reserved_102>{input}<reserved_103>'),
+ baichuan2_chat=dict(
+        INSTRUCTION_START='<reserved_106>{input}<reserved_107>',
+        INSTRUCTION='<reserved_106>{input}<reserved_107>'),
wizardlm=dict(
INSTRUCTION_START=('A chat between a curious user and an artificial '
'intelligence assistant. The assistant gives '