Pr/eliebak/220 #231
Merged · 10 commits · Sep 9, 2024

Changes from all commits
1 change: 1 addition & 0 deletions README.md
@@ -75,6 +75,7 @@ You can find more examples in the [`/examples`](/examples) directory:
| `mamba` | Train an example Mamba model |
| `moe` | Train an example Mixture-of-Experts (MoE) model |
| `mup` | Use spectral µTransfer to scale up your model |
| `examples/config_tiny_llama_with_s3_upload.yaml` | Automatically upload checkpoints to S3 during training |

We're working on adding more examples soon! Feel free to open a PR to add your own example. 🚀

115 changes: 115 additions & 0 deletions examples/config_tiny_llama_with_s3_upload.yaml
@@ -0,0 +1,115 @@
checkpoints:
  checkpoint_interval: 10
  checkpoints_path: checkpoints
  checkpoints_path_is_shared_file_system: false
  resume_checkpoint_path: s3://phuc-experiments/temp/config_tiny_llama_with_s3_upload
  save_initial_state: false
data_stages:
- data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: stas/openwebtext-10k
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
  name: Stable Training Stage
  start_training_step: 1
- data:
    dataset:
      dataset_overwrite_cache: false
      dataset_processing_num_proc_per_process: 1
      hf_dataset_config_name: null
      hf_dataset_or_datasets: stas/openwebtext-10k
      hf_dataset_splits: train
      text_column_name: text
    num_loading_workers: 1
    seed: 42
  name: Annealing Phase
  start_training_step: 10
general:
  benchmark_csv_path: null
  consumed_train_samples: null
  ignore_sanity_checks: true
  project: debug
  run: tiny_llama_%date_%jobid
  seed: 42
  step: null
lighteval: null
logging:
  iteration_step_info_interval: 1
  log_level: info
  log_level_replica: info
model:
  ddp_bucket_cap_mb: 25
  dtype: bfloat16
  init_method:
    std: 0.025
  make_vocab_size_divisible_by: 1
  model_config:
    bos_token_id: 1
    eos_token_id: 2
    hidden_act: silu
    hidden_size: 16
    initializer_range: 0.02
    intermediate_size: 64
    is_llama_config: true
    max_position_embeddings: 256
    num_attention_heads: 4
    num_hidden_layers: 2
    num_key_value_heads: 4
    pad_token_id: null
    pretraining_tp: 1
    rms_norm_eps: 1.0e-05
    rope_scaling: null
    tie_word_embeddings: true
    use_cache: true
    vocab_size: 256
optimizer:
  accumulate_grad_in_fp32: true
  clip_grad: 1.0
  learning_rate_scheduler:
    learning_rate: 0.0003
    lr_decay_starting_step: null
    lr_decay_steps: 13
    lr_decay_style: cosine
    lr_warmup_steps: 2
    lr_warmup_style: linear
    min_decay_lr: 1.0e-05
  optimizer_factory:
    adam_beta1: 0.9
    adam_beta2: 0.95
    adam_eps: 1.0e-08
    name: adamW
    torch_adam_is_fused: true
  weight_decay: 0.01
  zero_stage: 0
parallelism:
  dp: 1
  expert_parallel_size: 1
  pp: 1
  pp_engine: 1f1b
  tp: 1
  tp_linear_async_communication: true
  tp_mode: REDUCE_SCATTER
profiler: null
tokenizer:
  tokenizer_max_length: null
  tokenizer_name_or_path: robot-test/dummy-tokenizer-wordlevel
  tokenizer_revision: null
tokens:
  batch_accumulation_per_replica: 1
  limit_test_batches: 0
  limit_val_batches: 0
  micro_batch_size: 2
  sequence_length: 256
  train_steps: 30
  val_check_interval: -1
s3_upload:
  remove_after_upload: true
  s5cmd_concurrency: 5
  s5cmd_numworkers: 16
  s5cmd_path: /fsx/nouamane/miniconda/envs/2-1-cu121/bin/s5cmd
  upload_s3_path: s3://phuc-experiments/temp/config_tiny_llama_with_s3_upload
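A note on using this example: `upload_s3_path`, `resume_checkpoint_path`, and `s5cmd_path` above point at the author's own bucket and conda environment, so substitute your own values before launching. Following the repo's other examples, a run would presumably be started with `CUDA_DEVICE_MAX_CONNECTIONS=1 torchrun --nproc_per_node=1 run_train.py --config-file examples/config_tiny_llama_with_s3_upload.yaml`.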
7 changes: 7 additions & 0 deletions pyproject.toml
@@ -21,6 +21,7 @@ dependencies = [
"safetensors",
"dacite",
"tqdm",
"datasets",
]

[tool.setuptools.packages.find]
@@ -53,6 +54,12 @@ nanosets = [
    "numba",
]

s3 = [
    "boto3",
    "s3fs",
    "s5cmd",
]

[build-system]
requires = [
    "setuptools",
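Since the S3 dependencies are grouped under an optional `s3` extra rather than the core dependency list, they are only pulled in on request, e.g. with `pip install -e ".[s3]"` for an editable install.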
28 changes: 25 additions & 3 deletions src/nanotron/config/config.py
@@ -2,6 +2,7 @@
import os
from dataclasses import dataclass, fields
from pathlib import Path
from datasets.download.streaming_download_manager import xPath
from typing import List, Optional, Type, Union

import dacite
@@ -91,6 +92,22 @@ def __post_init__(self):
            self.hf_dataset_splits = "train"


@dataclass
class S3UploadArgs:
    """Arguments related to uploading checkpoints on s3"""

    upload_s3_path: xPath
    remove_after_upload: bool
    s5cmd_numworkers: Optional[int]
    s5cmd_concurrency: Optional[int]
    s5cmd_path: Optional[xPath]

    def __post_init__(self):
        if isinstance(self.upload_s3_path, str):
            self.upload_s3_path = xPath(self.upload_s3_path)
        if isinstance(self.s5cmd_path, str):
            self.s5cmd_path = xPath(self.s5cmd_path)

@dataclass
class NanosetDatasetsArgs:
    dataset_folder: Union[str, dict, List[str]]

@@ -146,14 +163,14 @@ class CheckpointsArgs:
    checkpoint_interval: int
    save_initial_state: Optional[bool] = False
    save_final_state: Optional[bool] = False
-    resume_checkpoint_path: Optional[Path] = None
+    resume_checkpoint_path: Optional[xPath] = None
    checkpoints_path_is_shared_file_system: Optional[bool] = False

    def __post_init__(self):
        if isinstance(self.checkpoints_path, str):
-            self.checkpoints_path = Path(self.checkpoints_path)
+            self.checkpoints_path = xPath(self.checkpoints_path)
        if isinstance(self.resume_checkpoint_path, str):
-            self.resume_checkpoint_path = Path(self.resume_checkpoint_path)
+            self.resume_checkpoint_path = xPath(self.resume_checkpoint_path)


@dataclass
@@ -338,13 +355,18 @@ class Config:
    data_stages: Optional[List[DatasetStageArgs]] = None
    profiler: Optional[ProfilerArgs] = None
    lighteval: Optional[LightEvalConfig] = None
    s3_upload: Optional[S3UploadArgs] = None

    @classmethod
    def create_empty(cls):
        cls_fields = fields(cls)
        return cls(**{f.name: None for f in cls_fields})

    def __post_init__(self):
        if self.s3_upload is not None:
            self.s3_upload.__post_init__()

        # Some final sanity checks across separate arguments sections:
        if self.profiler is not None and self.profiler.profiler_export_path is not None:
            assert self.tokens.train_steps < 10
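To make the coercion behavior concrete, here is a minimal sketch exercising the new `S3UploadArgs` dataclass directly (the bucket path and s5cmd location are placeholder values, not part of the PR):

```python
from nanotron.config.config import S3UploadArgs

# Plain strings are accepted and coerced to xPath in S3UploadArgs.__post_init__,
# which Config.__post_init__ also re-invokes explicitly above.
args = S3UploadArgs(
    upload_s3_path="s3://my-bucket/experiments/tiny-llama",  # placeholder bucket
    remove_after_upload=True,
    s5cmd_numworkers=16,
    s5cmd_concurrency=5,
    s5cmd_path="/usr/local/bin/s5cmd",  # placeholder install location
)
print(type(args.upload_s3_path))  # xPath, coerced from the plain string
```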
9 changes: 3 additions & 6 deletions src/nanotron/helpers.py
@@ -52,7 +52,6 @@ def _vocab_size_with_padding(orig_vocab_size: int, pg_size: int, make_vocab_size

    multiple = make_vocab_size_divisible_by * pg_size
    after = int(ceil(orig_vocab_size / multiple) * multiple)
    if after != orig_vocab_size:
        log_rank(
            f"[Vocab Size Padding] Padded vocab (size: {orig_vocab_size}) with {after - orig_vocab_size} dummy tokens (new size: {after})",
@@ -147,10 +146,8 @@ def lr_lambda(current_step: int, initial_lr: float):
                / lr_decay_steps
            )
        elif lr_scheduler_args.lr_decay_style == "1-sqrt":
-            lmbda = (
-                lr_scheduler_args.min_decay_lr
-                + (initial_lr - lr_scheduler_args.min_decay_lr)
-                * (1 - math.sqrt((current_step - lr_decay_starting_step) / lr_decay_steps))
+            lmbda = lr_scheduler_args.min_decay_lr + (initial_lr - lr_scheduler_args.min_decay_lr) * (
+                1 - math.sqrt((current_step - lr_decay_starting_step) / lr_decay_steps)
            )
        else:
            raise ValueError(f"Unknown decay style {lr_scheduler_args.lr_decay_style}")
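For reference, the `1-sqrt` decay that this reformatting preserves can be written as a standalone function; the following is an illustrative sketch (function and argument names are invented for illustration, not part of the codebase):

```python
import math

def one_sqrt_decay(current_step: int, initial_lr: float, min_decay_lr: float,
                   decay_starting_step: int, decay_steps: int) -> float:
    """Standalone version of the '1-sqrt' branch above: the lr drops from
    initial_lr toward min_decay_lr by sqrt(progress) of the lr range."""
    progress = (current_step - decay_starting_step) / decay_steps
    return min_decay_lr + (initial_lr - min_decay_lr) * (1 - math.sqrt(progress))

# Halfway through decay, the lr has dropped by sqrt(0.5) ~ 71% of the range:
print(one_sqrt_decay(current_step=15, initial_lr=3e-4, min_decay_lr=1e-5,
                     decay_starting_step=10, decay_steps=10))  # ~9.5e-05
```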
@@ -693,7 +690,7 @@ def is_resume_from_training():
    else:
        next_stage = next((s for s in config.data_stages if s.start_training_step > stage.start_training_step), None)
        total_train_steps = next_stage.start_training_step

    if metadata.last_train_step > stage.start_training_step:
        # NOTE: if the last_train_step is larger than the start_training_step of the current stage,
        # it means that the training has already passed this stage
4 changes: 4 additions & 0 deletions src/nanotron/s3_checkpoints/__init__.py
@@ -0,0 +1,4 @@
from .fsspec import check_path_is_local, fs_copy, fs_open
from .s3_mover import S3Mover

__all__ = ["S3Mover", "fs_open", "fs_copy", "check_path_is_local"]
38 changes: 38 additions & 0 deletions src/nanotron/s3_checkpoints/fsspec.py
@@ -0,0 +1,38 @@
import contextlib
from pathlib import Path
from typing import Tuple, Union

import fsspec
from fsspec.implementations import local


def get_filesystem_and_path(path: Path, storage_options=None) -> Tuple[fsspec.AbstractFileSystem, str]:
    # Use supported filesystems in `fsspec`. If you need another one, please use `fsspec.registry.register_implementation`
    # DO NOT USE `mode` argument as it adds a suffix `0.part` when using `mode="w"`.
    fs, _, paths = fsspec.core.get_fs_token_paths(str(path), storage_options=storage_options)
    assert len(paths) == 1
    return fs, paths[0]


@contextlib.contextmanager
def fs_open(
    file: Union[str, Path],
    mode="r",
):
    # TODO @thomasw21: pass storage options
    fs, path = get_filesystem_and_path(file)
    with fs.open(path, mode=mode) as f:
        yield f


def fs_copy(
    input_file: Union[str, Path],
    output_file: Union[str, Path],
):
    """Copy file from input to output (possibly on s3/other fs)"""
    with fs_open(input_file, mode="rb") as fi, fs_open(output_file, mode="wb") as fo:
        fo.write(fi.read())


def check_path_is_local(path: Path, storage_options=None) -> bool:
    return isinstance(get_filesystem_and_path(path=path, storage_options=storage_options)[0], local.LocalFileSystem)
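To round out the new module, a short usage sketch of these helpers (bucket names and paths are placeholders; the S3 calls assume `s3fs` is installed and AWS credentials are already configured for `fsspec`):

```python
from nanotron.s3_checkpoints import check_path_is_local, fs_copy, fs_open

# True for a plain filesystem path, False for s3:// and other remote schemes
print(check_path_is_local("checkpoints/10"))          # True
print(check_path_is_local("s3://my-bucket/ckpt/10"))  # False

# Copy a local file to S3 (or any fsspec-supported filesystem) in one call
fs_copy("checkpoints/10/config.yaml", "s3://my-bucket/ckpt/10/config.yaml")

# Read it back through the same abstraction
with fs_open("s3://my-bucket/ckpt/10/config.yaml", mode="r") as f:
    print(f.read()[:80])
```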