[BE] restructure tests and assets folders
[ghstack-poisoned]
tianyu-l committed Dec 18, 2024
1 parent 00f5302 commit b78627e
Showing 22 changed files with 15 additions and 14 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/integration_test_4gpu.yaml
@@ -43,4 +43,4 @@ jobs:
python -m pip install -e .
mkdir artifacts-to-be-uploaded
- python ./test_runner.py artifacts-to-be-uploaded --ngpu 4
+ python ./tests/integration_tests.py artifacts-to-be-uploaded --ngpu 4
2 changes: 1 addition & 1 deletion .github/workflows/integration_test_8gpu.yaml
@@ -38,4 +38,4 @@ jobs:
python -m pip install --force-reinstall --pre torch --index-url https://download.pytorch.org/whl/nightly/cu124
mkdir artifacts-to-be-uploaded
- python ./test_runner.py artifacts-to-be-uploaded --ngpu 8
+ python ./tests/integration_tests.py artifacts-to-be-uploaded --ngpu 8
2 changes: 1 addition & 1 deletion .github/workflows/unit_test_cpu.yaml
@@ -25,4 +25,4 @@ jobs:
pip config --user set global.progress_bar off
pip install --force-reinstall --pre torch --index-url https://download.pytorch.org/whl/nightly/cpu
- pytest test --cov=. --cov-report=xml --durations=20 -vv
+ pytest tests/unit_tests --cov=. --cov-report=xml --durations=20 -vv
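With test_runner.py now living at tests/integration_tests.py and the unit tests under tests/unit_tests, the workflow commands above translate directly to a local run. A minimal Python sketch mirroring those commands; the artifacts directory name is the one used in the workflows, and the GPU count should match the local machine:

import subprocess
import sys

import pytest

# Integration tests: same invocation as the 4-GPU workflow above; the first
# argument is the output/artifacts directory and --ngpu selects the GPU count.
subprocess.run(
    [sys.executable, "./tests/integration_tests.py", "artifacts-to-be-uploaded", "--ngpu", "4"],
    check=True,
)

# Unit tests: same target as the CPU workflow's pytest command (coverage flags omitted).
raise SystemExit(pytest.main(["tests/unit_tests", "-vv"]))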
2 changes: 1 addition & 1 deletion .pre-commit-config.yaml
@@ -24,7 +24,7 @@ repos:
files: \.py$
args:
- --license-filepath
- - docs/license_header.txt
+ - assets/license_header.txt

- repo: https://github.com/pycqa/flake8
rev: 34cbf8ef3950f43d09b85e2e45c15ae5717dc37b
1 change: 0 additions & 1 deletion assets/images/readme.md

This file was deleted.

File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -36,7 +36,7 @@ dev = [
]

[tool.setuptools.dynamic]
- version = {file = "version.txt"}
+ version = {file = "assets/version.txt"}


# ---- Explicit project build information ---- #
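Because the version is now resolved dynamically from assets/version.txt at build time, runtime code still reads it from the installed distribution's metadata. A small sketch, assuming the distribution name declared in pyproject.toml is torchtitan (the name itself is not shown in this hunk):

from importlib.metadata import version

# setuptools bakes the contents of assets/version.txt into the package metadata
# at build time; at runtime the version string is read back from that metadata.
print(version("torchtitan"))  # distribution name assumed, not shown in this diff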
File renamed without changes.
File renamed without changes.
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion test_runner.py → tests/integration_tests.py
@@ -254,7 +254,7 @@ def build_test_list():
[
"--experimental.pipeline_parallel_degree 2",
"--experimental.pipeline_parallel_schedule PipelineScheduleMulti",
- "--experimental.pipeline_parallel_schedule_csv ./test/assets/custom_schedule.csv",
+ "--experimental.pipeline_parallel_schedule_csv ./tests/assets/custom_schedule.csv",
"--experimental.pipeline_parallel_microbatches 8",
],
],
File renamed without changes.
File renamed without changes.
@@ -12,7 +12,10 @@
VisionEncoder,
)

- from test.multimodal_model.test_utils import fixed_init_model, fixed_init_tensor
+ from tests.unit_tests.multimodal_model.test_utils import (
+     fixed_init_model,
+     fixed_init_tensor,
+ )


@pytest.fixture
File renamed without changes.
@@ -9,10 +9,10 @@
from torchtitan.datasets.tokenizer import build_tokenizer


- class TestCheckpoint:
+ class TestDatasetCheckpointing:
def test_c4_resumption(self):
dataset_name = "c4_test"
- dataset_path = "./test/assets/c4_test"
+ dataset_path = "./tests/assets/c4_test"
batch_size = 1
seq_len = 1024
world_size = 4
@@ -41,8 +41,7 @@ def test_c4_resumption(self):
def _build_dataloader(
self, dataset_name, dataset_path, batch_size, seq_len, world_size, rank
):
- tokenizer_type = "tiktoken"
- tokenizer = build_tokenizer("tiktoken", "./test/assets/test_tiktoken.model")
+ tokenizer = build_tokenizer("tiktoken", "./tests/assets/test_tiktoken.model")
return build_hf_data_loader(
dataset_name=dataset_name,
dataset_path=dataset_path,
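The renamed TestDatasetCheckpointing class now points _build_dataloader at the relocated ./tests/assets files. As a hedged illustration of the resumption property such a test exercises, here is a sketch built around the argument list shown above; the state_dict()/load_state_dict() calls and the rank value are assumptions not visible in this diff:

def check_resumption(build_dataloader, resume_after: int = 3):
    """Advance a loader, snapshot it, and check that a fresh loader resumes in step."""
    # values mirror the hunk above: batch_size=1, seq_len=1024, world_size=4; rank 0 is arbitrary
    dl = build_dataloader("c4_test", "./tests/assets/c4_test", 1, 1024, 4, 0)
    it = iter(dl)
    for _ in range(resume_after):
        next(it)                        # advance a few batches
    state = dl.state_dict()             # assumed API: snapshot progress
    expected = next(it)

    resumed = build_dataloader("c4_test", "./tests/assets/c4_test", 1, 1024, 4, 0)
    resumed.load_state_dict(state)      # assumed API: restore progress
    actual = next(iter(resumed))
    # expected and actual should match batch-for-batch (e.g. torch.equal on the
    # contained tensors) if dataset checkpointing and resumption work correctly.
    return expected, actual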
File renamed without changes.
File renamed without changes.
2 changes: 1 addition & 1 deletion torchtitan/datasets/hf_datasets.py
@@ -45,7 +45,7 @@ class DatasetConfig:
text_processor=_process_c4_text,
),
"c4_test": DatasetConfig(
- path="test/assets/c4_test",
+ path="tests/assets/c4_test",
loader=lambda path: load_dataset(path, split="train"),
text_processor=_process_c4_text,
),
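The relocated c4_test entry also shows the registration pattern hf_datasets.py uses for local datasets. A hypothetical sketch following that pattern; the dataset key and path below are placeholders, and only DatasetConfig, load_dataset, and _process_c4_text appear in this hunk:

from datasets import load_dataset

from torchtitan.datasets.hf_datasets import DatasetConfig, _process_c4_text

# Placeholder entry mirroring the "c4_test" registration above; the enclosing
# registry that such an entry would be added to is not shown in this diff.
extra_datasets = {
    "my_local_set": DatasetConfig(
        path="tests/assets/my_local_set",                    # hypothetical local data
        loader=lambda path: load_dataset(path, split="train"),
        text_processor=_process_c4_text,                     # reuse the c4 text processor
    ),
}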
2 changes: 1 addition & 1 deletion train_configs/debug_model.toml
@@ -24,7 +24,7 @@ name = "llama3"
flavor = "debugmodel"
norm_type = "rmsnorm" # layernorm / np_layernorm / rmsnorm / fused_rmsnorm
# test tokenizer.model, for debug purpose only
- tokenizer_path = "./test/assets/test_tiktoken.model"
+ tokenizer_path = "./tests/assets/test_tiktoken.model"

[optimizer]
name = "AdamW"
