
Release: 0.3.21

Commit 2831b5c (parent 6da6d0a)
BobaZooba committed Oct 2, 2023
Showing 7 changed files with 11 additions and 44 deletions.
setup.py (2 changes: 1 addition & 1 deletion)

```diff
@@ -57,7 +57,7 @@
 # Setup
 setup(
     name="xllm",
-    version="0.3.20",
+    version="0.3.21",
     description="Simple & Cutting Edge LLM Finetuning",
     license_files=["LICENSE"],
     long_description=open("README.md", "r", encoding="utf-8").read(),
```
src/xllm/__init__.py (6 changes: 2 additions & 4 deletions)

```diff
@@ -14,18 +14,16 @@
 
 # ruff: noqa: F401
 
-__version__ = "0.3.20"
+__version__ = "0.3.21"
 
 from . import enums, types
 from .cli.download import cli_run_download
 from .cli.fuse import cli_run_fuse
 from .cli.quantize import cli_run_quantize
 from .cli.train import cli_run_train
 from .core.config import HuggingFaceConfig
 from .run.download import download
 from .run.fuse import fuse
 from .run.quantize import quantize
 from .run.train import train
-from .utils.cli import set_environment_variables, setup_cli
+from .utils.cli import setup_cli
 from .utils.logger import dist_logger
 from .utils.post_training import fuse_lora
```
src/xllm/utils/__init__.py (2 changes: 1 addition & 1 deletion)

```diff
@@ -14,7 +14,7 @@
 
 # ruff: noqa: F401
 
-from .cli import set_environment_variables, setup_cli
+from .cli import setup_cli
 from .logger import dist_logger
 from .miscellaneous import is_distributed_training
 from .nn import apply_lora, stabilize_training
```
src/xllm/utils/cli.py (30 changes: 0 additions & 30 deletions)

```diff
@@ -21,34 +21,6 @@
 from xllm import enums
 
 from ..core.config import HuggingFaceConfig
-from ..utils.logger import dist_logger
-
-
-def set_environment_variables(config: HuggingFaceConfig) -> None:
-    if config.huggingface_hub_token is not None:
-        os.environ[enums.EnvironmentVariables.huggingface_hub_token] = config.huggingface_hub_token
-        dist_logger(message=f"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set")
-
-    if config.report_to_wandb:
-        for key, value in zip(
-            [
-                enums.EnvironmentVariables.wandb_api_key,
-                enums.EnvironmentVariables.wandb_project,
-                enums.EnvironmentVariables.wandb_entity,
-            ],
-            [
-                config.wandb_api_key,
-                config.wandb_project,
-                config.wandb_entity,
-            ],
-        ):
-            if value is not None:
-                os.environ[key] = value
-                dist_logger(message=f"Environment variable {key} set")
-    else:
-        os.environ[enums.EnvironmentVariables.wandb_disabled] = "true"
-
-    return None
 
 
 def setup_cli(config: HuggingFaceConfig, logger_path: str = "xllm.log", rotation: str = "5 MB") -> None:
@@ -58,8 +30,6 @@ def setup_cli(config: HuggingFaceConfig, logger_path: str = "xllm.log", rotation
 
     os.environ[enums.EnvironmentVariables.tokenizers_parallelism] = "false"
 
-    set_environment_variables(config=config)
-
     if config.report_to_wandb and enums.EnvironmentVariables.wandb_api_key not in os.environ:
         logger.warning("W&B token not found in env vars")
```
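Note: `set_environment_variables` is removed from the public API in this release, so importing it from `xllm` or `xllm.utils` now raises an `ImportError`. Callers that relied on it can export the same variables themselves before calling `setup_cli`. A minimal sketch of an equivalent helper follows; the function name is illustrative and the variable names assume the conventional Hugging Face / W&B keys rather than xllm's `enums.EnvironmentVariables`:

```python
import os
from typing import Optional


def set_tracking_env(
    hub_token: Optional[str] = None,
    wandb_api_key: Optional[str] = None,
    report_to_wandb: bool = False,
) -> None:
    # Illustrative stand-in for the removed helper, mirroring its logic:
    # set the Hub token if given; set the W&B key only when reporting is
    # enabled, otherwise disable W&B entirely.
    if hub_token is not None:
        os.environ["HUGGING_FACE_HUB_TOKEN"] = hub_token
    if report_to_wandb:
        if wandb_api_key is not None:
            os.environ["WANDB_API_KEY"] = wandb_api_key
    else:
        os.environ["WANDB_DISABLED"] = "true"
```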
src/xllm/utils/post_training.py (4 changes: 4 additions & 0 deletions)

```diff
@@ -45,6 +45,10 @@ def fuse_lora(config: HuggingFaceConfig) -> Tuple[PreTrainedTokenizer, PreTraine
     model = model.merge_and_unload()
     logger.info("LoRA fused")
 
+    model_dtype = next(iter(model.parameters())).dtype
+    if model_dtype != config.dtype:
+        model = model.to(config.dtype)
+
     if config.fused_model_local_path is not None:
         logger.info(f"Saving locally to {config.fused_model_local_path}")
         tokenizer.save_pretrained(
```
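The dtype guard added here keeps fused checkpoints from being saved in the wrong precision: `merge_and_unload()` can leave the merged weights in whatever dtype the adapters were kept in (often float32) rather than the dtype requested in the config. The same pattern standalone, as a sketch with placeholder paths and an assumed float16 target:

```python
import torch
from peft import AutoPeftModelForCausalLM

# Load a LoRA checkpoint and fuse the adapters into the base weights.
# The paths and target dtype below are placeholders.
model = AutoPeftModelForCausalLM.from_pretrained("path/to/lora-checkpoint")
model = model.merge_and_unload()

# Normalize precision before saving, as fuse_lora now does: peek at the
# first parameter's dtype and cast only if it differs from the target.
target_dtype = torch.float16
if next(iter(model.parameters())).dtype != target_dtype:
    model = model.to(target_dtype)

model.save_pretrained("path/to/fused-model")
```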
tests/helpers/patches.py (4 changes: 2 additions & 2 deletions)

```diff
@@ -95,8 +95,8 @@ def push_to_hub(self, *args, **kwargs):
 def patch_peft_model_from_pretrained(monkeypatch: MonkeyPatch) -> Any:
     def mock_peft_model_from_pretrained(*args, **kwargs):
         class MockPeftModel:
-            def __init__(self):
-                ...
+            def parameters(self):
+                yield torch.rand(1024, 1024)
 
             def merge_and_unload(self, *args, **kwargs):
                 return self
```
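This mock change follows from the `fuse_lora` fix above: the new dtype check peeks at the first parameter via `next(iter(model.parameters()))`, so the stub must yield at least one real tensor, and an empty `__init__` is no longer enough. A generator method suffices, as this self-contained sketch shows:

```python
import torch


class MockPeftModel:
    def parameters(self):
        # One tensor is enough: fuse_lora only inspects the first
        # parameter's dtype, so the generator is never fully consumed.
        yield torch.rand(1024, 1024)


print(next(iter(MockPeftModel().parameters())).dtype)  # torch.float32
```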
tests/unit/utils/test_cli.py (7 changes: 1 addition & 6 deletions)

```diff
@@ -1,11 +1,6 @@
 from src.xllm.core.config import HuggingFaceConfig
-from src.xllm.utils.cli import set_environment_variables, setup_cli
+from src.xllm.utils.cli import setup_cli
 
 
 def test_setup_cli(config: HuggingFaceConfig):
     setup_cli(config=config)
-
-
-def test_set_environment_variables():
-    hf_config = HuggingFaceConfig(huggingface_hub_token="hf_token", wandb_api_key="wandb_token")
-    set_environment_variables(config=hf_config)
```