From b6e4bb86f4dc39119f069657b3dd502af7251378 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 9 Mar 2024 16:00:25 +0100 Subject: [PATCH 01/21] feat: use jpeg instead of jpg, use enums instead of strings (#2453) * fix: parse width and height as int when applying metadata (#2452) fixes an issue with A1111 metadata scheme where width and height are strings after splitting resolution * feat: use jpeg instead of jpg, use enums instead of strings --- modules/config.py | 4 ++-- modules/flags.py | 17 +++++++++++------ modules/meta_parser.py | 4 ++-- modules/private_logger.py | 9 +++++---- webui.py | 14 +++++++------- 5 files changed, 27 insertions(+), 21 deletions(-) diff --git a/modules/config.py b/modules/config.py index a68bd2187..ef6de2ae2 100644 --- a/modules/config.py +++ b/modules/config.py @@ -8,7 +8,7 @@ from modules.model_loader import load_file_from_url from modules.util import get_files_from_folder, makedirs_with_log -from modules.flags import Performance, MetadataScheme +from modules.flags import OutputFormat, Performance, MetadataScheme def get_config_path(key, default_value): env = os.getenv(key) @@ -326,7 +326,7 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ default_output_format = get_config_item_or_set_default( key='default_output_format', default_value='png', - validator=lambda x: x in modules.flags.output_formats + validator=lambda x: x in OutputFormat.list() ) default_image_number = get_config_item_or_set_default( key='default_image_number', diff --git a/modules/flags.py b/modules/flags.py index 6f12bc8f3..95621c2b1 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -67,7 +67,7 @@ cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0) } # stop, weight -output_formats = ['png', 'jpg', 'webp'] +output_formats = ['png', 'jpeg', 'webp'] inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] inpaint_option_default = 'Inpaint or Outpaint (default)' @@ -89,11 +89,19 @@ class MetadataScheme(Enum): (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value), ] -lora_count = 5 - controlnet_image_count = 4 +class OutputFormat(Enum): + PNG = 'png' + JPEG = 'jpeg' + WEBP = 'webp' + + @classmethod + def list(cls) -> list: + return list(map(lambda c: c.value, cls)) + + class Steps(IntEnum): QUALITY = 60 SPEED = 30 @@ -120,6 +128,3 @@ def steps(self) -> int | None: def steps_uov(self) -> int | None: return StepsUOV[self.name].value if Steps[self.name] else None - - -performance_selections = Performance.list() diff --git a/modules/meta_parser.py b/modules/meta_parser.py index da8c70b21..546c093fa 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -117,8 +117,8 @@ def get_resolution(key: str, fallback: str | None, source_dict: dict, results: l results.append(-1) else: results.append(gr.update()) - results.append(width) - results.append(height) + results.append(int(width)) + results.append(int(height)) except: results.append(gr.update()) results.append(gr.update()) diff --git a/modules/private_logger.py b/modules/private_logger.py index 01e570a7d..916d7bf0a 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -6,8 +6,9 @@ from PIL import Image from PIL.PngImagePlugin import PngInfo -from modules.util import generate_temp_filename +from modules.flags import OutputFormat from modules.meta_parser import MetadataParser, get_exif +from modules.util import generate_temp_filename log_cache = {} @@ -29,7 +30,7 @@ def log(img, 
metadata, metadata_parser: MetadataParser | None = None, output_for parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else '' image = Image.fromarray(img) - if output_format == 'png': + if output_format == OutputFormat.PNG.value: if parsed_parameters != '': pnginfo = PngInfo() pnginfo.add_text('parameters', parsed_parameters) @@ -37,9 +38,9 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for else: pnginfo = None image.save(local_temp_filename, pnginfo=pnginfo) - elif output_format == 'jpg': + elif output_format == OutputFormat.JPEG.value: image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) - elif output_format == 'webp': + elif output_format == OutputFormat.WEBP.value: image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) else: image.save(local_temp_filename) diff --git a/webui.py b/webui.py index 42dd890f7..5dab79d09 100644 --- a/webui.py +++ b/webui.py @@ -254,7 +254,7 @@ def trigger_metadata_preview(filepath): with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column: with gr.Tab(label='Setting'): performance_selection = gr.Radio(label='Performance', - choices=modules.flags.performance_selections, + choices=flags.Performance.list(), value=modules.config.default_performance) aspect_ratios_selection = gr.Radio(label='Aspect Ratios', choices=modules.config.available_aspect_ratios, value=modules.config.default_aspect_ratio, info='width × height', @@ -262,7 +262,7 @@ def trigger_metadata_preview(filepath): image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number) output_format = gr.Radio(label='Output Format', - choices=modules.flags.output_formats, + choices=flags.OutputFormat.list(), value=modules.config.default_output_format) negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.", @@ -427,8 +427,8 @@ def update_history_link(): disable_preview = gr.Checkbox(label='Disable Preview', value=False, info='Disable preview during generation.') disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results', - value=modules.config.default_performance == 'Extreme Speed', - interactive=modules.config.default_performance != 'Extreme Speed', + value=modules.config.default_performance == flags.Performance.EXTREME_SPEED.value, + interactive=modules.config.default_performance != flags.Performance.EXTREME_SPEED.value, info='Disable intermediate results during generation, only show final gallery.') disable_seed_increment = gr.Checkbox(label='Disable seed increment', info='Disable automatic seed increment when image number is > 1.', @@ -526,9 +526,9 @@ def model_refresh_clicked(): model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) - performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 + - [gr.update(visible=x != 'Extreme Speed')] * 1 + - [gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1, + performance_selection.change(lambda x: [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value)] * 11 + + [gr.update(visible=x != 
flags.Performance.EXTREME_SPEED.value)] * 1 + + [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value, value=x == flags.Performance.EXTREME_SPEED.value, )] * 1, inputs=performance_selection, outputs=[ guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive, From 25650b4bc4a9e6103c1d384d31c61aae13391de5 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 10 Mar 2024 14:34:48 +0100 Subject: [PATCH 02/21] feat: add performance lightning with 4 step LoRA (#2415) * feat: add performance sdxl lightning based on https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors * feat: add method for centralized restriction of features for specific performance modes * feat: add lightning preset --- modules/async_worker.py | 19 +++++++++++++++ modules/config.py | 9 +++++++ modules/flags.py | 9 +++++++ presets/lightning.json | 52 +++++++++++++++++++++++++++++++++++++++++ webui.py | 6 ++--- 5 files changed, 92 insertions(+), 3 deletions(-) create mode 100644 presets/lightning.json diff --git a/modules/async_worker.py b/modules/async_worker.py index a8661f4dd..17c2645c8 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -250,6 +250,25 @@ def handler(async_task): adm_scaler_negative = 1.0 adm_scaler_end = 0.0 + elif performance_selection == Performance.LIGHTNING: + print('Enter Lightning mode.') + progressbar(async_task, 1, 'Downloading Lightning components ...') + loras += [(modules.config.downloading_sdxl_lightning_lora(), 1.0)] + + if refiner_model_name != 'None': + print(f'Refiner disabled in Lightning mode.') + + refiner_model_name = 'None' + sampler_name = 'euler' + scheduler_name = 'sgm_uniform' + sharpness = 0.0 + guidance_scale = 1.0 + adaptive_cfg = 1.0 + refiner_switch = 1.0 + adm_scaler_positive = 1.0 + adm_scaler_negative = 1.0 + adm_scaler_end = 0.0 + print(f'[Parameters] Adaptive CFG = {adaptive_cfg}') print(f'[Parameters] Sharpness = {sharpness}') print(f'[Parameters] ControlNet Softness = {controlnet_softness}') diff --git a/modules/config.py b/modules/config.py index ef6de2ae2..0d4156c7a 100644 --- a/modules/config.py +++ b/modules/config.py @@ -475,6 +475,7 @@ def add_ratio(x): model_filenames = [] lora_filenames = [] sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors' +sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors' def get_model_filenames(folder_paths, name_filter=None): @@ -538,6 +539,14 @@ def downloading_sdxl_lcm_lora(): ) return sdxl_lcm_lora +def downloading_sdxl_lightning_lora(): + load_file_from_url( + url='https://huggingface.co/ByteDance/SDXL-Lightning/resolve/main/sdxl_lightning_4step_lora.safetensors', + model_dir=paths_loras[0], + file_name=sdxl_lightning_lora + ) + return sdxl_lightning_lora + def downloading_controlnet_canny(): load_file_from_url( diff --git a/modules/flags.py b/modules/flags.py index 95621c2b1..c9d13fd81 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -106,23 +106,32 @@ class Steps(IntEnum): QUALITY = 60 SPEED = 30 EXTREME_SPEED = 8 + LIGHTNING = 4 class StepsUOV(IntEnum): QUALITY = 36 SPEED = 18 EXTREME_SPEED = 8 + LIGHTNING = 4 class Performance(Enum): QUALITY = 'Quality' SPEED = 'Speed' EXTREME_SPEED = 'Extreme Speed' + LIGHTNING = 'Lightning' @classmethod def list(cls) -> list: return list(map(lambda c: c.value, cls)) + @classmethod + def has_restricted_features(cls, x) -> bool: + if isinstance(x, Performance): + x = x.value + return x in [cls.EXTREME_SPEED.value, cls.LIGHTNING.value] + def steps(self) -> int | None: 
return Steps[self.name].value if Steps[self.name] else None diff --git a/presets/lightning.json b/presets/lightning.json new file mode 100644 index 000000000..642493586 --- /dev/null +++ b/presets/lightning.json @@ -0,0 +1,52 @@ +{ + "default_model": "juggernautXL_v8Rundiffusion.safetensors", + "default_refiner": "None", + "default_refiner_switch": 0.5, + "default_loras": [ + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ], + [ + "None", + 1.0 + ] + ], + "default_cfg_scale": 4.0, + "default_sample_sharpness": 2.0, + "default_sampler": "dpmpp_2m_sde_gpu", + "default_scheduler": "karras", + "default_performance": "Lightning", + "default_prompt": "", + "default_prompt_negative": "", + "default_styles": [ + "Fooocus V2", + "Fooocus Enhance", + "Fooocus Sharp" + ], + "default_aspect_ratio": "1152*896", + "checkpoint_downloads": { + "juggernautXL_v8Rundiffusion.safetensors": "https://huggingface.co/lllyasviel/fav_models/resolve/main/fav/juggernautXL_v8Rundiffusion.safetensors" + }, + "embeddings_downloads": {}, + "lora_downloads": {}, + "previous_default_models": [ + "juggernautXL_version8Rundiffusion.safetensors", + "juggernautXL_version7Rundiffusion.safetensors", + "juggernautXL_v7Rundiffusion.safetensors", + "juggernautXL_version6Rundiffusion.safetensors", + "juggernautXL_v6Rundiffusion.safetensors" + ] +} \ No newline at end of file diff --git a/webui.py b/webui.py index 5dab79d09..bcd2a5fd1 100644 --- a/webui.py +++ b/webui.py @@ -526,9 +526,9 @@ def model_refresh_clicked(): model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) - performance_selection.change(lambda x: [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value)] * 11 + - [gr.update(visible=x != flags.Performance.EXTREME_SPEED.value)] * 1 + - [gr.update(interactive=x != flags.Performance.EXTREME_SPEED.value, value=x == flags.Performance.EXTREME_SPEED.value, )] * 1, + performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + + [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 + + [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1, inputs=performance_selection, outputs=[ guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive, From db7d2018ca6d34757e1a6b97fab22c4c0ef3cd19 Mon Sep 17 00:00:00 2001 From: xhoxye <129571231+xhoxye@users.noreply.github.com> Date: Sun, 10 Mar 2024 21:42:03 +0800 Subject: [PATCH 03/21] fix: change synthetic refiner switch from 0.5 to 0.8 (#2165) * fix problem 1. In partial redrawing, when refiner is empty, enable use_synthetic_refiner. The default switching timing of 0.5 is too early, which is now modified to SDXL default of 0.8. 2. When using custom steps, the calculation of switching timing is wrong. Now it is modified to calculate "steps x timing" after custom steps are used. 
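In other words, the refiner switch ratio only has meaning relative to the final step count, so it has to be applied after any custom step override. A minimal sketch of that ordering (illustrative names only, not the exact Fooocus variables):

```python
# Illustrative sketch: resolve the effective step count first, then derive the switch step.
def resolve_refiner_switch_step(default_steps: int, custom_steps: int | None, refiner_switch: float) -> int:
    steps = custom_steps if custom_steps is not None else default_steps
    # A ratio of 0.8 means the base model runs the first 80% of steps before the refiner takes over.
    return int(round(steps * refiner_switch))

# With 30 steps, the new synthetic-refiner default of 0.8 switches at step 24 instead of step 15.
assert resolve_refiner_switch_step(30, None, 0.8) == 24
```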
* fix: parse width and height as int when applying metadata (#2452) fixes an issue with A1111 metadata scheme where width and height are strings after splitting resolution * fix: do not attempt to remove non-existing image grid file (#2456) image grid is actually not an image here but a numpy array, as the grid isn't saved by default * feat: add troubleshooting guide to bug report template again (#2489) --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid --- modules/async_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 17c2645c8..d4fbd95d7 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -366,7 +366,7 @@ def handler(async_task): print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}') if refiner_model_name == 'None': use_synthetic_refiner = True - refiner_switch = 0.5 + refiner_switch = 0.8 else: inpaint_head_model_path, inpaint_patch_model_path = None, None print(f'[Inpaint] Parameterized inpaint is disabled.') From 400471f7afa68d6ee90b4cfc4f181e5bc9cf0a6a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Sun, 10 Mar 2024 21:09:49 +0100 Subject: [PATCH 04/21] feat: add config for temp path and temp path cleanup on launch (#1992) * Added options to set the Gradio cache path and clear cache on launch. * Renamed cache to temp * clear temp * feat: do not delete temp folder but only clean content also use fallback to system temp dir see https://github.com/gradio-app/gradio/blob/6683ab2589f9d8658e1f51acc1b7526edce988d3/gradio/utils.py#L1151 * refactor: code cleanup * feat: unify arg --temp-path and new temp_path config value * feat: change default temp dir from gradio to fooocus * refactor: move temp path method definition and configs * feat: rename get_temp_path to init_temp_path --------- Co-authored-by: Magee Co-authored-by: steveyourcreativepeople Co-authored-by: Manuel Schmid --- args_manager.py | 3 --- launch.py | 21 ++++++++++++++------- modules/config.py | 36 +++++++++++++++++++++++++++++++++++- modules/launch_util.py | 20 +++++++++++++++++--- modules/private_logger.py | 2 +- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/args_manager.py b/args_manager.py index c7c1b7ab1..8c3e19182 100644 --- a/args_manager.py +++ b/args_manager.py @@ -49,7 +49,4 @@ if args_parser.args.disable_in_browser: args_parser.args.in_browser = False -if args_parser.args.temp_path is None: - args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus') - args = args_parser.args diff --git a/launch.py b/launch.py index 4269f1fcb..b3b06d6ec 100644 --- a/launch.py +++ b/launch.py @@ -1,6 +1,6 @@ import os -import sys import ssl +import sys print('[System ARGV] ' + str(sys.argv)) @@ -15,15 +15,13 @@ ssl._create_default_https_context = ssl._create_unverified_context - import platform import fooocus_version from build_launcher import build_launcher -from modules.launch_util import is_installed, run, python, run_pip, requirements_met +from modules.launch_util import is_installed, run, python, run_pip, requirements_met, delete_folder_content from modules.model_loader import load_file_from_url - REINSTALL_ALL = False TRY_INSTALL_XFORMERS = False @@ -68,6 +66,7 @@ def prepare_environment(): 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors') ] + def ini_args(): from args_manager import args return args @@ -77,14 +76,23 @@ def ini_args(): build_launcher() args = ini_args() - if 
args.gpu_device_id is not None: os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_device_id) print("Set device to:", args.gpu_device_id) - from modules import config +os.environ['GRADIO_TEMP_DIR'] = config.temp_path + +if config.temp_path_cleanup_on_launch: + print(f'[Cleanup] Attempting to delete content of temp dir {config.temp_path}') + result = delete_folder_content(config.temp_path, '[Cleanup] ') + if result: + print("[Cleanup] Cleanup successful") + else: + print(f"[Cleanup] Failed to delete content of temp dir.") + + def download_models(): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -123,5 +131,4 @@ def download_models(): download_models() - from webui import * diff --git a/modules/config.py b/modules/config.py index 0d4156c7a..669040961 100644 --- a/modules/config.py +++ b/modules/config.py @@ -3,6 +3,7 @@ import math import numbers import args_manager +import tempfile import modules.flags import modules.sdxl_styles @@ -10,6 +11,7 @@ from modules.util import get_files_from_folder, makedirs_with_log from modules.flags import OutputFormat, Performance, MetadataScheme + def get_config_path(key, default_value): env = os.getenv(key) if env is not None and isinstance(env, str): @@ -18,6 +20,7 @@ def get_config_path(key, default_value): else: return os.path.abspath(default_value) + config_path = get_config_path('config_path', "./config.txt") config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} @@ -117,7 +120,7 @@ def get_path_output() -> str: global config_dict path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True) if args_manager.args.output_path: - print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}') + print(f'Overriding config value path_outputs with {args_manager.args.output_path}') config_dict['path_outputs'] = path_output = args_manager.args.output_path return path_output @@ -178,6 +181,7 @@ def get_dir_or_set_default(key, default_value, as_array=False, make_directory=Fa path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_outputs = get_path_output() + def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys @@ -206,6 +210,36 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ return default_value +def init_temp_path(path: str | None, default_path: str) -> str: + if args_manager.args.temp_path: + path = args_manager.args.temp_path + + if path != '' and path != default_path: + try: + if not os.path.isabs(path): + path = os.path.abspath(path) + os.makedirs(path, exist_ok=True) + print(f'Using temp path {path}') + return path + except Exception as e: + print(f'Could not create temp path {path}. 
Reason: {e}') + print(f'Using default temp path {default_path} instead.') + + os.makedirs(default_path, exist_ok=True) + return default_path + + +default_temp_path = os.path.join(tempfile.gettempdir(), 'fooocus') +temp_path = init_temp_path(get_config_item_or_set_default( + key='temp_path', + default_value=default_temp_path, + validator=lambda x: isinstance(x, str), +), default_temp_path) +temp_path_cleanup_on_launch = get_config_item_or_set_default( + key='temp_path_cleanup_on_launch', + default_value=True, + validator=lambda x: isinstance(x, bool) +) default_base_model_name = get_config_item_or_set_default( key='default_model', default_value='model.safetensors', diff --git a/modules/launch_util.py b/modules/launch_util.py index b483d5158..370dc0489 100644 --- a/modules/launch_util.py +++ b/modules/launch_util.py @@ -1,6 +1,7 @@ import os import importlib import importlib.util +import shutil import subprocess import sys import re @@ -9,9 +10,6 @@ import packaging.version from packaging.requirements import Requirement - - - logging.getLogger("torch.distributed.nn").setLevel(logging.ERROR) # sshh... logging.getLogger("xformers").addFilter(lambda record: 'A matching Triton is not available' not in record.getMessage()) @@ -101,3 +99,19 @@ def requirements_met(requirements_file): return True + +def delete_folder_content(folder, prefix=None): + result = True + + for filename in os.listdir(folder): + file_path = os.path.join(folder, filename) + try: + if os.path.isfile(file_path) or os.path.islink(file_path): + os.unlink(file_path) + elif os.path.isdir(file_path): + shutil.rmtree(file_path) + except Exception as e: + print(f'{prefix}Failed to delete {file_path}. Reason: {e}') + result = False + + return result \ No newline at end of file diff --git a/modules/private_logger.py b/modules/private_logger.py index 916d7bf0a..edd9457d2 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -22,7 +22,7 @@ def get_current_html_path(output_format=None): def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str: - path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs + path_outputs = modules.config.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs output_format = output_format if output_format else modules.config.default_output_format date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) From f6117180d4e02fa90e356755f86ca661af628542 Mon Sep 17 00:00:00 2001 From: Cruxial Date: Sun, 10 Mar 2024 21:35:41 +0100 Subject: [PATCH 05/21] feat: scan wildcard subdirectories (#2466) * Fix typo * Scan wildcards recursively Adds a method for getting the top-most occurrence of a given file in a directory tree * Use already existing method for locating files * Fix issue with incorrect files being loaded When using the `name-filter` parameter in `get_model_filenames`, it doesn't guarantee the best match to be in the first index. This change adds a step to ensure the correct wildcard is being loaded. 
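The mismatch described above is easiest to avoid with an exact basename comparison: a substring-based name filter can match several files, so a placeholder should only resolve to the file whose basename equals it. A simplified sketch of that idea (not the exact code from this diff):

```python
import os

# Simplified sketch: resolve a wildcard placeholder to the file whose basename matches it exactly,
# instead of trusting the first hit returned by a substring-based name filter.
def find_wildcard_file(placeholder: str, wildcard_filenames: list[str]) -> str | None:
    matches = [f for f in wildcard_filenames
               if os.path.splitext(os.path.basename(f))[0] == placeholder]
    return matches[0] if matches else None

# '__color__' should load 'color.txt', not 'subdir/color_flower.txt'.
files = ['subdir/color_flower.txt', 'color.txt']
assert find_wildcard_file('color', files) == 'color.txt'
```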
* feat: make path for wildcards configurable, cache filenames on refresh files, rename button variable * Fix formatting --------- Co-authored-by: Manuel Schmid --- modules/config.py | 15 ++++++++++----- modules/sdxl_styles.py | 11 ++++++----- modules/util.py | 4 ++-- webui.py | 9 ++++----- 4 files changed, 22 insertions(+), 17 deletions(-) diff --git a/modules/config.py b/modules/config.py index 669040961..83590a24b 100644 --- a/modules/config.py +++ b/modules/config.py @@ -179,6 +179,7 @@ def get_dir_or_set_default(key, default_value, as_array=False, make_directory=Fa path_controlnet = get_dir_or_set_default('path_controlnet', '../models/controlnet/') path_clip_vision = get_dir_or_set_default('path_clip_vision', '../models/clip_vision/') path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') +path_wildcards = get_dir_or_set_default('path_wildcards', '../wildcards/') path_outputs = get_path_output() @@ -508,22 +509,26 @@ def add_ratio(x): model_filenames = [] lora_filenames = [] +wildcard_filenames = [] + sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors' sdxl_lightning_lora = 'sdxl_lightning_4step_lora.safetensors' -def get_model_filenames(folder_paths, name_filter=None): - extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] +def get_model_filenames(folder_paths, extensions=None, name_filter=None): + if extensions is None: + extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] files = [] for folder in folder_paths: files += get_files_from_folder(folder, extensions, name_filter) return files -def update_all_model_names(): - global model_filenames, lora_filenames +def update_files(): + global model_filenames, lora_filenames, wildcard_filenames model_filenames = get_model_filenames(paths_checkpoints) lora_filenames = get_model_filenames(paths_loras) + wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt']) return @@ -647,4 +652,4 @@ def downloading_upscale_model(): return os.path.join(path_upscale_models, 'fooocus_upscaler_s409985e5.bin') -update_all_model_names() +update_files() diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index 2a310024c..0b07339c8 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -2,13 +2,12 @@ import re import json import math +import modules.config from modules.util import get_files_from_folder - # cannot use modules.config - validators causing circular imports styles_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../sdxl_styles/')) -wildcards_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '../wildcards/')) wildcards_max_bfs_depth = 64 @@ -60,7 +59,7 @@ def apply_style(style, positive): return p.replace('{prompt}', positive).splitlines(), n.splitlines() -def apply_wildcards(wildcard_text, rng, directory=wildcards_path): +def apply_wildcards(wildcard_text, rng): for _ in range(wildcards_max_bfs_depth): placeholders = re.findall(r'__([\w-]+)__', wildcard_text) if len(placeholders) == 0: @@ -69,7 +68,8 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] processing: {wildcard_text}') for placeholder in placeholders: try: - words = open(os.path.join(directory, f'{placeholder}.txt'), encoding='utf-8').read().splitlines() + matches = [x for x in modules.config.wildcard_filenames if os.path.splitext(os.path.basename(x))[0] == placeholder] + words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines() words = [x for x 
in words if x != ''] assert len(words) > 0 wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1) @@ -82,8 +82,9 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] BFS stack overflow. Current text: {wildcard_text}') return wildcard_text + def get_words(arrays, totalMult, index): - if(len(arrays) == 1): + if len(arrays) == 1: return [arrays[0].split(',')[index]] else: words = arrays[0].split(',') diff --git a/modules/util.py b/modules/util.py index c7923ec82..9c432eb61 100644 --- a/modules/util.py +++ b/modules/util.py @@ -163,7 +163,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'): return date_string, os.path.abspath(result), filename -def get_files_from_folder(folder_path, exensions=None, name_filter=None): +def get_files_from_folder(folder_path, extensions=None, name_filter=None): if not os.path.isdir(folder_path): raise ValueError("Folder path is not a valid directory.") @@ -175,7 +175,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): relative_path = "" for filename in sorted(files, key=lambda s: s.casefold()): _, file_extension = os.path.splitext(filename) - if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _): + if (extensions is None or file_extension.lower() in extensions) and (name_filter is None or name_filter in _): path = os.path.join(relative_path, filename) filenames.append(path) diff --git a/webui.py b/webui.py index c0f1ec91c..808db72d9 100644 --- a/webui.py +++ b/webui.py @@ -366,7 +366,7 @@ def update_history_link(): lora_ctrls += [lora_enabled, lora_model, lora_weight] with gr.Row(): - model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') + refresh_files = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files', variant='secondary', elem_classes='refresh_button') with gr.Tab(label='Advanced'): guidance_scale = gr.Slider(label='Guidance Scale', minimum=1.0, maximum=30.0, step=0.01, value=modules.config.default_cfg_scale, @@ -512,19 +512,18 @@ def update_history_link(): def dev_mode_checked(r): return gr.update(visible=r) - dev_mode.change(dev_mode_checked, inputs=[dev_mode], outputs=[dev_tools], queue=False, show_progress=False) - def model_refresh_clicked(): - modules.config.update_all_model_names() + def refresh_files_clicked(): + modules.config.update_files() results = [gr.update(choices=modules.config.model_filenames)] results += [gr.update(choices=['None'] + modules.config.model_filenames)] for i in range(modules.config.default_max_lora_number): results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] return results - model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, + refresh_files.click(refresh_files_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + From ead24c9361337a1ab52720c85b3daab431b00f24 Mon Sep 17 00:00:00 2001 From: xhoxye <129571231+xhoxye@users.noreply.github.com> Date: Mon, 11 Mar 2024 06:18:36 +0800 Subject: [PATCH 06/21] =?UTF-8?q?feat:=20read=20wildcards=20in=20order=20?= =?UTF-8?q?=E9=80=9A=E9=85=8D=E7=AC=A6=E5=A2=9E=E5=BC=BA=EF=BC=8C=E5=88=87?= =?UTF-8?q?=E6=8D=A2=E9=A1=BA=E5=BA=8F=E8=AF=BB=E5=8F=96=E3=80=82(#1761)?= MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Wildcard enhancement: switch to reading in order. A checkbox toggles how wildcards are read: unchecked (default) picks a random line, checked reads the lines in order while reusing the same seed. * Code contributed by 刁璐璐 * update * Update async_worker.py * refactor: rename read_wildcard_in_order_checkbox to read_wildcard_in_order * fix: use correct method call for interrupt_current_processing actually achieves the same result, stopping the task * refactor: move checkbox to developer debug mode, rename to plural below disable seed increment * refactor: code cleanup, separate code for disable_seed_increment * i18n: add translation for checkbox text --------- Co-authored-by: Manuel Schmid --- language/en.json | 1 + modules/async_worker.py | 12 +++++++----- modules/sdxl_styles.py | 7 +++++-- webui.py | 4 +++- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/language/en.json b/language/en.json index f61255c96..241c5d541 100644 --- a/language/en.json +++ b/language/en.json @@ -50,6 +50,7 @@ "Seed": "Seed", "Disable seed increment": "Disable seed increment", "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.", + "Read wildcards in order": "Read wildcards in order", "\ud83d\udcda History Log": "\uD83D\uDCDA History Log", "Image Style": "Image Style", "Fooocus V2": "Fooocus V2", diff --git a/modules/async_worker.py b/modules/async_worker.py index c7df14f5d..c5953a580 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -1,4 +1,5 @@ import threading +import re from modules.patch import PatchSettings, patch_settings, patch_all patch_all() @@ -148,6 +149,7 @@ def handler(async_task): image_number = args.pop() output_format = args.pop() image_seed = args.pop() + read_wildcards_in_order = args.pop() sharpness = args.pop() guidance_scale = args.pop() base_model_name = args.pop() @@ -441,16 +443,16 @@ def handler(async_task): for i in range(image_number): if disable_seed_increment: - task_seed = seed + task_seed = seed % (constants.MAX_SEED + 1) else: task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not task_rng = random.Random(task_seed) # may bind to inpaint noise in the future - task_prompt = apply_wildcards(prompt, task_rng) + task_prompt = apply_wildcards(prompt, task_rng, i, read_wildcards_in_order) task_prompt = apply_arrays(task_prompt, i) - task_negative_prompt = apply_wildcards(negative_prompt, task_rng) - task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts] - task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts] + task_negative_prompt = apply_wildcards(negative_prompt, task_rng, i, read_wildcards_in_order) + task_extra_positive_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_positive_prompts] + task_extra_negative_prompts = [apply_wildcards(pmt, task_rng, i, read_wildcards_in_order) for pmt in extra_negative_prompts] positive_basic_workloads = [] negative_basic_workloads = [] diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index 0b07339c8..77ad6b574 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -59,7 +59,7 @@ def apply_style(style, positive): return p.replace('{prompt}', positive).splitlines(), n.splitlines() -def apply_wildcards(wildcard_text, rng): +def apply_wildcards(wildcard_text, rng, i, read_wildcards_in_order): for _ in range(wildcards_max_bfs_depth): placeholders = re.findall(r'__([\w-]+)__', wildcard_text) if len(placeholders) == 0: @@ -72,7 +72,10 @@ def 
apply_wildcards(wildcard_text, rng): words = open(os.path.join(modules.config.path_wildcards, matches[0]), encoding='utf-8').read().splitlines() words = [x for x in words if x != ''] assert len(words) > 0 - wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1) + if read_wildcards_in_order: + wildcard_text = wildcard_text.replace(f'__{placeholder}__', words[i % len(words)], 1) + else: + wildcard_text = wildcard_text.replace(f'__{placeholder}__', rng.choice(words), 1) except: print(f'[Wildcards] Warning: {placeholder}.txt missing or empty. ' f'Using "{placeholder}" as a normal word.') diff --git a/webui.py b/webui.py index 808db72d9..ee7edc2d8 100644 --- a/webui.py +++ b/webui.py @@ -434,6 +434,7 @@ def update_history_link(): disable_seed_increment = gr.Checkbox(label='Disable seed increment', info='Disable automatic seed increment when image number is > 1.', value=False) + read_wildcards_in_order = gr.Checkbox(label="Read wildcards in order", value=False) if not args_manager.args.disable_metadata: save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, @@ -578,7 +579,8 @@ def inpaint_mode_change(mode): ctrls = [currentTask, generate_image_grid] ctrls += [ prompt, negative_prompt, style_selections, - performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale + performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, + read_wildcards_in_order, sharpness, guidance_scale ] ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls From 84e3124c37e26acb39371b73e00edbb611335cd9 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 11 Mar 2024 00:47:31 +0100 Subject: [PATCH 07/21] i18n: add translation for lightning --- language/en.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/language/en.json b/language/en.json index 241c5d541..3e42fff0c 100644 --- a/language/en.json +++ b/language/en.json @@ -41,6 +41,8 @@ "Performance": "Performance", "Speed": "Speed", "Quality": "Quality", + "Extreme Speed": "Extreme Speed", + "Lightning": "Lightning", "Aspect Ratios": "Aspect Ratios", "width \u00d7 height": "width \u00d7 height", "Image Number": "Image Number", @@ -368,7 +370,6 @@ "B2": "B2", "S1": "S1", "S2": "S2", - "Extreme Speed": "Extreme Speed", "\uD83D\uDD0E Type here to search styles ...": "\uD83D\uDD0E Type here to search styles ...", "Type prompt here.": "Type prompt here.", "Outpaint Expansion Direction:": "Outpaint Expansion Direction:", From 2831dc70a7fb772f077776d7e92208af2ff62c7b Mon Sep 17 00:00:00 2001 From: hswlab Date: Mon, 11 Mar 2024 16:35:03 +0100 Subject: [PATCH 08/21] feat: use scrollable 2 column layout for styles (#1883) * Styles Grouping/Sorting #1770 * Update css/style.css Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> * Update javascript/script.js Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> * feat: use standard padding again --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid --- css/style.css | 45 ++++++++++++++++++++++++++++++++++++++++++++ javascript/script.js | 16 +++++++++++----- webui.py | 2 +- 3 files changed, 57 insertions(+), 6 deletions(-) diff --git a/css/style.css b/css/style.css index 010c8e7f6..3cc1e5e52 100644 --- a/css/style.css +++ b/css/style.css @@ -218,3 +218,48 @@ #stylePreviewOverlay.lower-half { transform: translate(-140px, 
-140px); } + +/* scrollable box for style selections */ +.contain .tabs { + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab { + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab > div:first-child { + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections { + min-height: 200px; + height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] { + position: absolute; /* remove this to disable scrolling within the checkbox-group */ + overflow: auto; + padding-right: 2px; + max-height: 100%; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label { + /* max-width: calc(35% - 15px) !important; */ /* add this to enable 3 columns layout */ + flex: calc(50% - 5px) !important; +} + +.contain .tabs .tabitem.style_selections_tab .style_selections .wrap[data-testid="checkbox-group"] label span { + /* white-space:nowrap; */ /* add this to disable text wrapping (better choice for 3 columns layout) */ + overflow: hidden; + text-overflow: ellipsis; +} + +/* styles preview tooltip */ +.preview-tooltip { + background-color: #fff8; + font-family: monospace; + text-align: center; + border-radius-top: 5px; + display: none; /* remove this to enable tooltip in preview image */ +} \ No newline at end of file diff --git a/javascript/script.js b/javascript/script.js index 8f4cac58f..9aa0b5c16 100644 --- a/javascript/script.js +++ b/javascript/script.js @@ -150,9 +150,12 @@ function initStylePreviewOverlay() { let overlayVisible = false; const samplesPath = document.querySelector("meta[name='samples-path']").getAttribute("content") const overlay = document.createElement('div'); + const tooltip = document.createElement('div'); + tooltip.className = 'preview-tooltip'; + overlay.appendChild(tooltip); overlay.id = 'stylePreviewOverlay'; document.body.appendChild(overlay); - document.addEventListener('mouseover', function(e) { + document.addEventListener('mouseover', function (e) { const label = e.target.closest('.style_selections label'); if (!label) return; label.removeEventListener("mouseout", onMouseLeave); @@ -162,9 +165,12 @@ function initStylePreviewOverlay() { const originalText = label.querySelector("span").getAttribute("data-original-text"); const name = originalText || label.querySelector("span").textContent; overlay.style.backgroundImage = `url("${samplesPath.replace( - "fooocus_v2", - name.toLowerCase().replaceAll(" ", "_") + "fooocus_v2", + name.toLowerCase().replaceAll(" ", "_") ).replaceAll("\\", "\\\\")}")`; + + tooltip.textContent = name; + function onMouseLeave() { overlayVisible = false; overlay.style.opacity = "0"; @@ -172,8 +178,8 @@ function initStylePreviewOverlay() { label.removeEventListener("mouseout", onMouseLeave); } }); - document.addEventListener('mousemove', function(e) { - if(!overlayVisible) return; + document.addEventListener('mousemove', function (e) { + if (!overlayVisible) return; overlay.style.left = `${e.clientX}px`; overlay.style.top = `${e.clientY}px`; overlay.className = e.clientY > window.innerHeight / 2 ? 
"lower-half" : "upper-half"; diff --git a/webui.py b/webui.py index ee7edc2d8..832cc1943 100644 --- a/webui.py +++ b/webui.py @@ -300,7 +300,7 @@ def update_history_link(): history_link = gr.HTML() shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) - with gr.Tab(label='Style'): + with gr.Tab(label='Style', elem_classes=['style_selections_tab']): style_sorter.try_load_sorted_styles( style_names=legal_style_names, default_selected=modules.config.default_styles) From 39669453cda5bbbbdb322246beda195d0ae46af6 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Mon, 11 Mar 2024 17:59:58 +0100 Subject: [PATCH 09/21] feat: allow to add disabled LoRAs in config on application start (#2507) add LoRA checkbox enable/disable handling to all necessary occurrences --- modules/config.py | 7 ++++++- modules/core.py | 13 ++++++++----- presets/anime.json | 5 +++++ presets/default.json | 5 +++++ presets/lcm.json | 5 +++++ presets/realistic.json | 5 +++++ presets/sai.json | 5 +++++ webui.py | 8 ++++---- 8 files changed, 43 insertions(+), 10 deletions(-) diff --git a/modules/config.py b/modules/config.py index 83590a24b..8fec8e05b 100644 --- a/modules/config.py +++ b/modules/config.py @@ -275,27 +275,32 @@ def init_temp_path(path: str | None, default_path: str) -> str: key='default_loras', default_value=[ [ + True, "None", 1.0 ], [ + True, "None", 1.0 ], [ + True, "None", 1.0 ], [ + True, "None", 1.0 ], [ + True, "None", 1.0 ] ], - validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x) + validator=lambda x: isinstance(x, list) and all(len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number) for y in x) ) default_max_lora_number = get_config_item_or_set_default( key='default_max_lora_number', diff --git a/modules/core.py b/modules/core.py index bfc449661..e8e19397c 100644 --- a/modules/core.py +++ b/modules/core.py @@ -73,14 +73,17 @@ def refresh_loras(self, loras): loras_to_load = [] - for name, weight in loras: - if name == 'None': + for enabled, filename, weight in loras: + if not enabled: continue - if os.path.exists(name): - lora_filename = name + if filename == 'None': + continue + + if os.path.exists(filename): + lora_filename = filename else: - lora_filename = get_file_from_folder_list(name, modules.config.paths_loras) + lora_filename = get_file_from_folder_list(filename, modules.config.paths_loras) if not os.path.exists(lora_filename): print(f'Lora file not found: {lora_filename}') diff --git a/presets/anime.json b/presets/anime.json index 8bd2813bc..1f2b26a97 100644 --- a/presets/anime.json +++ b/presets/anime.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/default.json b/presets/default.json index 7930c92f0..963f7a631 100644 --- a/presets/default.json +++ b/presets/default.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + false, "sd_xl_offset_example-lora_1.0.safetensors", 0.1 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/lcm.json b/presets/lcm.json index 3897f8812..6713fdd50 100644 --- a/presets/lcm.json +++ b/presets/lcm.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "None", 1.0 ], 
[ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/realistic.json b/presets/realistic.json index 7799c96a4..95f8b6e0a 100644 --- a/presets/realistic.json +++ b/presets/realistic.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "SDXL_FILM_PHOTOGRAPHY_STYLE_BetaV0.4.safetensors", 0.25 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/presets/sai.json b/presets/sai.json index fecf047bf..918028f37 100644 --- a/presets/sai.json +++ b/presets/sai.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.75, "default_loras": [ [ + true, "sd_xl_offset_example-lora_1.0.safetensors", 0.5 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] diff --git a/webui.py b/webui.py index 832cc1943..7fe10f153 100644 --- a/webui.py +++ b/webui.py @@ -353,15 +353,15 @@ def update_history_link(): with gr.Group(): lora_ctrls = [] - for i, (n, v) in enumerate(modules.config.default_loras): + for i, (enabled, filename, weight) in enumerate(modules.config.default_loras): with gr.Row(): - lora_enabled = gr.Checkbox(label='Enable', value=True, + lora_enabled = gr.Checkbox(label='Enable', value=enabled, elem_classes=['lora_enable', 'min_check'], scale=1) lora_model = gr.Dropdown(label=f'LoRA {i + 1}', - choices=['None'] + modules.config.lora_filenames, value=n, + choices=['None'] + modules.config.lora_filenames, value=filename, elem_classes='lora_model', scale=5) lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, - maximum=modules.config.default_loras_max_weight, step=0.01, value=v, + maximum=modules.config.default_loras_max_weight, step=0.01, value=weight, elem_classes='lora_weight', scale=5) lora_ctrls += [lora_enabled, lora_model, lora_weight] From d57afc88a48359bc1642c2ae30a091f0426eff43 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 11 Mar 2024 18:26:04 +0100 Subject: [PATCH 10/21] feat: merge webui css into one file --- css/style.css | 131 +++++++++++++++++++++++++++++++++++++++++++++++ modules/html.py | 133 ------------------------------------------------ webui.py | 4 +- 3 files changed, 132 insertions(+), 136 deletions(-) diff --git a/css/style.css b/css/style.css index 3cc1e5e52..c702a7257 100644 --- a/css/style.css +++ b/css/style.css @@ -1,5 +1,136 @@ /* based on https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/v1.6.0/style.css */ +.loader-container { + display: flex; /* Use flex to align items horizontally */ + align-items: center; /* Center items vertically within the container */ + white-space: nowrap; /* Prevent line breaks within the container */ +} + +.loader { + border: 8px solid #f3f3f3; /* Light grey */ + border-top: 8px solid #3498db; /* Blue */ + border-radius: 50%; + width: 30px; + height: 30px; + animation: spin 2s linear infinite; +} + +@keyframes spin { + 0% { transform: rotate(0deg); } + 100% { transform: rotate(360deg); } +} + +/* Style the progress bar */ +progress { + appearance: none; /* Remove default styling */ + height: 20px; /* Set the height of the progress bar */ + border-radius: 5px; /* Round the corners of the progress bar */ + background-color: #f3f3f3; /* Light grey background */ + width: 100%; +} + +/* Style the progress bar container */ +.progress-container { + margin-left: 20px; + margin-right: 20px; + flex-grow: 1; /* Allow the progress container to take up remaining space */ +} + +/* Set the color of the 
progress bar fill */ +progress::-webkit-progress-value { + background-color: #3498db; /* Blue color for the fill */ +} + +progress::-moz-progress-bar { + background-color: #3498db; /* Blue color for the fill in Firefox */ +} + +/* Style the text on the progress bar */ +progress::after { + content: attr(value '%'); /* Display the progress value followed by '%' */ + position: absolute; + top: 50%; + left: 50%; + transform: translate(-50%, -50%); + color: white; /* Set text color */ + font-size: 14px; /* Set font size */ +} + +/* Style other texts */ +.loader-container > span { + margin-left: 5px; /* Add spacing between the progress bar and the text */ +} + +.progress-bar > .generating { + display: none !important; +} + +.progress-bar{ + height: 30px !important; +} + +.type_row{ + height: 80px !important; +} + +.type_row_half{ + height: 32px !important; +} + +.scroll-hide{ + resize: none !important; +} + +.refresh_button{ + border: none !important; + background: none !important; + font-size: none !important; + box-shadow: none !important; +} + +.advanced_check_row{ + width: 250px !important; +} + +.min_check{ + min-width: min(1px, 100%) !important; +} + +.resizable_area { + resize: vertical; + overflow: auto !important; +} + +.aspect_ratios label { + width: 140px !important; +} + +.aspect_ratios label span { + white-space: nowrap !important; +} + +.aspect_ratios label input { + margin-left: -5px !important; +} + +.lora_enable label { + height: 100%; +} + +.lora_enable label input { + margin: auto; +} + +.lora_enable label span { + display: none; +} + +@-moz-document url-prefix() { + .lora_weight input[type=number] { + width: 80px; + } +} + #context-menu{ z-index:9999; position:absolute; diff --git a/modules/html.py b/modules/html.py index 769151a9f..25771cb9f 100644 --- a/modules/html.py +++ b/modules/html.py @@ -1,136 +1,3 @@ -css = ''' -.loader-container { - display: flex; /* Use flex to align items horizontally */ - align-items: center; /* Center items vertically within the container */ - white-space: nowrap; /* Prevent line breaks within the container */ -} - -.loader { - border: 8px solid #f3f3f3; /* Light grey */ - border-top: 8px solid #3498db; /* Blue */ - border-radius: 50%; - width: 30px; - height: 30px; - animation: spin 2s linear infinite; -} - -@keyframes spin { - 0% { transform: rotate(0deg); } - 100% { transform: rotate(360deg); } -} - -/* Style the progress bar */ -progress { - appearance: none; /* Remove default styling */ - height: 20px; /* Set the height of the progress bar */ - border-radius: 5px; /* Round the corners of the progress bar */ - background-color: #f3f3f3; /* Light grey background */ - width: 100%; -} - -/* Style the progress bar container */ -.progress-container { - margin-left: 20px; - margin-right: 20px; - flex-grow: 1; /* Allow the progress container to take up remaining space */ -} - -/* Set the color of the progress bar fill */ -progress::-webkit-progress-value { - background-color: #3498db; /* Blue color for the fill */ -} - -progress::-moz-progress-bar { - background-color: #3498db; /* Blue color for the fill in Firefox */ -} - -/* Style the text on the progress bar */ -progress::after { - content: attr(value '%'); /* Display the progress value followed by '%' */ - position: absolute; - top: 50%; - left: 50%; - transform: translate(-50%, -50%); - color: white; /* Set text color */ - font-size: 14px; /* Set font size */ -} - -/* Style other texts */ -.loader-container > span { - margin-left: 5px; /* Add spacing between the progress bar and the text */ 
-} - -.progress-bar > .generating { - display: none !important; -} - -.progress-bar{ - height: 30px !important; -} - -.type_row{ - height: 80px !important; -} - -.type_row_half{ - height: 32px !important; -} - -.scroll-hide{ - resize: none !important; -} - -.refresh_button{ - border: none !important; - background: none !important; - font-size: none !important; - box-shadow: none !important; -} - -.advanced_check_row{ - width: 250px !important; -} - -.min_check{ - min-width: min(1px, 100%) !important; -} - -.resizable_area { - resize: vertical; - overflow: auto !important; -} - -.aspect_ratios label { - width: 140px !important; -} - -.aspect_ratios label span { - white-space: nowrap !important; -} - -.aspect_ratios label input { - margin-left: -5px !important; -} - -.lora_enable label { - height: 100%; -} - -.lora_enable label input { - margin: auto; -} - -.lora_enable label span { - display: none; -} - -@-moz-document url-prefix() { - .lora_weight input[type=number] { - width: 80px; - } -} - -''' progress_html = '''
diff --git a/webui.py b/webui.py index 7fe10f153..d68ade627 100644 --- a/webui.py +++ b/webui.py @@ -91,9 +91,7 @@ def generate_clicked(task): if isinstance(args_manager.args.preset, str): title += ' ' + args_manager.args.preset -shared.gradio_root = gr.Blocks( - title=title, - css=modules.html.css).queue() +shared.gradio_root = gr.Blocks(title=title).queue() with shared.gradio_root: currentTask = gr.State(worker.AsyncTask(args=[])) From 532401df766af637488e194f39fe1cec1ddd4739 Mon Sep 17 00:00:00 2001 From: Giuseppe Speranza Date: Mon, 11 Mar 2024 19:58:25 +0100 Subject: [PATCH 11/21] fix: prioritize VRAM over RAM in Colab, preventing out of memory issues (#1710) * colab: balance the use of RAM enables the use of VRAM memory so as not to saturate the system RAM * feat: use --always-high-vram by default for Colab, adjust readme --------- Co-authored-by: Manuel Schmid --- fooocus_colab.ipynb | 2 +- readme.md | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/fooocus_colab.ipynb b/fooocus_colab.ipynb index 205dac55d..7fa988798 100644 --- a/fooocus_colab.ipynb +++ b/fooocus_colab.ipynb @@ -12,7 +12,7 @@ "%cd /content\n", "!git clone https://github.com/lllyasviel/Fooocus.git\n", "%cd /content/Fooocus\n", - "!python entry_with_update.py --share\n" + "!python entry_with_update.py --share --always-high-vram\n" ] } ], diff --git a/readme.md b/readme.md index 0bfee5b4c..4e47ac088 100644 --- a/readme.md +++ b/readme.md @@ -115,16 +115,18 @@ See also the common problems and troubleshoots [here](troubleshoot.md). ### Colab -(Last tested - 2023 Dec 12) +(Last tested - 2024 Mar 11) | Colab | Info | --- | --- | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/lllyasviel/Fooocus/blob/main/fooocus_colab.ipynb) | Fooocus Official -In Colab, you can modify the last line to `!python entry_with_update.py --share` or `!python entry_with_update.py --preset anime --share` or `!python entry_with_update.py --preset realistic --share` for Fooocus Default/Anime/Realistic Edition. +In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition. Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. +Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. + Thanks to [camenduru](https://github.com/camenduru)! 
### Linux (Using Anaconda) From 57a01865b99e3334fc83da25adc48ab989d853ab Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 11 Mar 2024 23:49:45 +0100 Subject: [PATCH 12/21] refactor: only use LoRA activate on handover to async worker, extract method --- modules/async_worker.py | 14 +++----------- modules/core.py | 5 +---- modules/default_pipeline.py | 4 ++-- modules/util.py | 4 ++++ presets/lightning.json | 5 +++++ 5 files changed, 15 insertions(+), 17 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index c5953a580..ee9978526 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -46,8 +46,8 @@ def worker(): from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays from modules.private_logger import log from extras.expansion import safe_str - from modules.util import remove_empty_str, HWC3, resize_image, \ - get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix + from modules.util import remove_empty_str, HWC3, resize_image, get_image_shape_ceil, set_image_shape_ceil, \ + get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix, get_enabled_loras from modules.upscaler import perform_upscale from modules.flags import Performance from modules.meta_parser import get_metadata_parser, MetadataScheme @@ -124,14 +124,6 @@ def build_image_wall(async_task): async_task.results = async_task.results + [wall] return - def apply_enabled_loras(loras): - enabled_loras = [] - for lora_enabled, lora_model, lora_weight in loras: - if lora_enabled: - enabled_loras.append([lora_model, lora_weight]) - - return enabled_loras - @torch.no_grad() @torch.inference_mode() def handler(async_task): @@ -155,7 +147,7 @@ def handler(async_task): base_model_name = args.pop() refiner_model_name = args.pop() refiner_switch = args.pop() - loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)]) + loras = get_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop())] for _ in range(modules.config.default_max_lora_number)]) input_image_checkbox = args.pop() current_tab = args.pop() uov_method = args.pop() diff --git a/modules/core.py b/modules/core.py index e8e19397c..38ee8e8dc 100644 --- a/modules/core.py +++ b/modules/core.py @@ -73,10 +73,7 @@ def refresh_loras(self, loras): loras_to_load = [] - for enabled, filename, weight in loras: - if not enabled: - continue - + for filename, weight in loras: if filename == 'None': continue diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py index f8edfae10..190601ecf 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -11,7 +11,7 @@ from ldm_patched.modules.model_base import SDXL, SDXLRefiner from modules.sample_hijack import clip_separate -from modules.util import get_file_from_folder_list +from modules.util import get_file_from_folder_list, get_enabled_loras model_base = core.StableDiffusionModel() @@ -254,7 +254,7 @@ def refresh_everything(refiner_model_name, base_model_name, loras, refresh_everything( refiner_model_name=modules.config.default_refiner_model_name, base_model_name=modules.config.default_base_model_name, - loras=modules.config.default_loras + loras=get_enabled_loras(modules.config.default_loras) ) diff --git a/modules/util.py b/modules/util.py index 9c432eb61..7c46d946c 100644 --- a/modules/util.py +++ b/modules/util.py @@ -360,3 +360,7 @@ def makedirs_with_log(path): os.makedirs(path, 
exist_ok=True) except OSError as error: print(f'Directory {path} could not be created, reason: {error}') + + +def get_enabled_loras(loras: list) -> list: + return [[lora[1], lora[2]] for lora in loras if lora[0]] diff --git a/presets/lightning.json b/presets/lightning.json index 642493586..d1466c10d 100644 --- a/presets/lightning.json +++ b/presets/lightning.json @@ -4,22 +4,27 @@ "default_refiner_switch": 0.5, "default_loras": [ [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ], [ + true, "None", 1.0 ] From 4363dbc303f6c022bfeccb43c2b55f4a19fc96a5 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Wed, 13 Mar 2024 00:32:54 +0100 Subject: [PATCH 13/21] fix: revert testing change to default lora activation --- presets/default.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/presets/default.json b/presets/default.json index 963f7a631..d02bb8a4d 100644 --- a/presets/default.json +++ b/presets/default.json @@ -4,7 +4,7 @@ "default_refiner_switch": 0.5, "default_loras": [ [ - false, + true, "sd_xl_offset_example-lora_1.0.safetensors", 0.1 ], From 4a44be36fd61aeb2e20fd7f2e2f639a1acab7d20 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Fri, 15 Mar 2024 22:04:27 +0100 Subject: [PATCH 14/21] feat: add preset selection to Gradio UI (session based) (#1570) * add preset selection uses meta parsing to set presets in user session (UI elements only) * add LoRA handling * use default config as fallback value * add preset refresh on "Refresh All Files" click * add special handling for default_styles and default_aspect_ratio * sort styles after preset change * code cleanup * download missing models from preset * set default refiner to "None" in preset realistic * use state_is_generating for preset selection change * DRY output parameter handling * feat: add argument --disable-preset-selection useful for cloud provisioning to prevent model switches and keep models loaded * feat: keep prompt when not set in preset, use more robust syntax * fix: add default return values when preset download is disabled https://github.com/mashb1t/Fooocus/issues/20 * feat: add translation for preset label * refactor: unify preset loading methods in config * refactor: code cleanup --- args_manager.py | 3 ++ language/en.json | 1 + launch.py | 28 +++++------ modules/config.py | 103 +++++++++++++++++++++++++---------------- modules/meta_parser.py | 7 ++- presets/realistic.json | 2 +- webui.py | 57 ++++++++++++++++++----- 7 files changed, 132 insertions(+), 69 deletions(-) diff --git a/args_manager.py b/args_manager.py index 8c3e19182..6a3ae9dc3 100644 --- a/args_manager.py +++ b/args_manager.py @@ -4,7 +4,10 @@ from tempfile import gettempdir args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.") + args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.") +args_parser.parser.add_argument("--disable-preset-selection", action='store_true', + help="Disables preset selection in Gradio.") args_parser.parser.add_argument("--language", type=str, default='default', help="Translate UI using json files in [language] folder. 
" diff --git a/language/en.json b/language/en.json index 3e42fff0c..0f97e6e96 100644 --- a/language/en.json +++ b/language/en.json @@ -38,6 +38,7 @@ "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)": "* \"Inpaint or Outpaint\" is powered by the sampler \"DPMPP Fooocus Seamless 2M SDE Karras Inpaint Sampler\" (beta)", "Setting": "Setting", "Style": "Style", + "Preset": "Preset", "Performance": "Performance", "Speed": "Speed", "Quality": "Quality", diff --git a/launch.py b/launch.py index 3cee7f9c5..afa667058 100644 --- a/launch.py +++ b/launch.py @@ -93,7 +93,7 @@ def ini_args(): print(f"[Cleanup] Failed to delete content of temp dir.") -def download_models(): +def download_models(default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads): for file_name, url in vae_approx_filenames: load_file_from_url(url=url, model_dir=config.path_vae_approx, file_name=file_name) @@ -105,30 +105,32 @@ def download_models(): if args.disable_preset_download: print('Skipped model download.') - return + return default_model, checkpoint_downloads if not args.always_download_new_model: - if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)): - for alternative_model_name in config.previous_default_models: + if not os.path.exists(os.path.join(config.paths_checkpoints[0], default_model)): + for alternative_model_name in previous_default_models: if os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)): - print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].') + print(f'You do not have [{default_model}] but you have [{alternative_model_name}].') print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, ' - f'but you are not using latest models.') + f'but you are not using the latest models.') print('Use --always-download-new-model to avoid fallback and always get new models.') - config.checkpoint_downloads = {} - config.default_base_model_name = alternative_model_name + checkpoint_downloads = {} + default_model = alternative_model_name break - for file_name, url in config.checkpoint_downloads.items(): + for file_name, url in checkpoint_downloads.items(): load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name) - for file_name, url in config.embeddings_downloads.items(): + for file_name, url in embeddings_downloads.items(): load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) - for file_name, url in config.lora_downloads.items(): + for file_name, url in lora_downloads.items(): load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name) - return + return default_model, checkpoint_downloads -download_models() +config.default_base_model_name, config.checkpoint_downloads = download_models( + config.default_base_model_name, config.previous_default_models, config.checkpoint_downloads, + config.embeddings_downloads, config.lora_downloads) from webui import * diff --git a/modules/config.py b/modules/config.py index 8fec8e05b..c82f61c22 100644 --- a/modules/config.py +++ b/modules/config.py @@ -97,22 +97,45 @@ def replace_config(old_key, new_key): try_load_deprecated_user_path_config() -preset = args_manager.args.preset -if isinstance(preset, str): - preset_path = os.path.abspath(f'./presets/{preset}.json') - try: - if os.path.exists(preset_path): - with open(preset_path, "r", encoding="utf-8") as 
json_file: - config_dict.update(json.load(json_file)) - print(f'Loaded preset: {preset_path}') - else: - raise FileNotFoundError - except Exception as e: - print(f'Load preset [{preset_path}] failed') - print(e) +def get_presets(): + preset_folder = 'presets' + presets = ['initial'] + if not os.path.exists(preset_folder): + print('No presets found.') + return presets + + return presets + [f[:f.index('.json')] for f in os.listdir(preset_folder) if f.endswith('.json')] + + +def try_get_preset_content(preset): + if isinstance(preset, str): + preset_path = os.path.abspath(f'./presets/{preset}.json') + try: + if os.path.exists(preset_path): + with open(preset_path, "r", encoding="utf-8") as json_file: + json_content = json.load(json_file) + print(f'Loaded preset: {preset_path}') + return json_content + else: + raise FileNotFoundError + except Exception as e: + print(f'Load preset [{preset_path}] failed') + print(e) + return {} +try: + with open(os.path.abspath(f'./presets/default.json'), "r", encoding="utf-8") as json_file: + config_dict.update(json.load(json_file)) +except Exception as e: + print(f'Load default preset failed.') + print(e) + +available_presets = get_presets() +preset = args_manager.args.preset +config_dict.update(try_get_preset_content(preset)) + def get_path_output() -> str: """ Checking output path argument and overriding default path. @@ -241,7 +264,7 @@ def init_temp_path(path: str | None, default_path: str) -> str: default_value=True, validator=lambda x: isinstance(x, bool) ) -default_base_model_name = get_config_item_or_set_default( +default_base_model_name = default_model = get_config_item_or_set_default( key='default_model', default_value='model.safetensors', validator=lambda x: isinstance(x, str) @@ -251,7 +274,7 @@ def init_temp_path(path: str | None, default_path: str) -> str: default_value=[], validator=lambda x: isinstance(x, list) and all(isinstance(k, str) for k in x) ) -default_refiner_model_name = get_config_item_or_set_default( +default_refiner_model_name = default_refiner = get_config_item_or_set_default( key='default_refiner', default_value='None', validator=lambda x: isinstance(x, str) @@ -451,29 +474,30 @@ def init_temp_path(path: str | None, default_path: str) -> str: config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))] -possible_preset_keys = [ - "default_model", - "default_refiner", - "default_refiner_switch", - "default_loras_min_weight", - "default_loras_max_weight", - "default_loras", - "default_max_lora_number", - "default_cfg_scale", - "default_sample_sharpness", - "default_sampler", - "default_scheduler", - "default_performance", - "default_prompt", - "default_prompt_negative", - "default_styles", - "default_aspect_ratio", - "default_save_metadata_to_images", - "checkpoint_downloads", - "embeddings_downloads", - "lora_downloads", -] - +# mapping config to meta parameter +possible_preset_keys = { + "default_model": "base_model", + "default_refiner": "refiner_model", + "default_refiner_switch": "refiner_switch", + "previous_default_models": "previous_default_models", + "default_loras_min_weight": "default_loras_min_weight", + "default_loras_max_weight": "default_loras_max_weight", + "default_loras": "", + "default_cfg_scale": "guidance_scale", + "default_sample_sharpness": "sharpness", + "default_sampler": "sampler", + "default_scheduler": "scheduler", + "default_overwrite_step": "steps", + "default_performance": "performance", + 
"default_prompt": "prompt", + "default_prompt_negative": "negative_prompt", + "default_styles": "styles", + "default_aspect_ratio": "resolution", + "default_save_metadata_to_images": "default_save_metadata_to_images", + "checkpoint_downloads": "checkpoint_downloads", + "embeddings_downloads": "embeddings_downloads", + "lora_downloads": "lora_downloads" +} REWRITE_PRESET = False @@ -530,10 +554,11 @@ def get_model_filenames(folder_paths, extensions=None, name_filter=None): def update_files(): - global model_filenames, lora_filenames, wildcard_filenames + global model_filenames, lora_filenames, wildcard_filenames, available_presets model_filenames = get_model_filenames(paths_checkpoints) lora_filenames = get_model_filenames(paths_loras) wildcard_filenames = get_files_from_folder(path_wildcards, ['.txt']) + available_presets = get_presets() return diff --git a/modules/meta_parser.py b/modules/meta_parser.py index 546c093fa..0cdbdf1fd 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -210,9 +210,8 @@ def parse_meta_from_preset(preset_content): height = height[:height.index(" ")] preset_prepared[meta_key] = (width, height) else: - preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[ - settings_key] is not None else getattr(modules.config, settings_key) - + preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[settings_key] is not None else getattr(modules.config, settings_key) + if settings_key == "default_styles" or settings_key == "default_aspect_ratio": preset_prepared[meta_key] = str(preset_prepared[meta_key]) @@ -570,4 +569,4 @@ def get_exif(metadata: str | None, metadata_scheme: str): exif[0x0131] = 'Fooocus v' + fooocus_version.version # 0x927C = MakerNote exif[0x927C] = metadata_scheme - return exif \ No newline at end of file + return exif diff --git a/presets/realistic.json b/presets/realistic.json index 95f8b6e0a..6db6d0b76 100644 --- a/presets/realistic.json +++ b/presets/realistic.json @@ -1,6 +1,6 @@ { "default_model": "realisticStockPhoto_v20.safetensors", - "default_refiner": "", + "default_refiner": "None", "default_refiner_switch": 0.5, "default_loras": [ [ diff --git a/webui.py b/webui.py index d68ade627..01c828dff 100644 --- a/webui.py +++ b/webui.py @@ -15,6 +15,7 @@ import modules.meta_parser import args_manager import copy +import launch from modules.sdxl_styles import legal_style_names from modules.private_logger import get_current_html_path @@ -252,6 +253,11 @@ def trigger_metadata_preview(filepath): with gr.Column(scale=1, visible=modules.config.default_advanced_checkbox) as advanced_column: with gr.Tab(label='Setting'): + if not args_manager.args.disable_preset_selection: + preset_selection = gr.Radio(label='Preset', + choices=modules.config.available_presets, + value=args_manager.args.preset if args_manager.args.preset else "initial", + interactive=True) performance_selection = gr.Radio(label='Performance', choices=flags.Performance.list(), value=modules.config.default_performance) @@ -518,13 +524,50 @@ def refresh_files_clicked(): modules.config.update_files() results = [gr.update(choices=modules.config.model_filenames)] results += [gr.update(choices=['None'] + modules.config.model_filenames)] + if not args_manager.args.disable_preset_selection: + results += [gr.update(choices=modules.config.available_presets)] for i in range(modules.config.default_max_lora_number): - results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] + 
results += [gr.update(interactive=True), + gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] return results - refresh_files.click(refresh_files_clicked, [], [base_model, refiner_model] + lora_ctrls, + refresh_files_output = [base_model, refiner_model] + if not args_manager.args.disable_preset_selection: + refresh_files_output += [preset_selection] + refresh_files.click(refresh_files_clicked, [], refresh_files_output + lora_ctrls, queue=False, show_progress=False) + state_is_generating = gr.State(False) + + load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, + performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection, + overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, + adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model, + refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed, + generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls + + if not args_manager.args.disable_preset_selection: + def preset_selection_change(preset, is_generating): + preset_content = modules.config.try_get_preset_content(preset) if preset != 'initial' else {} + preset_prepared = modules.meta_parser.parse_meta_from_preset(preset_content) + + default_model = preset_prepared.get('base_model') + previous_default_models = preset_prepared.get('previous_default_models', []) + checkpoint_downloads = preset_prepared.get('checkpoint_downloads', {}) + embeddings_downloads = preset_prepared.get('embeddings_downloads', {}) + lora_downloads = preset_prepared.get('lora_downloads', {}) + + preset_prepared['base_model'], preset_prepared['lora_downloads'] = launch.download_models( + default_model, previous_default_models, checkpoint_downloads, embeddings_downloads, lora_downloads) + + if 'prompt' in preset_prepared and preset_prepared.get('prompt') == '': + del preset_prepared['prompt'] + + return modules.meta_parser.load_parameter_button_click(json.dumps(preset_prepared), is_generating) + + preset_selection.change(preset_selection_change, inputs=[preset_selection, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ + .then(fn=style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) \ + performance_selection.change(lambda x: [gr.update(interactive=not flags.Performance.has_restricted_features(x))] * 11 + [gr.update(visible=not flags.Performance.has_restricted_features(x))] * 1 + [gr.update(interactive=not flags.Performance.has_restricted_features(x), value=flags.Performance.has_restricted_features(x))] * 1, @@ -600,8 +643,6 @@ def inpaint_mode_change(mode): ctrls += ip_ctrls - state_is_generating = gr.State(False) - def parse_meta(raw_prompt_txt, is_generating): loaded_json = None if is_json(raw_prompt_txt): @@ -617,13 +658,6 @@ def parse_meta(raw_prompt_txt, is_generating): prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False) - load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, - performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection, - overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, - adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model, - refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, 
image_seed, - generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls - load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) def trigger_metadata_import(filepath, state_is_generating): @@ -637,7 +671,6 @@ def trigger_metadata_import(filepath, state_is_generating): return modules.meta_parser.load_parameter_button_click(parsed_parameters, state_is_generating) - metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) From 55e23a9374cbe09d70f182b7a73a7885411822db Mon Sep 17 00:00:00 2001 From: Spencer Hayes-Laverdiere Date: Fri, 15 Mar 2024 17:30:29 -0400 Subject: [PATCH 15/21] fix: add error output for unsupported images (#2537) * Raise Error on bad decode * Move task arg pop to try block * fix: prevent empty task from getting queued --------- Co-authored-by: Manuel Schmid --- modules/gradio_hijack.py | 7 +++++-- webui.py | 6 +++++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/modules/gradio_hijack.py b/modules/gradio_hijack.py index 181429ec3..35df81c00 100644 --- a/modules/gradio_hijack.py +++ b/modules/gradio_hijack.py @@ -17,7 +17,7 @@ from gradio_client.serializing import ImgSerializable from PIL import Image as _Image # using _ to minimize namespace pollution -from gradio import processing_utils, utils +from gradio import processing_utils, utils, Error from gradio.components.base import IOComponent, _Keywords, Block from gradio.deprecation import warn_style_method_deprecation from gradio.events import ( @@ -275,7 +275,10 @@ def preprocess( x, mask = x["image"], x["mask"] assert isinstance(x, str) - im = processing_utils.decode_base64_to_image(x) + try: + im = processing_utils.decode_base64_to_image(x) + except PIL.UnidentifiedImageError: + raise Error("Unsupported image type in input") with warnings.catch_warnings(): warnings.simplefilter("ignore") im = im.convert(self.image_mode) diff --git a/webui.py b/webui.py index 01c828dff..98780bff7 100644 --- a/webui.py +++ b/webui.py @@ -29,12 +29,16 @@ def get_task(*args): return worker.AsyncTask(args=args) -def generate_clicked(task): +def generate_clicked(task: worker.AsyncTask): import ldm_patched.modules.model_management as model_management with model_management.interrupt_processing_mutex: model_management.interrupt_processing = False # outputs=[progress_html, progress_window, progress_gallery, gallery] + + if len(task.args) == 0: + return + execution_start_time = time.perf_counter() finished = False From 37274c652a044783c63f0966c087a4a062f09790 Mon Sep 17 00:00:00 2001 From: David Sage <162500231+DavidDragonsage@users.noreply.github.com> Date: Fri, 15 Mar 2024 14:52:27 -0700 Subject: [PATCH 16/21] feat: improve anime preset by adding style Fooocus Semi Realistic (#2492) * Add files via upload In anime.json, at Line 36, replace "Fooocus Negative" with "Fooocus Semi Realistic" * Add files via upload In sdxl_styles_fooocus.json, insert this text at Line 6: { "name": "Fooocus Semi Realistic", "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, 
autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" }, * Add files via upload Popup image for the new "Fooocus Semi Realistic" style * Update sdxl_styles_fooocus.json Removed "grayscale, bw" from the proposed Fooocus Realistic entry at Line 6 of sdxl_styles_fooocus.json * refactor: cleanup files * feat: use default model to create thumbnail juggernautv8, seed 0, 1024x1024, no LoRAs, only this style, positive prompt "cat" --------- Co-authored-by: Manuel Schmid Co-authored-by: Manuel Schmid --- presets/anime.json | 2 +- sdxl_styles/samples/fooocus_semi_realistic.jpg | Bin 0 -> 8565 bytes sdxl_styles/sdxl_styles_fooocus.json | 4 ++++ 3 files changed, 5 insertions(+), 1 deletion(-) create mode 100644 sdxl_styles/samples/fooocus_semi_realistic.jpg diff --git a/presets/anime.json b/presets/anime.json index 1f2b26a97..6fe6e4ba9 100644 --- a/presets/anime.json +++ b/presets/anime.json @@ -38,7 +38,7 @@ "default_prompt_negative": "", "default_styles": [ "Fooocus V2", - "Fooocus Negative", + "Fooocus Semi Realistic", "Fooocus Masterpiece" ], "default_aspect_ratio": "896*1152", diff --git a/sdxl_styles/samples/fooocus_semi_realistic.jpg b/sdxl_styles/samples/fooocus_semi_realistic.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b07555a7becce71aa5e2722450e2fea328724615 GIT binary patch literal 8565 zcmbW3cTkf}8}37qCPjKL3WD?|9fDY>0s_)YUIe7qPyz-}6i}*wfOL^2HBv(FC3KLE zp(k_*B-GFk@AsWKXXgBM&bgo2dv@o!cjx-;&g``}vp36td)n$+>Hq=)0>D4FAK+#V z@C-nBJ0u|ZHxvFF#6ZL? zUF5%03GWaQ-Ju{QA^lIu|5LhY2hfrM-UHqf5j+48(h?BS65MnHxB&oyJO7bK0QjFl zKuAP zj!w=lU|&D~07zia$4}uAk)OXrB_@4KPD%Zqmi{v@zo4+_S8+*Ibq&0>uD+qMv#YxY z(c9NQFg`IkH9a%?7rBB$udc0cY;JAi4i1lwPfqb?=l^mM0EqsJb({YS_J45E-f|KC zdj_QcauE;)+%_Uw;yd>xN$AuJNNv68A4q*5V|bd7TiHR*D{Y8neB(1l!Nm7?Z}41W*zY+#VhgEkG3z`EqJ4Mbq8m>)x`*ae0#Cor|}!U8lws zAygoJ$)43hmI`5ei={0=#%YcHh8@t5e5m8l>LSe2Us=pE;F-qrFK(X5MQBysmNjQZ zl6wGUgxWR6c*57Q-QAoUu1ORq&`Ep*5d z)AGBq(l0fK3kr=Vp^H=fhVtRe3QCqZvL-55OHsWgTA|X9heqYTci_>ZUjoGa=f)eF zVztp)St+bKl5YP58DoS|^p+O=g>6HaNv^4`x$}`dDVT@(<09YPprt!j zx-D5LKcJ=S2Y(?w{$O>#@lt2+n!QyS2Nw$l3-_h0Us7LqFE)c_^7NSv8IWO(Z=sC~ zne*v)M$%gykzZmP&SJrev2{pK3zmY@it6t3qX4m=OvW#+xw3?^c7dFekM-fm#`0bb zW91nWZq}iB_L}Ve{dEVKeJ9cRUmxtg_$OCHcOk!>XI(YR%_!x@??tWgG=xo$YbwW1 zZ~^Vc;l3H~=3E_G7vS&Eg~D+LIn}F)^B~F^r7wO;87&$YQk^?%eRKOJj#D-W- z?+FnlY{7|Y`4wUXDU zn@lDdjYr%7fJnpb@?o!5ReT1TQwjC<3l`lGB0Tw6#x=CoWx_d$Y^}*8IH4!=AkSGz zpSX=@*{*Ti6V+y}TzrC{3Buk0kYC{9M34*C1t<5s8H>&?Il<`6Q({`WXF zPf^^dS$ysa#IQ77>11xb;@zd<4;(Dx%(_MYZR^Z&3vc(`6>$b-Z;&oas&2ET0U<~n zr``uN{_fvSsP~;(BQ2w}7aZs#=Dv=!j5vkS;#bi7(K+-XjAe^VY{@4-FYMv2q11x{ zDPFAHBhGxicfWSbZ3=~5|CKv;jK%OwGIfHlQeDo+)l1iq3k+QEC<)0NyWDwKMFvP1 z=#N02AmtW-{%EoO`INrQP^id+XL3h1yvO5W-+VUIfa zXoOn9H9ih>?^*({qo4Dn?p;UVJ866sg5?Gf=-|rk9z}h&^nMRQTm{3<#DltuZE=#R zbM+K&IqX(HqKY1&^T<$7fiL@*x*Uf%y{2Ue;f5;L zOx5%>!p-vQ90T3fS=c|D^BQm|t}3tsdl~n6D*wRAMav&Jy2@BOI3UQL+~ZPRs2CCG zw3XUzjiCGF20K@3-Q65`C5%lnUzGJ$^T+H75Q&Rv;8^#}7=v*6HOzPl) z_YiEn0Yv+W0`YTJTQMc`5eSzWTx+=lyu!cq@B^|TqhTZMynSJX6z%Dx?VY@#}%wN(O)QT3|TMr&ZPHY+v4_R zbeVUN-x6Q`aVMf({Urdq0oYV*r&iuIadoykWYR6=SQ0$Pg5!T+kWJW3x$L)$48OPk zTstd?r1LoR53jvrc*>+ehU&mFWLug12-oUM%$#Zq}wN zdzqJyZ($xR0!;^OAFbsi#HWSsXGeatR?whWXSH!0wYHG;*jkW14=Ma>&GCZL=bgD~ z9~O2GYK3nJaOdseToRcZ@6OJq<{6#3Css}SVw%8_{CqSZ6* 
zIjFujYaDSlI$4saX7KMzDOW37qjv|)us;Exh^gteb!k>l1?prVy~u6+IY%O^r>uuNO|t(LGXk3 z;2a?-#f|#}D#GBC=abEz!6Ui&9L{UIT9AJ+)`H$s*N>V`MdzBu#=K}vs--k z69-A}@bgkb{lq-@4Pd41wT4KZaPPrmWow+PC9bGD9)NE%n~-q_UNvi`AXmK>8R1{r zg(G8p-vr7pA1Q3A$f4kIkt?x4UBfwpIOnxDyQML!hnXhaiH)YQa2jfSQfE*~Mj%yP zHq!m5ab+KlbT>0IkO%j0V$r72P5vOGXuOp-)=t1MIY)i` zqHUR5A?I~w5m^hI<66A7ts( zPQ$4CaQJvWWqP2%9jWtX(e?&lZ%;@H{wf@WcMl5TfkbJ^+)Wh4^rrbNwbM@kuS$_e zO_q7eif=Pqqm?4eL{laRpLfPEi;+=i3^#e0(Z(I0J5sHVBlPD`rhDkBYsZb%#?!e5 zj01!PRRUH?MfrwM;$Qf|sMAB>E+v9lV^O-pu2veXz)3)+u{E1;aeli)NORJMxiAPC*`WWKo6Z++ zDEeSrjgOpNr{PK0FJ!%f>S9NGzY=8G`xIU(-{VuR$nE?z#_lIy;$Tn6&!Z_?Lg?=Q2NLviE;WnEDb?^@PysocUm z+$Jx4rYRXr<5T6v)BB%*4}Z^tr}Xkq8ou$_l!`hp1gdtuIek!YTWWV}I`xkIcT z84!(*Wm08=O5Z+Rx>=Oz3p-F$T*rRqGSM$UaLX-h{NgjZ9~AGy2#k%1{m}f)pp}t= zWl(rfOT|Z=u(=aPxyKJ)U1ga@=s5y{+CFuRdrJ4jitcGn(KR=|ba^A30cer}i2ly^Vd?&bD-zwDjK0hZXFX8V-rl4ZLR?IE7?2XAj)<;o)a776Rr(4~<>qs`(W52kSZj<0; z`YhQtQ+yu^r(g*Nl<$+zW@&d+c=%aon|HhP!djps;^{eEKNWZfj-_tzm(fUxd9(SQ z^m!L|i{ON_N@4K@PRl6Lgv112oZLI<8;1r~~d+qnDbw*R-P+$7Yjk*xm z@T&~igBHB4BWGDccsgdqnXmoq^D-rF>dR)3I64=#L_h0Hzu=_!%B0?G3Yx}p*|5z( z2%cO`=@9tM3PVcJMh!a6of%ym1D6D&io(Nmfl{%w(>yI=hXhxTnL-}nB)snE){KmB^B~4g7`2qX=j23Ep`I{RVR+f zd+KQo1vG!x;`GJcD<~T8Y15G`z&iT*;_49+ zLe}xtFAH$0sE4(Y%QpawPT21AP50gO@I|tNivD1;@;X{+lcszmX=Nwa@Nl=gyjj34l}GWc0J3uZkGlS86Pw3J zl#XfG1R|S%{1a1W8)@-7PYg(dMkKxVgi3T$c{dxj~ zNKH3uJ}ax{{xqwN{z$q2nu;RUOe%0wDt5cq0E_)g4l@nMQBY8ZKtbk5*Y5SsV zAP|4evu(qdtr^?1ny5WiK_Vw9j|dfi8L(pppGnFOftY3jQ(PzL1yfV*E@M6p;nqYt`foPr6q0)GG9PW^R zB$6Zx8{gpr5$H;CuC|tZS1e%^Fwft_o7=O3Q89Xz_Vz8Y*!re$Uk^$U5x-|D6>ZZ< z@p!esZUm}Y4!V@Y4rj-m(5x*iD3MD6pY(({?2(ZL9-Wqy;w0_cGK{JM>0DvQE(1Q6 zffuu+adP0eNMwU5{blfSv;^ z=$>t&b4A+Mz6ElMJffn7_FJpOU;q8siSm+vWKcHYY^gJct2{B|3i9P8_lSS%{&tET zgY7>Vl~sGVFmeE`Jk_+qwIek5KO7bPa<#C&t_k~_d&t=B6ool#yO`&o`F1!$66`u` ziI{Qzspaa&+vT;Sl9T#=ry(O|qiE|iSib08*;Ux_$ESSm#djZb+kZ*gDoqJp| zYN_uVjm6~qbHhhJpeBJVU8QQj^i|O6RO_rd3{BxvWJR>x zxzZ!^vOU`9!~Rk3_pAs7qV#i}c3Ws5L%P7{{CA(slRUP_Ur{fEx4nwe4^%)ado3#p zaN$Qs?)(t!XFy!9a+rjdhPK49oxp&@XZmb{-m|58fG>Mo1<<8zpH5naTkclTrlZO>Me+PFTDD(-)EI>df zSxFkdBRkqClx6E?=)@3H@JD*x^3;SJF_-yZOw^(CM7bfTcntG1Wn&6dy`F}n=}8~o zR1}EZ7Z-4{9D!HGjLIc4u(Bqb3WdBX&?GvjnCaoBggRH~@+ZOOhaI_|xEjXOvh-c# zV*`Oxe{TRC-0*U{mD4zx(5Q3FLPc7!Mt*s7m9^E`Pn3BY$3O!@efH#pm(RBYHeP_A zyLALkH44aF$2AL%!N-egsxfS_o{!diJ&jA6GxGH>luAyh|LLWXMtn?>CF@C96L-vM zTXjnEiYjAQ*$a{Pn%+{YUp0_Jxp{sYeNT$@=lHa<31{r+>lXNhaSg>l9$5044PFG1?FF?)YE7n| zmVa%E4bCXP-3M8q(5B=*Hlm?$M|LhWkO9n+56lsUw&9NGPl79#Q?dCY~9wlgjK8&%bJ6)o6mxEY@7Uw4sKxf`O6r&KG>w-8X={zNN(? 
zA@{b~sIc+QQ!<~K&ea`QN6vWp{I=_X-*-Uis_>k4nlIc`$^qV`cq3hRBb^I1SJz?H z{d$Fdrz&%GVEr|yiHi?=l2+;ra!h(UP|0V$ zNEXLvw_(nzN2GrUHI%6#+d*tsje1qd`=L3P^m{GIYzei!hZ`lAuW+qj$MS0rL=qZo zIs5)G<*>`-qB$t1QbN~7@sEF9*c|J_4B7X2m3j9c!;qd_w_Bvx$^q4!SifGF*!6o6 z^E=JHF3ykvKzcrQ@SD4qEbnYg8K|yZ*3(jsYx{m$!X7HtxD1TN1KEPpW{auHjr9Ylj|=?AoU= z+dA|$-AZ9*)l)Kv}y}g%lEhb z${YlA{qIAYdu7f%FBDx=X?9yhkn@*MXYyNq>kH;htj8i{ze?MeX%|E-mxLqCHUKe> zC&DXy*~~bm_pLnE)UM&_({d3kd4$XS%sde*-)WbUVh;3NgHpaVBt6a*Gj^q!0E$Aw z0NY$@xgi=>Ry$qkGvlE zNTu{EjwT|66%uv0%gm;laB%s1X$!$hTODQ`7Q^kOF*(DD9>uF)0{#?!ucNb&a&LHF zm%)n8OjYQ$+2H!#&jA;bA2j+?q@MMn@`$VWLfa*XEA>uydQ8Y0jAcCV%?G|--5Y=e z*I!;3r_g%tg&suM99yuwobu87wUB2H*}Nyp&8wcLo8h z-=Yp?6#dF$8!@CRWIS>Dx(%d-Qqoq8yVx|}=G9mjB*MgG=U z^}(vIhladC`l0uwTIyO&W+lCjZe82YZ7(#iC_KLP1~7ERS2O~4Gi!W@$U0t1;f1$314%6+i0ay zwXBRos);3gdld6e9K=?1ajX(Woa3}#ueq~&mo=1)4O&A$Pn+v5Wb0ODFDs-)+^aVh z5QOL)@N}hkD-%>^W^TB8EH~ked)0GOZy$nD4m+9I#RZ7NWm_H6qyCWa>B%VxU1R|3 z+3#84N7aGKoHYANcr(_cQ+Q5L#J5~~!(;#GlS5y8tHdf=MJXsuF-DbUE44P&yF;vG z!a}|Nb;@Q1iG(`2a-3>3$H_45+B&@K1^`p*e^=)&J?P8*m$^NJrY>F&Ym@B#WKZq@+UEy;*uPf* zrm-Sk(5F`F*5%TEbEvwaie+9A?`nC0Zf?SkWVrgaJvOC9$hL6{_LFA%iWJFjlW0cv zUh`rdyFQ1Ep*a1=5rPj$l%_Mg>VF~T?cH`hAhckq*Sw(}PnESoo&MlmkSa8~4hi?0 z(^aLm-^%}`D-fqnR=l(77oPgZFb3bu*?EZ&7eQ&3OuadFy1+5F1^p8VsA<6h9m! zG=1Q3cZ2>^*8NUYcF=N|lzazF?qeqi&W+Jbk;{JEoY56T%$js1#R>D00B}9wN$OBY zs<=EHZ{c3I#%MTU)YsR^1)hx pZ)TVPh{BgFji^?BgQA#X432I+X`(dr3u>}lG124nSoP-b{{X_R7Q6re literal 0 HcmV?d00001 diff --git a/sdxl_styles/sdxl_styles_fooocus.json b/sdxl_styles/sdxl_styles_fooocus.json index 81d6442e6..cf64eab44 100644 --- a/sdxl_styles/sdxl_styles_fooocus.json +++ b/sdxl_styles/sdxl_styles_fooocus.json @@ -3,6 +3,10 @@ "name": "Fooocus Enhance", "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, grayscale, bw, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (airbrushed, cartoon, anime, semi-realistic, cgi, render, blender, digital art, manga, amateur:1.3), (3D ,3D Game, 3D Game Scene, 3D Character:1.1), (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" }, + { + "name": "Fooocus Semi Realistic", + "negative_prompt": "(worst quality, low quality, normal quality, lowres, low details, oversaturated, undersaturated, overexposed, underexposed, bad photo, bad photography, bad art:1.4), (watermark, signature, text font, username, error, logo, words, letters, digits, autograph, trademark, name:1.2), (blur, blurry, grainy), morbid, ugly, asymmetrical, mutated malformed, mutilated, poorly lit, bad shadow, draft, cropped, out of frame, cut off, censored, jpeg artifacts, out of focus, glitch, duplicate, (bad hands, bad anatomy, bad body, bad face, bad teeth, bad arms, bad legs, deformities:1.3)" + }, { "name": "Fooocus Sharp", "prompt": "cinematic still {prompt} . 
emotional, harmonious, vignette, 4k epic detailed, shot on kodak, 35mm photo, sharp focus, high budget, cinemascope, moody, epic, gorgeous, film grain, grainy", From 86cba3f223720245269b96d86560d8ef16806c2a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Fri, 15 Mar 2024 23:11:26 +0100 Subject: [PATCH 17/21] feat: add translation for unsupported image error (#2537) --- language/en.json | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/language/en.json b/language/en.json index 0f97e6e96..fefc79c47 100644 --- a/language/en.json +++ b/language/en.json @@ -384,5 +384,6 @@ "Metadata Scheme": "Metadata Scheme", "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.", "fooocus (json)": "fooocus (json)", - "a1111 (plain text)": "a1111 (plain text)" + "a1111 (plain text)": "a1111 (plain text)", + "Unsupported image type in input": "Unsupported image type in input" } \ No newline at end of file From 6b44c101dbfe742fd912456c200ad1bfa4a88473 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 12:30:39 +0100 Subject: [PATCH 18/21] feat: update changelog and readme --- readme.md | 10 +++++++--- update_log.md | 11 +++++++++++ 2 files changed, 18 insertions(+), 3 deletions(-) diff --git a/readme.md b/readme.md index 4e47ac088..6ec24eedd 100644 --- a/readme.md +++ b/readme.md @@ -84,6 +84,10 @@ The first time you launch the software, it will automatically download models: After Fooocus 2.1.60, you will also have `run_anime.bat` and `run_realistic.bat`. They are different model presets (and require different models, but they will be automatically downloaded). [Check here for more details](https://github.com/lllyasviel/Fooocus/discussions/679). +After Fooocus 2.3.0 you can also switch presets directly in the browser. Keep in mind to add these arguments if you want to change the default behavior: +* Use `--disable-preset-selection` to disable preset selection in the browser. +* Use `--always-download-new-model` to download missing models on preset switch. Default is fallback to `previous_default_models` defined in the corresponding preset, also see terminal output. + ![image](https://github.com/lllyasviel/Fooocus/assets/19834515/d386f817-4bd7-490c-ad89-c1e228c23447) If you already have these files, you can copy them to the above locations to speed up installation. @@ -115,7 +119,7 @@ See also the common problems and troubleshoots [here](troubleshoot.md). ### Colab -(Last tested - 2024 Mar 11) +(Last tested - 2024 Mar 18 - @mashb1t) | Colab | Info | --- | --- | @@ -125,9 +129,9 @@ In Colab, you can modify the last line to `!python entry_with_update.py --share Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. -Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. +Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346). 
-Thanks to [camenduru](https://github.com/camenduru)! +Thanks to [camenduru](https://github.com/camenduru) for the template! ### Linux (Using Anaconda) diff --git a/update_log.md b/update_log.md index 322c19c12..4e22db0a4 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,14 @@ +# [2.3.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.3.0) + +* Add performance "lightning" (based on [SDXL-Lightning 4 step LoRA](https://huggingface.co/ByteDance/SDXL-Lightning/blob/main/sdxl_lightning_4step_lora.safetensors)) +* Add preset selection to UI, disable with argument `--disable-preset-selection`. Use `--always-download-new-model` to download missing models on preset switch. +* Improve face swap consistency by switching later in the process to (synthetic) refiner +* Add temp path cleanup on startup +* Add support for wildcard subdirectories +* Add scrollable 2 column layout for styles for better structure +* Improve Colab resource needs for T4 instances (default), positively tested with all image prompt features +* Improve anime preset, now uses style `Fooocus Semi Realistic` instead of `Fooocus Negative` (less wet look images) + # [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1) * Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox) From c08518abae926d596ac5095ba8af7c3c3d88cc4d Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 17:40:37 +0100 Subject: [PATCH 19/21] feat: add backwards compatibility for presets without disable/enable LoRA boolean https://github.com/lllyasviel/Fooocus/pull/2507 --- modules/config.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/config.py b/modules/config.py index c82f61c22..f3bf7f1f0 100644 --- a/modules/config.py +++ b/modules/config.py @@ -323,8 +323,12 @@ def init_temp_path(path: str | None, default_path: str) -> str: 1.0 ] ], - validator=lambda x: isinstance(x, list) and all(len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number) for y in x) + validator=lambda x: isinstance(x, list) and all( + len(y) == 3 and isinstance(y[0], bool) and isinstance(y[1], str) and isinstance(y[2], numbers.Number) + or len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) + for y in x) ) +default_loras = [(y[0], y[1], y[2]) if len(y) == 3 else (True, y[0], y[1]) for y in default_loras] default_max_lora_number = get_config_item_or_set_default( key='default_max_lora_number', default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5, From ee361715afb7dab10ff266fcf8d8c6abcebfd81a Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 18:04:15 +0100 Subject: [PATCH 20/21] docs: bump version number to 2.3.0 --- fooocus_version.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fooocus_version.py b/fooocus_version.py index 6c3c2c903..a4b8895b3 100644 --- a/fooocus_version.py +++ b/fooocus_version.py @@ -1 +1 @@ -version = '2.2.1' +version = '2.3.0' From 3efce581cac1df4441980710f55c28fbde3ac3d7 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 18 Mar 2024 18:13:15 +0100 Subject: [PATCH 21/21] docs: add hint for colab preset timeout to readme --- readme.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/readme.md b/readme.md index 6ec24eedd..5f66e02aa 100644 --- a/readme.md +++ b/readme.md @@ -119,7 +119,7 @@ See also the common problems and troubleshoots [here](troubleshoot.md). 
### Colab -(Last tested - 2024 Mar 18 - @mashb1t) +(Last tested - 2024 Mar 18 by [mashb1t](https://github.com/mashb1t)) | Colab | Info | --- | --- | @@ -127,6 +127,8 @@ See also the common problems and troubleshoots [here](troubleshoot.md). In Colab, you can modify the last line to `!python entry_with_update.py --share --always-high-vram` or `!python entry_with_update.py --share --always-high-vram --preset anime` or `!python entry_with_update.py --share --always-high-vram --preset realistic` for Fooocus Default/Anime/Realistic Edition. +You can also change the preset in the UI. Please be aware that this may lead to timeouts after 60 seconds. If this is the case, please wait until the download has finished, change the preset to initial and back to the one you've selected or reload the page. + Note that this Colab will disable refiner by default because Colab free's resources are relatively limited (and some "big" features like image prompt may cause free-tier Colab to disconnect). We make sure that basic text-to-image is always working on free-tier Colab. Using `--always-high-vram` shifts resource allocation from RAM to VRAM and achieves the overall best balance between performance, flexibility and stability on the default T4 instance. Please find more information [here](https://github.com/lllyasviel/Fooocus/pull/1710#issuecomment-1989185346).