From a78f66ffb5bf7b684c6a529314f74acc33d53775 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 12 Feb 2024 21:34:07 +0100 Subject: [PATCH 01/25] fix: sort with casefold, case insensitive https://docs.python.org/3/library/stdtypes.html#str.casefold --- modules/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/util.py b/modules/util.py index c309480ac..9d4d09961 100644 --- a/modules/util.py +++ b/modules/util.py @@ -168,7 +168,7 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): relative_path = os.path.relpath(root, folder_path) if relative_path == ".": relative_path = "" - for filename in sorted(files): + for filename in sorted(files, key=lambda s: s.casefold()): _, file_extension = os.path.splitext(filename) if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _): path = os.path.join(relative_path, filename) From f8ca04a4061a0dabb420c5c271cfd115f88169cd Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 19 Feb 2024 15:22:10 +0100 Subject: [PATCH 02/25] feat: add early return for prompt expansion when no new tokens should be added closes https://github.com/lllyasviel/Fooocus/issues/2278, also removes the trailing comma added before tokenization --- extras/expansion.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extras/expansion.py b/extras/expansion.py index c1b59b8a4..34c1ee8d4 100644 --- a/extras/expansion.py +++ b/extras/expansion.py @@ -112,6 +112,9 @@ def __call__(self, prompt, seed): max_token_length = 75 * int(math.ceil(float(current_token_length) / 75.0)) max_new_tokens = max_token_length - current_token_length + if max_new_tokens == 0: + return prompt[:-1] + # https://huggingface.co/blog/introducing-csearch # https://huggingface.co/docs/transformers/generation_strategies features = self.model.generate(**tokenized_kwargs, From 187f4a76c66ebd4281f5313533af13b6b47a5bf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Charlie=20=E2=9A=A1=EF=B8=8F?= Date: Tue, 20 Feb 2024 21:51:01 -0500 Subject: [PATCH 03/25] Remove Mac-generated invisible files --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index de2f57786..859149866 100644 --- a/.gitignore +++ b/.gitignore @@ -51,3 +51,4 @@ user_path_config-deprecated.txt /package-lock.json /.coverage* /auth.json +.DS_Store From 5b7ddf8b22d3c682de612218fc31245b70f492d8 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 24 Feb 2024 18:59:57 +0100 Subject: [PATCH 04/25] feat: advanced params refactoring + prevent users from skipping/stopping other users' tasks in queue (#981) * only make stop_button and skip_button interactive when rendering process starts fix inconsistency in behaviour of stop_button and skip_button as it was possible to skip or stop other users' processes while still being in queue * use AsyncTask for last_stop handling instead of shared * Revert "only make stop_button and skip_button interactive when rendering process starts" This reverts commit d3f9156854b3d6b4c3d5d736f3b0454743203076.
* introduce state for task skipping/stopping * fix return parameters of stop_clicked * code cleanup, do not disable skip/stop on stop_clicked * reset last_stop when skipping for further processing * fix: replace fcbh with ldm_patched * fix: use currentTask instead of ctrls after merging upstream * feat: extract attribute disable_preview * feat: extract attribute adm_scaler_positive * feat: extract attribute adm_scaler_negative * feat: extract attribute adm_scaler_end * feat: extract attribute adaptive_cfg * feat: extract attribute sampler_name * feat: extract attribute scheduler_name * feat: extract attribute generate_image_grid * feat: extract attribute overwrite_step * feat: extract attribute overwrite_switch * feat: extract attribute overwrite_width * feat: extract attribute overwrite_height * feat: extract attribute overwrite_vary_strength * feat: extract attribute overwrite_upscale_strength * feat: extract attribute mixing_image_prompt_and_vary_upscale * feat: extract attribute mixing_image_prompt_and_inpaint * feat: extract attribute debugging_cn_preprocessor * feat: extract attribute skipping_cn_preprocessor * feat: extract attribute canny_low_threshold * feat: extract attribute canny_high_threshold * feat: extract attribute refiner_swap_method * feat: extract freeu_ctrls attributes freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2 * feat: extract inpaint_ctrls attributes debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate * wip: add TODOs * chore: cleanup code * feat: extract attribute controlnet_softness * feat: extract remaining attributes, do not use globals in patch * fix: resolve circular import, patch_all now in async_worker * chore: cleanup pid code --- extras/preprocessors.py | 17 ++- modules/advanced_parameters.py | 33 ------ modules/async_worker.py | 203 ++++++++++++++++++++------------- modules/core.py | 10 +- modules/default_pipeline.py | 17 ++- modules/patch.py | 70 ++++++------ shared.py | 3 +- webui.py | 61 +++++----- 8 files changed, 218 insertions(+), 196 deletions(-) delete mode 100644 modules/advanced_parameters.py diff --git a/extras/preprocessors.py b/extras/preprocessors.py index 798fe15d2..0aa83109a 100644 --- a/extras/preprocessors.py +++ b/extras/preprocessors.py @@ -1,27 +1,26 @@ import cv2 import numpy as np -import modules.advanced_parameters as advanced_parameters -def centered_canny(x: np.ndarray): +def centered_canny(x: np.ndarray, canny_low_threshold, canny_high_threshold): assert isinstance(x, np.ndarray) assert x.ndim == 2 and x.dtype == np.uint8 - y = cv2.Canny(x, int(advanced_parameters.canny_low_threshold), int(advanced_parameters.canny_high_threshold)) + y = cv2.Canny(x, int(canny_low_threshold), int(canny_high_threshold)) y = y.astype(np.float32) / 255.0 return y -def centered_canny_color(x: np.ndarray): +def centered_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold): assert isinstance(x, np.ndarray) assert x.ndim == 3 and x.shape[2] == 3 - result = [centered_canny(x[..., i]) for i in range(3)] + result = [centered_canny(x[..., i], canny_low_threshold, canny_high_threshold) for i in range(3)] result = np.stack(result, axis=2) return result -def pyramid_canny_color(x: np.ndarray): +def pyramid_canny_color(x: np.ndarray, canny_low_threshold, canny_high_threshold): assert isinstance(x, np.ndarray) assert x.ndim == 3 and x.shape[2] == 3 @@ -31,7 +30,7 @@ def 
pyramid_canny_color(x: np.ndarray): for k in [0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]: Hs, Ws = int(H * k), int(W * k) small = cv2.resize(x, (Ws, Hs), interpolation=cv2.INTER_AREA) - edge = centered_canny_color(small) + edge = centered_canny_color(small, canny_low_threshold, canny_high_threshold) if acc_edge is None: acc_edge = edge else: @@ -54,11 +53,11 @@ def norm255(x, low=4, high=96): return x * 255.0 -def canny_pyramid(x): +def canny_pyramid(x, canny_low_threshold, canny_high_threshold): # For some reasons, SAI's Control-lora Canny seems to be trained on canny maps with non-standard resolutions. # Then we use pyramid to use all resolutions to avoid missing any structure in specific resolutions. - color_canny = pyramid_canny_color(x) + color_canny = pyramid_canny_color(x, canny_low_threshold, canny_high_threshold) result = np.sum(color_canny, axis=2) return norm255(result, low=1, high=99).clip(0, 255).astype(np.uint8) diff --git a/modules/advanced_parameters.py b/modules/advanced_parameters.py deleted file mode 100644 index 0caa3eec8..000000000 --- a/modules/advanced_parameters.py +++ /dev/null @@ -1,33 +0,0 @@ -disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \ - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \ - overwrite_vary_strength, overwrite_upscale_strength, \ - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \ - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \ - refiner_swap_method, \ - freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \ - debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \ - inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate = [None] * 35 - - -def set_all_advanced_parameters(*args): - global disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \ - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \ - overwrite_vary_strength, overwrite_upscale_strength, \ - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \ - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \ - refiner_swap_method, \ - freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \ - debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \ - inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate - - disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, \ - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, \ - overwrite_vary_strength, overwrite_upscale_strength, \ - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, \ - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, canny_low_threshold, canny_high_threshold, \ - refiner_swap_method, \ - freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2, \ - debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, \ - inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate = args - - return diff --git a/modules/async_worker.py 
b/modules/async_worker.py index 40abb7fa4..d0ce4ba91 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -1,4 +1,8 @@ import threading +import os +from modules.patch import PatchSettings, patch_settings, patch_all + +patch_all() class AsyncTask: @@ -6,6 +10,8 @@ def __init__(self, args): self.args = args self.yields = [] self.results = [] + self.last_stop = False + self.processing = False async_tasks = [] @@ -31,7 +37,6 @@ def worker(): import extras.preprocessors as preprocessors import modules.inpaint_worker as inpaint_worker import modules.constants as constants - import modules.advanced_parameters as advanced_parameters import extras.ip_adapter as ip_adapter import extras.face_crop import fooocus_version @@ -43,6 +48,9 @@ def worker(): get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix from modules.upscaler import perform_upscale + pid = os.getpid() + print(f'Started worker with PID {pid}') + try: async_gradio_app = shared.gradio_root flag = f'''App started successful. Use the app with {str(async_gradio_app.local_url)} or {str(async_gradio_app.server_name)}:{str(async_gradio_app.server_port)}''' @@ -69,9 +77,6 @@ def yield_result(async_task, imgs, do_not_show_finished_images=False): return def build_image_wall(async_task): - if not advanced_parameters.generate_image_grid: - return - results = async_task.results if len(results) < 2: @@ -115,6 +120,7 @@ def build_image_wall(async_task): @torch.inference_mode() def handler(async_task): execution_start_time = time.perf_counter() + async_task.processing = True args = async_task.args args.reverse() @@ -140,6 +146,40 @@ def handler(async_task): inpaint_input_image = args.pop() inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() + disable_preview = args.pop() + adm_scaler_positive = args.pop() + adm_scaler_negative = args.pop() + adm_scaler_end = args.pop() + adaptive_cfg = args.pop() + sampler_name = args.pop() + scheduler_name = args.pop() + overwrite_step = args.pop() + overwrite_switch = args.pop() + overwrite_width = args.pop() + overwrite_height = args.pop() + overwrite_vary_strength = args.pop() + overwrite_upscale_strength = args.pop() + mixing_image_prompt_and_vary_upscale = args.pop() + mixing_image_prompt_and_inpaint = args.pop() + debugging_cn_preprocessor = args.pop() + skipping_cn_preprocessor = args.pop() + canny_low_threshold = args.pop() + canny_high_threshold = args.pop() + refiner_swap_method = args.pop() + controlnet_softness = args.pop() + freeu_enabled = args.pop() + freeu_b1 = args.pop() + freeu_b2 = args.pop() + freeu_s1 = args.pop() + freeu_s2 = args.pop() + debugging_inpaint_preprocessor = args.pop() + inpaint_disable_initial_latent = args.pop() + inpaint_engine = args.pop() + inpaint_strength = args.pop() + inpaint_respective_field = args.pop() + inpaint_mask_upload_checkbox = args.pop() + invert_mask_checkbox = args.pop() + inpaint_erode_or_dilate = args.pop() cn_tasks = {x: [] for x in flags.ip_list} for _ in range(4): @@ -186,30 +226,33 @@ def handler(async_task): print(f'Refiner disabled in LCM mode.') refiner_model_name = 'None' - sampler_name = advanced_parameters.sampler_name = 'lcm' - scheduler_name = advanced_parameters.scheduler_name = 'lcm' - modules.patch.sharpness = sharpness = 0.0 - cfg_scale = guidance_scale = 1.0 - modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg = 1.0 + sampler_name = 'lcm' + scheduler_name = 'lcm' + sharpness = 0.0 + guidance_scale = 1.0 + adaptive_cfg = 1.0 
refiner_switch = 1.0 - modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive = 1.0 - modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative = 1.0 - modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end = 0.0 + adm_scaler_positive = 1.0 + adm_scaler_negative = 1.0 + adm_scaler_end = 0.0 steps = 8 - modules.patch.adaptive_cfg = advanced_parameters.adaptive_cfg - print(f'[Parameters] Adaptive CFG = {modules.patch.adaptive_cfg}') - - modules.patch.sharpness = sharpness - print(f'[Parameters] Sharpness = {modules.patch.sharpness}') - - modules.patch.positive_adm_scale = advanced_parameters.adm_scaler_positive - modules.patch.negative_adm_scale = advanced_parameters.adm_scaler_negative - modules.patch.adm_scaler_end = advanced_parameters.adm_scaler_end + print(f'[Parameters] Adaptive CFG = {adaptive_cfg}') + print(f'[Parameters] Sharpness = {sharpness}') + print(f'[Parameters] ControlNet Softness = {controlnet_softness}') print(f'[Parameters] ADM Scale = ' - f'{modules.patch.positive_adm_scale} : ' - f'{modules.patch.negative_adm_scale} : ' - f'{modules.patch.adm_scaler_end}') + f'{adm_scaler_positive} : ' + f'{adm_scaler_negative} : ' + f'{adm_scaler_end}') + + patch_settings[pid] = PatchSettings( + sharpness, + adm_scaler_end, + adm_scaler_positive, + adm_scaler_negative, + controlnet_softness, + adaptive_cfg + ) cfg_scale = float(guidance_scale) print(f'[Parameters] CFG = {cfg_scale}') @@ -222,10 +265,9 @@ def handler(async_task): width, height = int(width), int(height) skip_prompt_processing = False - refiner_swap_method = advanced_parameters.refiner_swap_method inpaint_worker.current_task = None - inpaint_parameterized = advanced_parameters.inpaint_engine != 'None' + inpaint_parameterized = inpaint_engine != 'None' inpaint_image = None inpaint_mask = None inpaint_head_model_path = None @@ -239,15 +281,12 @@ def handler(async_task): seed = int(image_seed) print(f'[Parameters] Seed = {seed}') - sampler_name = advanced_parameters.sampler_name - scheduler_name = advanced_parameters.scheduler_name - goals = [] tasks = [] if input_image_checkbox: if (current_tab == 'uov' or ( - current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_vary_upscale)) \ + current_tab == 'ip' and mixing_image_prompt_and_vary_upscale)) \ and uov_method != flags.disabled and uov_input_image is not None: uov_input_image = HWC3(uov_input_image) if 'vary' in uov_method: @@ -271,12 +310,12 @@ def handler(async_task): progressbar(async_task, 1, 'Downloading upscale models ...') modules.config.downloading_upscale_model() if (current_tab == 'inpaint' or ( - current_tab == 'ip' and advanced_parameters.mixing_image_prompt_and_inpaint)) \ + current_tab == 'ip' and mixing_image_prompt_and_inpaint)) \ and isinstance(inpaint_input_image, dict): inpaint_image = inpaint_input_image['image'] inpaint_mask = inpaint_input_image['mask'][:, :, 0] - - if advanced_parameters.inpaint_mask_upload_checkbox: + + if inpaint_mask_upload_checkbox: if isinstance(inpaint_mask_image_upload, np.ndarray): if inpaint_mask_image_upload.ndim == 3: H, W, C = inpaint_image.shape @@ -285,10 +324,10 @@ def handler(async_task): inpaint_mask_image_upload = (inpaint_mask_image_upload > 127).astype(np.uint8) * 255 inpaint_mask = np.maximum(inpaint_mask, inpaint_mask_image_upload) - if int(advanced_parameters.inpaint_erode_or_dilate) != 0: - inpaint_mask = erode_or_dilate(inpaint_mask, advanced_parameters.inpaint_erode_or_dilate) + if int(inpaint_erode_or_dilate) != 0: + inpaint_mask = 
erode_or_dilate(inpaint_mask, inpaint_erode_or_dilate) - if advanced_parameters.invert_mask_checkbox: + if invert_mask_checkbox: inpaint_mask = 255 - inpaint_mask inpaint_image = HWC3(inpaint_image) @@ -299,7 +338,7 @@ def handler(async_task): if inpaint_parameterized: progressbar(async_task, 1, 'Downloading inpainter ...') inpaint_head_model_path, inpaint_patch_model_path = modules.config.downloading_inpaint_models( - advanced_parameters.inpaint_engine) + inpaint_engine) base_model_additional_loras += [(inpaint_patch_model_path, 1.0)] print(f'[Inpaint] Current inpaint model is {inpaint_patch_model_path}') if refiner_model_name == 'None': @@ -315,8 +354,8 @@ def handler(async_task): prompt = inpaint_additional_prompt + '\n' + prompt goals.append('inpaint') if current_tab == 'ip' or \ - advanced_parameters.mixing_image_prompt_and_inpaint or \ - advanced_parameters.mixing_image_prompt_and_vary_upscale: + mixing_image_prompt_and_vary_upscale or \ + mixing_image_prompt_and_inpaint: goals.append('cn') progressbar(async_task, 1, 'Downloading control models ...') if len(cn_tasks[flags.cn_canny]) > 0: @@ -335,19 +374,19 @@ def handler(async_task): ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_path) ip_adapter.load_ip_adapter(clip_vision_path, ip_negative_path, ip_adapter_face_path) - if advanced_parameters.overwrite_step > 0: - steps = advanced_parameters.overwrite_step + if overwrite_step > 0: + steps = overwrite_step switch = int(round(steps * refiner_switch)) - if advanced_parameters.overwrite_switch > 0: - switch = advanced_parameters.overwrite_switch + if overwrite_switch > 0: + switch = overwrite_switch - if advanced_parameters.overwrite_width > 0: - width = advanced_parameters.overwrite_width + if overwrite_width > 0: + width = overwrite_width - if advanced_parameters.overwrite_height > 0: - height = advanced_parameters.overwrite_height + if overwrite_height > 0: + height = overwrite_height print(f'[Parameters] Sampler = {sampler_name} - {scheduler_name}') print(f'[Parameters] Steps = {steps} - {switch}') @@ -446,8 +485,8 @@ def handler(async_task): denoising_strength = 0.5 if 'strong' in uov_method: denoising_strength = 0.85 - if advanced_parameters.overwrite_vary_strength > 0: - denoising_strength = advanced_parameters.overwrite_vary_strength + if overwrite_vary_strength > 0: + denoising_strength = overwrite_vary_strength shape_ceil = get_image_shape_ceil(uov_input_image) if shape_ceil < 1024: @@ -518,8 +557,8 @@ def handler(async_task): tiled = True denoising_strength = 0.382 - if advanced_parameters.overwrite_upscale_strength > 0: - denoising_strength = advanced_parameters.overwrite_upscale_strength + if overwrite_upscale_strength > 0: + denoising_strength = overwrite_upscale_strength initial_pixels = core.numpy_to_pytorch(uov_input_image) progressbar(async_task, 13, 'VAE encoding ...') @@ -563,19 +602,19 @@ def handler(async_task): inpaint_image = np.ascontiguousarray(inpaint_image.copy()) inpaint_mask = np.ascontiguousarray(inpaint_mask.copy()) - advanced_parameters.inpaint_strength = 1.0 - advanced_parameters.inpaint_respective_field = 1.0 + inpaint_strength = 1.0 + inpaint_respective_field = 1.0 - denoising_strength = advanced_parameters.inpaint_strength + denoising_strength = inpaint_strength inpaint_worker.current_task = inpaint_worker.InpaintWorker( image=inpaint_image, mask=inpaint_mask, use_fill=denoising_strength > 0.99, - k=advanced_parameters.inpaint_respective_field + k=inpaint_respective_field ) - if 
advanced_parameters.debugging_inpaint_preprocessor: + if debugging_inpaint_preprocessor: yield_result(async_task, inpaint_worker.current_task.visualize_mask_processing(), do_not_show_finished_images=True) return @@ -621,7 +660,7 @@ def handler(async_task): model=pipeline.final_unet ) - if not advanced_parameters.inpaint_disable_initial_latent: + if not inpaint_disable_initial_latent: initial_latent = {'samples': latent_fill} B, C, H, W = latent_fill.shape @@ -634,24 +673,24 @@ def handler(async_task): cn_img, cn_stop, cn_weight = task cn_img = resize_image(HWC3(cn_img), width=width, height=height) - if not advanced_parameters.skipping_cn_preprocessor: - cn_img = preprocessors.canny_pyramid(cn_img) + if not skipping_cn_preprocessor: + cn_img = preprocessors.canny_pyramid(cn_img, canny_low_threshold, canny_high_threshold) cn_img = HWC3(cn_img) task[0] = core.numpy_to_pytorch(cn_img) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return for task in cn_tasks[flags.cn_cpds]: cn_img, cn_stop, cn_weight = task cn_img = resize_image(HWC3(cn_img), width=width, height=height) - if not advanced_parameters.skipping_cn_preprocessor: + if not skipping_cn_preprocessor: cn_img = preprocessors.cpds(cn_img) cn_img = HWC3(cn_img) task[0] = core.numpy_to_pytorch(cn_img) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return for task in cn_tasks[flags.cn_ip]: @@ -662,21 +701,21 @@ def handler(async_task): cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0) task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_path) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return for task in cn_tasks[flags.cn_ip_face]: cn_img, cn_stop, cn_weight = task cn_img = HWC3(cn_img) - if not advanced_parameters.skipping_cn_preprocessor: + if not skipping_cn_preprocessor: cn_img = extras.face_crop.crop_image(cn_img) # https://github.com/tencent-ailab/IP-Adapter/blob/d580c50a291566bbf9fc7ac0f760506607297e6d/README.md?plain=1#L75 cn_img = resize_image(cn_img, width=224, height=224, resize_mode=0) task[0] = ip_adapter.preprocess(cn_img, ip_adapter_path=ip_adapter_face_path) - if advanced_parameters.debugging_cn_preprocessor: + if debugging_cn_preprocessor: yield_result(async_task, cn_img, do_not_show_finished_images=True) return @@ -685,14 +724,14 @@ def handler(async_task): if len(all_ip_tasks) > 0: pipeline.final_unet = ip_adapter.patch_model(pipeline.final_unet, all_ip_tasks) - if advanced_parameters.freeu_enabled: + if freeu_enabled: print(f'FreeU is enabled!') pipeline.final_unet = core.apply_freeu( pipeline.final_unet, - advanced_parameters.freeu_b1, - advanced_parameters.freeu_b2, - advanced_parameters.freeu_s1, - advanced_parameters.freeu_s2 + freeu_b1, + freeu_b2, + freeu_s1, + freeu_s2 ) all_steps = steps * image_number @@ -738,6 +777,8 @@ def callback(step, x0, x, total_steps, y): execution_start_time = time.perf_counter() try: + if async_task.last_stop is not False: + ldm_patched.model_management.interrupt_current_processing() positive_cond, negative_cond = task['c'], task['uc'] if 'cn' in goals: @@ -765,7 +806,8 @@ def callback(step, x0, x, total_steps, y): denoise=denoising_strength, tiled=tiled, cfg_scale=cfg_scale, - refiner_swap_method=refiner_swap_method + 
refiner_swap_method=refiner_swap_method, + disable_preview=disable_preview ) del task['c'], task['uc'], positive_cond, negative_cond # Save memory @@ -784,9 +826,9 @@ def callback(step, x0, x, total_steps, y): ('Sharpness', sharpness), ('Guidance Scale', guidance_scale), ('ADM Guidance', str(( - modules.patch.positive_adm_scale, - modules.patch.negative_adm_scale, - modules.patch.adm_scaler_end))), + modules.patch.patch_settings[pid].positive_adm_scale, + modules.patch.patch_settings[pid].negative_adm_scale, + modules.patch.patch_settings[pid].adm_scaler_end))), ('Base Model', base_model_name), ('Refiner Model', refiner_model_name), ('Refiner Switch', refiner_switch), @@ -802,8 +844,9 @@ def callback(step, x0, x, total_steps, y): yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1) except ldm_patched.modules.model_management.InterruptProcessingException as e: - if shared.last_stop == 'skip': + if async_task.last_stop == 'skip': print('User skipped') + async_task.last_stop = False continue else: print('User stopped') @@ -811,21 +854,27 @@ def callback(step, x0, x, total_steps, y): execution_time = time.perf_counter() - execution_start_time print(f'Generating and saving time: {execution_time:.2f} seconds') - + async_task.processing = False return while True: time.sleep(0.01) if len(async_tasks) > 0: task = async_tasks.pop(0) + generate_image_grid = task.args.pop(0) + try: handler(task) - build_image_wall(task) + if generate_image_grid: + build_image_wall(task) task.yields.append(['finish', task.results]) pipeline.prepare_text_encoder(async_call=True) except: traceback.print_exc() task.yields.append(['finish', task.results]) + finally: + if pid in modules.patch.patch_settings: + del modules.patch.patch_settings[pid] pass diff --git a/modules/core.py b/modules/core.py index 989b8e321..7a29d9883 100644 --- a/modules/core.py +++ b/modules/core.py @@ -1,8 +1,3 @@ -from modules.patch import patch_all - -patch_all() - - import os import einops import torch @@ -16,7 +11,6 @@ import modules.sample_hijack import ldm_patched.modules.samplers import ldm_patched.modules.latent_formats -import modules.advanced_parameters from ldm_patched.modules.sd import load_checkpoint_guess_config from ldm_patched.contrib.external import VAEDecode, EmptyLatentImage, VAEEncode, VAEEncodeTiled, VAEDecodeTiled, \ @@ -268,7 +262,7 @@ def preview_function(x0, step, total_steps): def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sampler_name='dpmpp_2m_sde_gpu', scheduler='karras', denoise=1.0, disable_noise=False, start_step=None, last_step=None, force_full_denoise=False, callback_function=None, refiner=None, refiner_switch=-1, - previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None): + previewer_start=None, previewer_end=None, sigmas=None, noise_mean=None, disable_preview=False): if sigmas is not None: sigmas = sigmas.clone().to(ldm_patched.modules.model_management.get_torch_device()) @@ -299,7 +293,7 @@ def ksampler(model, positive, negative, latent, seed=None, steps=30, cfg=7.0, sa def callback(step, x0, x, total_steps): ldm_patched.modules.model_management.throw_exception_if_processing_interrupted() y = None - if previewer is not None and not modules.advanced_parameters.disable_preview: + if previewer is not None and not disable_preview: y = previewer(x0, previewer_start + step, previewer_end) if callback_function is not None: callback_function(previewer_start + step, x0, x, previewer_end, y) diff --git a/modules/default_pipeline.py 
b/modules/default_pipeline.py index 6001d97f0..2f45667cf 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -315,7 +315,7 @@ def get_candidate_vae(steps, switch, denoise=1.0, refiner_swap_method='joint'): @torch.no_grad() @torch.inference_mode() -def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint'): +def process_diffusion(positive_cond, negative_cond, steps, switch, width, height, image_seed, callback, sampler_name, scheduler_name, latent=None, denoise=1.0, tiled=False, cfg_scale=7.0, refiner_swap_method='joint', disable_preview=False): target_unet, target_vae, target_refiner_unet, target_refiner_vae, target_clip \ = final_unet, final_vae, final_refiner_unet, final_refiner_vae, final_clip @@ -374,6 +374,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height refiner_switch=switch, previewer_start=0, previewer_end=steps, + disable_preview=disable_preview ) decoded_latent = core.decode_vae(vae=target_vae, latent_image=sampled_latent, tiled=tiled) @@ -392,6 +393,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height scheduler=scheduler_name, previewer_start=0, previewer_end=steps, + disable_preview=disable_preview ) print('Refiner swapped by changing ksampler. Noise preserved.') @@ -414,6 +416,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height scheduler=scheduler_name, previewer_start=switch, previewer_end=steps, + disable_preview=disable_preview ) target_model = target_refiner_vae @@ -422,7 +425,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled) if refiner_swap_method == 'vae': - modules.patch.eps_record = 'vae' + modules.patch.patch_settings[os.getpid()].eps_record = 'vae' if modules.inpaint_worker.current_task is not None: modules.inpaint_worker.current_task.unswap() @@ -440,7 +443,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height sampler_name=sampler_name, scheduler=scheduler_name, previewer_start=0, - previewer_end=steps + previewer_end=steps, + disable_preview=disable_preview ) print('Fooocus VAE-based swap.') @@ -459,7 +463,7 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height denoise=denoise)[switch:] * k_sigmas len_sigmas = len(sigmas) - 1 - noise_mean = torch.mean(modules.patch.eps_record, dim=1, keepdim=True) + noise_mean = torch.mean(modules.patch.patch_settings[os.getpid()].eps_record, dim=1, keepdim=True) if modules.inpaint_worker.current_task is not None: modules.inpaint_worker.current_task.swap() @@ -479,7 +483,8 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height previewer_start=switch, previewer_end=steps, sigmas=sigmas, - noise_mean=noise_mean + noise_mean=noise_mean, + disable_preview=disable_preview ) target_model = target_refiner_vae @@ -488,5 +493,5 @@ def process_diffusion(positive_cond, negative_cond, steps, switch, width, height decoded_latent = core.decode_vae(vae=target_model, latent_image=sampled_latent, tiled=tiled) images = core.pytorch_to_numpy(decoded_latent) - modules.patch.eps_record = None + modules.patch.patch_settings[os.getpid()].eps_record = None return images diff --git a/modules/patch.py b/modules/patch.py index 2e2409c54..3c2dd8f47 100644 --- 
a/modules/patch.py +++ b/modules/patch.py @@ -17,7 +17,6 @@ import ldm_patched.modules.model_patcher import ldm_patched.modules.samplers import ldm_patched.modules.args_parser -import modules.advanced_parameters as advanced_parameters import warnings import safetensors.torch import modules.constants as constants @@ -29,15 +28,25 @@ from modules.patch_clip import patch_all_clip -sharpness = 2.0 +class PatchSettings: + def __init__(self, + sharpness=2.0, + adm_scaler_end=0.3, + positive_adm_scale=1.5, + negative_adm_scale=0.8, + controlnet_softness=0.25, + adaptive_cfg=7.0): + self.sharpness = sharpness + self.adm_scaler_end = adm_scaler_end + self.positive_adm_scale = positive_adm_scale + self.negative_adm_scale = negative_adm_scale + self.controlnet_softness = controlnet_softness + self.adaptive_cfg = adaptive_cfg + self.global_diffusion_progress = 0 + self.eps_record = None -adm_scaler_end = 0.3 -positive_adm_scale = 1.5 -negative_adm_scale = 0.8 -adaptive_cfg = 7.0 -global_diffusion_progress = 0 -eps_record = None +patch_settings = {} def calculate_weight_patched(self, patches, weight, key): @@ -201,14 +210,13 @@ def __call__(sigma, sigma_next): def compute_cfg(uncond, cond, cfg_scale, t): - global adaptive_cfg - - mimic_cfg = float(adaptive_cfg) + pid = os.getpid() + mimic_cfg = float(patch_settings[pid].adaptive_cfg) real_cfg = float(cfg_scale) real_eps = uncond + real_cfg * (cond - uncond) - if cfg_scale > adaptive_cfg: + if cfg_scale > patch_settings[pid].adaptive_cfg: mimicked_eps = uncond + mimic_cfg * (cond - uncond) return real_eps * t + mimicked_eps * (1 - t) else: @@ -216,13 +224,13 @@ def compute_cfg(uncond, cond, cfg_scale, t): def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, model_options=None, seed=None): - global eps_record + pid = os.getpid() if math.isclose(cond_scale, 1.0) and not model_options.get("disable_cfg1_optimization", False): final_x0 = calc_cond_uncond_batch(model, cond, None, x, timestep, model_options)[0] - if eps_record is not None: - eps_record = ((x - final_x0) / timestep).cpu() + if patch_settings[pid].eps_record is not None: + patch_settings[pid].eps_record = ((x - final_x0) / timestep).cpu() return final_x0 @@ -231,16 +239,16 @@ def patched_sampling_function(model, x, timestep, uncond, cond, cond_scale, mode positive_eps = x - positive_x0 negative_eps = x - negative_x0 - alpha = 0.001 * sharpness * global_diffusion_progress + alpha = 0.001 * patch_settings[pid].sharpness * patch_settings[pid].global_diffusion_progress positive_eps_degraded = anisotropic.adaptive_anisotropic_filter(x=positive_eps, g=positive_x0) positive_eps_degraded_weighted = positive_eps_degraded * alpha + positive_eps * (1.0 - alpha) final_eps = compute_cfg(uncond=negative_eps, cond=positive_eps_degraded_weighted, - cfg_scale=cond_scale, t=global_diffusion_progress) + cfg_scale=cond_scale, t=patch_settings[pid].global_diffusion_progress) - if eps_record is not None: - eps_record = (final_eps / timestep).cpu() + if patch_settings[pid].eps_record is not None: + patch_settings[pid].eps_record = (final_eps / timestep).cpu() return x - final_eps @@ -255,20 +263,19 @@ def round_to_64(x): def sdxl_encode_adm_patched(self, **kwargs): - global positive_adm_scale, negative_adm_scale - clip_pooled = ldm_patched.modules.model_base.sdxl_pooled(kwargs, self.noise_augmentor) width = kwargs.get("width", 1024) height = kwargs.get("height", 1024) target_width = width target_height = height + pid = os.getpid() if kwargs.get("prompt_type", "") == "negative": - width = 
float(width) * negative_adm_scale - height = float(height) * negative_adm_scale + width = float(width) * patch_settings[pid].negative_adm_scale + height = float(height) * patch_settings[pid].negative_adm_scale elif kwargs.get("prompt_type", "") == "positive": - width = float(width) * positive_adm_scale - height = float(height) * positive_adm_scale + width = float(width) * patch_settings[pid].positive_adm_scale + height = float(height) * patch_settings[pid].positive_adm_scale def embedder(number_list): h = self.embedder(torch.tensor(number_list, dtype=torch.float32)) @@ -322,7 +329,7 @@ def patched_KSamplerX0Inpaint_forward(self, x, sigma, uncond, cond, cond_scale, def timed_adm(y, timesteps): if isinstance(y, torch.Tensor) and int(y.dim()) == 2 and int(y.shape[1]) == 5632: - y_mask = (timesteps > 999.0 * (1.0 - float(adm_scaler_end))).to(y)[..., None] + y_mask = (timesteps > 999.0 * (1.0 - float(patch_settings[os.getpid()].adm_scaler_end))).to(y)[..., None] y_with_adm = y[..., :2816].clone() y_without_adm = y[..., 2816:].clone() return y_with_adm * y_mask + y_without_adm * (1.0 - y_mask) @@ -332,6 +339,7 @@ def timed_adm(y, timesteps): def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs): t_emb = ldm_patched.ldm.modules.diffusionmodules.openaimodel.timestep_embedding(timesteps, self.model_channels, repeat_only=False).to(x.dtype) emb = self.time_embed(t_emb) + pid = os.getpid() guided_hint = self.input_hint_block(hint, emb, context) @@ -357,19 +365,17 @@ def patched_cldm_forward(self, x, hint, timesteps, context, y=None, **kwargs): h = self.middle_block(h, emb, context) outs.append(self.middle_block_out(h, emb, context)) - if advanced_parameters.controlnet_softness > 0: + if patch_settings[pid].controlnet_softness > 0: for i in range(10): k = 1.0 - float(i) / 9.0 - outs[i] = outs[i] * (1.0 - advanced_parameters.controlnet_softness * k) + outs[i] = outs[i] * (1.0 - patch_settings[pid].controlnet_softness * k) return outs def patched_unet_forward(self, x, timesteps=None, context=None, y=None, control=None, transformer_options={}, **kwargs): - global global_diffusion_progress - self.current_step = 1.0 - timesteps.to(x) / 999.0 - global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0]) + patch_settings[os.getpid()].global_diffusion_progress = float(self.current_step.detach().cpu().numpy().tolist()[0]) y = timed_adm(y, timesteps) @@ -483,7 +489,7 @@ def patch_all(): if ldm_patched.modules.model_management.directml_enabled: ldm_patched.modules.model_management.lowvram_available = True ldm_patched.modules.model_management.OOM_EXCEPTION = Exception - + patch_all_precision() patch_all_clip() diff --git a/shared.py b/shared.py index 269809e3f..21a2a864b 100644 --- a/shared.py +++ b/shared.py @@ -1,2 +1 @@ -gradio_root = None -last_stop = None +gradio_root = None \ No newline at end of file diff --git a/webui.py b/webui.py index b9b620d24..05b7d20ef 100644 --- a/webui.py +++ b/webui.py @@ -11,7 +11,6 @@ import modules.constants as constants import modules.flags as flags import modules.gradio_hijack as grh -import modules.advanced_parameters as advanced_parameters import modules.style_sorter as style_sorter import modules.meta_parser import args_manager @@ -22,17 +21,19 @@ from modules.ui_gradio_extensions import reload_javascript from modules.auth import auth_enabled, check_auth +def get_task(*args): + args = list(args) + args.pop(0) -def generate_clicked(*args): + return worker.AsyncTask(args=args) + +def generate_clicked(task): import 
ldm_patched.modules.model_management as model_management with model_management.interrupt_processing_mutex: model_management.interrupt_processing = False - # outputs=[progress_html, progress_window, progress_gallery, gallery] - execution_start_time = time.perf_counter() - task = worker.AsyncTask(args=list(args)) finished = False yield gr.update(visible=True, value=modules.html.make_progress_html(1, 'Waiting for task to start ...')), \ @@ -88,6 +89,7 @@ def generate_clicked(*args): css=modules.html.css).queue() with shared.gradio_root: + currentTask = gr.State(worker.AsyncTask(args=[])) with gr.Row(): with gr.Column(scale=2): with gr.Row(): @@ -115,21 +117,22 @@ def generate_clicked(*args): skip_button = gr.Button(label="Skip", value="Skip", elem_classes='type_row_half', visible=False) stop_button = gr.Button(label="Stop", value="Stop", elem_classes='type_row_half', elem_id='stop_button', visible=False) - def stop_clicked(): + def stop_clicked(currentTask): import ldm_patched.modules.model_management as model_management - shared.last_stop = 'stop' - model_management.interrupt_current_processing() - return [gr.update(interactive=False)] * 2 + currentTask.last_stop = 'stop' + if (currentTask.processing): + model_management.interrupt_current_processing() + return currentTask - def skip_clicked(): + def skip_clicked(currentTask): import ldm_patched.modules.model_management as model_management - shared.last_stop = 'skip' - model_management.interrupt_current_processing() - return + currentTask.last_stop = 'skip' + if (currentTask.processing): + model_management.interrupt_current_processing() + return currentTask - stop_button.click(stop_clicked, outputs=[skip_button, stop_button], - queue=False, show_progress=False, _js='cancelGenerateForever') - skip_button.click(skip_clicked, queue=False, show_progress=False) + stop_button.click(stop_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False, _js='cancelGenerateForever') + skip_button.click(skip_clicked, inputs=currentTask, outputs=currentTask, queue=False, show_progress=False) with gr.Row(elem_classes='advanced_check_row'): input_image_checkbox = gr.Checkbox(label='Input Image', value=False, container=False, elem_classes='min_check') advanced_checkbox = gr.Checkbox(label='Advanced', value=modules.config.default_advanced_checkbox, container=False, elem_classes='min_check') @@ -435,7 +438,7 @@ def update_history_link(): '(default is 0, always process before any mask invert)') inpaint_mask_upload_checkbox = gr.Checkbox(label='Enable Mask Upload', value=False) invert_mask_checkbox = gr.Checkbox(label='Invert Mask', value=False) - + inpaint_ctrls = [debugging_inpaint_preprocessor, inpaint_disable_initial_latent, inpaint_engine, inpaint_strength, inpaint_respective_field, inpaint_mask_upload_checkbox, invert_mask_checkbox, inpaint_erode_or_dilate] @@ -452,15 +455,6 @@ def update_history_link(): freeu_s2 = gr.Slider(label='S2', minimum=0, maximum=4, step=0.01, value=0.95) freeu_ctrls = [freeu_enabled, freeu_b1, freeu_b2, freeu_s1, freeu_s2] - adps = [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg, sampler_name, - scheduler_name, generate_image_grid, overwrite_step, overwrite_switch, overwrite_width, overwrite_height, - overwrite_vary_strength, overwrite_upscale_strength, - mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint, - debugging_cn_preprocessor, skipping_cn_preprocessor, controlnet_softness, - canny_low_threshold, canny_high_threshold, refiner_swap_method] - 
adps += freeu_ctrls - adps += inpaint_ctrls - def dev_mode_checked(r): return gr.update(visible=r) @@ -525,7 +519,8 @@ def inpaint_mode_change(mode): inpaint_strength, inpaint_respective_field ], show_progress=False, queue=False) - ctrls = [ + ctrls = [currentTask, generate_image_grid] + ctrls += [ prompt, negative_prompt, style_selections, performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale ] @@ -534,6 +529,14 @@ def inpaint_mode_change(mode): ctrls += [input_image_checkbox, current_tab] ctrls += [uov_method, uov_input_image] ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image] + ctrls += [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] + ctrls += [sampler_name, scheduler_name] + ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength] + ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint] + ctrls += [debugging_cn_preprocessor, skipping_cn_preprocessor, canny_low_threshold, canny_high_threshold] + ctrls += [refiner_swap_method, controlnet_softness] + ctrls += freeu_ctrls + ctrls += inpaint_ctrls ctrls += ip_ctrls state_is_generating = gr.State(False) @@ -588,8 +591,8 @@ def parse_meta(raw_prompt_txt, is_generating): generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True), outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \ .then(fn=refresh_seed, inputs=[seed_random, image_seed], outputs=image_seed) \ - .then(advanced_parameters.set_all_advanced_parameters, inputs=adps) \ - .then(fn=generate_clicked, inputs=ctrls, outputs=[progress_html, progress_window, progress_gallery, gallery]) \ + .then(fn=get_task, inputs=ctrls, outputs=currentTask) \ + .then(fn=generate_clicked, inputs=currentTask, outputs=[progress_html, progress_window, progress_gallery, gallery]) \ .then(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), gr.update(visible=False, interactive=False), False), outputs=[generate_button, stop_button, skip_button, state_is_generating]) \ .then(fn=update_history_link, outputs=history_link) \ From 965364cd80e63686bd1138306995bbcea29a4d14 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 24 Feb 2024 19:03:25 +0100 Subject: [PATCH 05/25] feat: add list of 100 most popular animals to wildcards (#985) --- wildcards/animal.txt | 100 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 100 insertions(+) create mode 100644 wildcards/animal.txt diff --git a/wildcards/animal.txt b/wildcards/animal.txt new file mode 100644 index 000000000..9a6f09ba8 --- /dev/null +++ b/wildcards/animal.txt @@ -0,0 +1,100 @@ +Alligator +Ant +Antelope +Armadillo +Badger +Bat +Bear +Beaver +Bison +Boar +Bobcat +Bull +Camel +Chameleon +Cheetah +Chicken +Chihuahua +Chimpanzee +Chinchilla +Chipmunk +Comodo Dragon +Cow +Coyote +Crocodile +Crow +Deer +Dinosaur +Dolphin +Donkey +Duck +Eagle +Eel +Elephant +Elk +Emu +Falcon +Ferret +Flamingo +Flying Squirrel +Giraffe +Goose +Guinea pig +Hawk +Hedgehog +Hippopotamus +Horse +Hummingbird +Hyena +Jackal +Jaguar +Jellyfish +Kangaroo +King Cobra +Koala bear +Leopard +Lion +Lizard +Magpie +Marten +Meerkat +Mole +Monkey +Moose +Mouse +Octopus +Okapi +Orangutan +Ostrich +Otter +Owl +Panda +Pangolin 
+Panther +Penguin +Pig +Porcupine +Possum +Puma +Quokka +Rabbit +Raccoon +Raven +Reindeer +Rhinoceros +Seal +Shark +Sheep +Snail +Snake +Sparrow +Spider +Squirrel +Swallow +Tiger +Walrus +Whale +Wolf +Wombat +Yak +Zebra \ No newline at end of file From 7cfb5e742db2b22eab61966b4be5300bd96dc53c Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sat, 24 Feb 2024 20:07:36 +0100 Subject: [PATCH 06/25] feat: add advanced parameter for disable_intermediate_results (progress_gallery) (#1013) * add advanced parameter for disable_intermediate_results prevents gradio frontend process from clogging image output and updates in high throughput scenarios such as LCM with image number >= 4 * update disable_intermediate_results correctly based on default and selected performance * chore: add missing translations --- language/en.json | 4 ++++ modules/async_worker.py | 3 ++- webui.py | 12 +++++++++--- 3 files changed, 15 insertions(+), 4 deletions(-) diff --git a/language/en.json b/language/en.json index fd40ca2f8..8a782e3fd 100644 --- a/language/en.json +++ b/language/en.json @@ -342,6 +342,10 @@ "Forced Overwrite of Denoising Strength of \"Vary\"": "Forced Overwrite of Denoising Strength of \"Vary\"", "Set as negative number to disable. For developer debugging.": "Set as negative number to disable. For developer debugging.", "Forced Overwrite of Denoising Strength of \"Upscale\"": "Forced Overwrite of Denoising Strength of \"Upscale\"", + "Disable Preview": "Disable Preview", + "Disable preview during generation.": "Disable preview during generation.", + "Disable Intermediate Results": "Disable Intermediate Results", + "Disable intermediate results during generation, only show final gallery.": "Disable intermediate results during generation, only show final gallery.", "Inpaint Engine": "Inpaint Engine", "v1": "v1", "Version of Fooocus inpaint model": "Version of Fooocus inpaint model", diff --git a/modules/async_worker.py b/modules/async_worker.py index d0ce4ba91..a304e6974 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -147,6 +147,7 @@ def handler(async_task): inpaint_additional_prompt = args.pop() inpaint_mask_image_upload = args.pop() disable_preview = args.pop() + disable_intermediate_results = args.pop() adm_scaler_positive = args.pop() adm_scaler_negative = args.pop() adm_scaler_end = args.pop() @@ -842,7 +843,7 @@ def callback(step, x0, x, total_steps, y): d.append(('Version', 'v' + fooocus_version.version)) log(x, d) - yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1) + yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) except ldm_patched.modules.model_management.InterruptProcessingException as e: if async_task.last_stop == 'skip': print('User skipped') diff --git a/webui.py b/webui.py index 05b7d20ef..0d8c3c04c 100644 --- a/webui.py +++ b/webui.py @@ -390,6 +390,10 @@ def update_history_link(): info='Set as negative number to disable. 
For developer debugging.') disable_preview = gr.Checkbox(label='Disable Preview', value=False, info='Disable preview during generation.') + disable_intermediate_results = gr.Checkbox(label='Disable Intermediate Results', + value=modules.config.default_performance == 'Extreme Speed', + interactive=modules.config.default_performance != 'Extreme Speed', + info='Disable intermediate results during generation, only show final gallery.') with gr.Tab(label='Control'): debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False, @@ -474,12 +478,13 @@ def model_refresh_clicked(): queue=False, show_progress=False) performance_selection.change(lambda x: [gr.update(interactive=x != 'Extreme Speed')] * 11 + - [gr.update(visible=x != 'Extreme Speed')] * 1, + [gr.update(visible=x != 'Extreme Speed')] * 1 + + [gr.update(interactive=x != 'Extreme Speed', value=x == 'Extreme Speed', )] * 1, inputs=performance_selection, outputs=[ guidance_scale, sharpness, adm_scaler_end, adm_scaler_positive, adm_scaler_negative, refiner_switch, refiner_model, sampler_name, - scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt + scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results ], queue=False, show_progress=False) advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column, @@ -529,7 +534,8 @@ def inpaint_mode_change(mode): ctrls += [input_image_checkbox, current_tab] ctrls += [uov_method, uov_input_image] ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image] - ctrls += [disable_preview, adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] + ctrls += [disable_preview, disable_intermediate_results] + ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] ctrls += [sampler_name, scheduler_name] ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength] ctrls += [overwrite_upscale_strength, mixing_image_prompt_and_vary_upscale, mixing_image_prompt_and_inpaint] From ef1999c52c8b0ae7fb26ee4563dfca9cc3b5c5c6 Mon Sep 17 00:00:00 2001 From: dooglewoogle <46539436+dooglewoogle@users.noreply.github.com> Date: Mon, 26 Feb 2024 00:47:14 +1300 Subject: [PATCH 07/25] feat: add ability to load checkpoints and loras from multiple locations (#1256) * Add ability to load checkpoints and loras from multiple locations * Found another location a default path is required * feat: use array as default --------- Co-authored-by: Manuel Schmid --- launch.py | 9 ++++----- modules/config.py | 35 +++++++++++++++++++++++++---------- modules/core.py | 3 ++- modules/default_pipeline.py | 5 +++-- modules/util.py | 9 +++++++++ 5 files changed, 43 insertions(+), 18 deletions(-) diff --git a/launch.py b/launch.py index db174f54c..4269f1fcb 100644 --- a/launch.py +++ b/launch.py @@ -68,7 +68,6 @@ def prepare_environment(): 'https://huggingface.co/lllyasviel/misc/resolve/main/xl-to-v1_interposer-v3.1.safetensors') ] - def ini_args(): from args_manager import args return args @@ -101,9 +100,9 @@ def download_models(): return if not args.always_download_new_model: - if not os.path.exists(os.path.join(config.path_checkpoints, config.default_base_model_name)): + if not os.path.exists(os.path.join(config.paths_checkpoints[0], config.default_base_model_name)): for alternative_model_name in config.previous_default_models: - if os.path.exists(os.path.join(config.path_checkpoints, alternative_model_name)): + if 
os.path.exists(os.path.join(config.paths_checkpoints[0], alternative_model_name)): print(f'You do not have [{config.default_base_model_name}] but you have [{alternative_model_name}].') print(f'Fooocus will use [{alternative_model_name}] to avoid downloading new models, ' f'but you are not using latest models.') @@ -113,11 +112,11 @@ def download_models(): break for file_name, url in config.checkpoint_downloads.items(): - load_file_from_url(url=url, model_dir=config.path_checkpoints, file_name=file_name) + load_file_from_url(url=url, model_dir=config.paths_checkpoints[0], file_name=file_name) for file_name, url in config.embeddings_downloads.items(): load_file_from_url(url=url, model_dir=config.path_embeddings, file_name=file_name) for file_name, url in config.lora_downloads.items(): - load_file_from_url(url=url, model_dir=config.path_loras, file_name=file_name) + load_file_from_url(url=url, model_dir=config.paths_loras[0], file_name=file_name) return diff --git a/modules/config.py b/modules/config.py index 1f4e82eb5..d3be1f212 100644 --- a/modules/config.py +++ b/modules/config.py @@ -114,7 +114,7 @@ def get_path_output() -> str: return path_output -def get_dir_or_set_default(key, default_value): +def get_dir_or_set_default(key, default_value, as_array=False): global config_dict, visited_keys, always_save_keys if key not in visited_keys: @@ -125,18 +125,29 @@ def get_dir_or_set_default(key, default_value): v = config_dict.get(key, None) if isinstance(v, str) and os.path.exists(v) and os.path.isdir(v): + return v if not as_array else [v] + elif isinstance(v, list) and all([os.path.exists(d) and os.path.isdir(d) for d in v]): return v else: if v is not None: print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.') - dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) - os.makedirs(dp, exist_ok=True) + if isinstance(default_value, list): + dp = [] + for path in default_value: + abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path)) + dp.append(abs_path) + os.makedirs(abs_path, exist_ok=True) + else: + dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value)) + os.makedirs(dp, exist_ok=True) + if as_array: + dp = [dp] config_dict[key] = dp return dp -path_checkpoints = get_dir_or_set_default('path_checkpoints', '../models/checkpoints/') -path_loras = get_dir_or_set_default('path_loras', '../models/loras/') +paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True) +paths_loras = get_dir_or_set_default('path_loras', ['../models/loras/'], True) path_embeddings = get_dir_or_set_default('path_embeddings', '../models/embeddings/') path_vae_approx = get_dir_or_set_default('path_vae_approx', '../models/vae_approx/') path_upscale_models = get_dir_or_set_default('path_upscale_models', '../models/upscale_models/') @@ -404,14 +415,18 @@ def add_ratio(x): lora_filenames = [] -def get_model_filenames(folder_path, name_filter=None): - return get_files_from_folder(folder_path, ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'], name_filter) +def get_model_filenames(folder_paths, name_filter=None): + extensions = ['.pth', '.ckpt', '.bin', '.safetensors', '.fooocus.patch'] + files = [] + for folder in folder_paths: + files += get_files_from_folder(folder, extensions, name_filter) + return files def update_all_model_names(): global model_filenames, lora_filenames - model_filenames = get_model_filenames(path_checkpoints) 
- lora_filenames = get_model_filenames(path_loras) + model_filenames = get_model_filenames(paths_checkpoints) + lora_filenames = get_model_filenames(paths_loras) return @@ -456,7 +471,7 @@ def downloading_inpaint_models(v): def downloading_sdxl_lcm_lora(): load_file_from_url( url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors', - model_dir=path_loras, + model_dir=paths_loras[0], file_name='sdxl_lcm_lora.safetensors' ) return 'sdxl_lcm_lora.safetensors' diff --git a/modules/core.py b/modules/core.py index 7a29d9883..bfc449661 100644 --- a/modules/core.py +++ b/modules/core.py @@ -18,6 +18,7 @@ from ldm_patched.contrib.external_freelunch import FreeU_V2 from ldm_patched.modules.sample import prepare_mask from modules.lora import match_lora +from modules.util import get_file_from_folder_list from ldm_patched.modules.lora import model_lora_keys_unet, model_lora_keys_clip from modules.config import path_embeddings from ldm_patched.contrib.external_model_advanced import ModelSamplingDiscrete @@ -79,7 +80,7 @@ def refresh_loras(self, loras): if os.path.exists(name): lora_filename = name else: - lora_filename = os.path.join(modules.config.path_loras, name) + lora_filename = get_file_from_folder_list(name, modules.config.paths_loras) if not os.path.exists(lora_filename): print(f'Lora file not found: {lora_filename}') diff --git a/modules/default_pipeline.py b/modules/default_pipeline.py index 2f45667cf..f8edfae10 100644 --- a/modules/default_pipeline.py +++ b/modules/default_pipeline.py @@ -11,6 +11,7 @@ from ldm_patched.modules.model_base import SDXL, SDXLRefiner from modules.sample_hijack import clip_separate +from modules.util import get_file_from_folder_list model_base = core.StableDiffusionModel() @@ -60,7 +61,7 @@ def assert_model_integrity(): def refresh_base_model(name): global model_base - filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name))) + filename = get_file_from_folder_list(name, modules.config.paths_checkpoints) if model_base.filename == filename: return @@ -76,7 +77,7 @@ def refresh_base_model(name): def refresh_refiner_model(name): global model_refiner - filename = os.path.abspath(os.path.realpath(os.path.join(modules.config.path_checkpoints, name))) + filename = get_file_from_folder_list(name, modules.config.paths_checkpoints) if model_refiner.filename == filename: return diff --git a/modules/util.py b/modules/util.py index 9d4d09961..3c23a992c 100644 --- a/modules/util.py +++ b/modules/util.py @@ -177,5 +177,14 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): return filenames +def get_file_from_folder_list(name, folders): + for folder in folders: + filename = os.path.abspath(os.path.realpath(os.path.join(folder, name))) + if os.path.isfile(filename): + return filename + + return os.path.abspath(os.path.realpath(os.path.join(folders[0], name))) + + def ordinal_suffix(number: int) -> str: return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th') From 4d34f31a7207e7f2f4e2040be6a62fccd892a57d Mon Sep 17 00:00:00 2001 From: Maxim Saplin Date: Sun, 25 Feb 2024 19:14:17 +0300 Subject: [PATCH 08/25] feat: allow users to specify the number of threads when running on CPU (#1601) * CPU_NUM_THREADS * refactor: optimize code, type is already strict --------- Co-authored-by: Manuel Schmid --- ldm_patched/modules/args_parser.py | 2 +- ldm_patched/modules/model_management.py | 3 +++ readme.md | 2 +- 3 files changed, 5 insertions(+), 2 deletions(-) diff 
---------

Co-authored-by: Manuel Schmid
---
 ldm_patched/modules/args_parser.py      | 2 +-
 ldm_patched/modules/model_management.py | 3 +++
 readme.md                               | 2 +-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py
index e5b84dc10..272deb83a 100644
--- a/ldm_patched/modules/args_parser.py
+++ b/ldm_patched/modules/args_parser.py
@@ -100,7 +100,7 @@ class LatentPreviewMethod(enum.Enum):
 vram_group.add_argument("--always-normal-vram", action="store_true")
 vram_group.add_argument("--always-low-vram", action="store_true")
 vram_group.add_argument("--always-no-vram", action="store_true")
-vram_group.add_argument("--always-cpu", action="store_true")
+vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)

 parser.add_argument("--always-offload-from-vram", action="store_true")
diff --git a/ldm_patched/modules/model_management.py b/ldm_patched/modules/model_management.py
index 6f88579df..840d79a07 100644
--- a/ldm_patched/modules/model_management.py
+++ b/ldm_patched/modules/model_management.py
@@ -60,6 +60,9 @@ class CPUState(Enum):
     pass

 if args.always_cpu:
+    if args.always_cpu > 0:
+        torch.set_num_threads(args.always_cpu)
+    print(f"Running on {torch.get_num_threads()} CPU threads")
     cpu_state = CPUState.CPU

 def is_intel_xpu():
diff --git a/readme.md b/readme.md
index fa7e829cc..18b48f3ac 100644
--- a/readme.md
+++ b/readme.md
@@ -370,7 +370,7 @@ entry_with_update.py [-h] [--listen [IP]] [--port PORT]
                      [--attention-split | --attention-quad | --attention-pytorch]
                      [--disable-xformers]
                      [--always-gpu | --always-high-vram | --always-normal-vram |
-                      --always-low-vram | --always-no-vram | --always-cpu]
+                      --always-low-vram | --always-no-vram | --always-cpu [CPU_NUM_THREADS]]
                      [--always-offload-from-vram]
                      [--disable-server-log] [--debug-mode]
                      [--is-windows-embedded-python] [--disable-server-info]
                      [--share] [--preset PRESET]

From 9c19300a3e3ed184b0d27fbd8fc6bc52eb2d38cb Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sun, 25 Feb 2024 18:04:46 +0100
Subject: [PATCH 09/25] feat: improve bug report and feature request issue
 templates (#1631)

* refactor and improve bug report and feature request issue templates

* update operating system placeholder to Windows 10 (most common usage, I assume)

* use already existing label "enhancement" instead of "feature"

* feat: add checkbox for latest version check, add triage to feature requests

* feat: add link to ask a question

* feat: use templates of stable-diffusion-webui-forge as basis

* feat: add optional hosting and operating system inputs
---
 .github/ISSUE_TEMPLATE/bug_report.md       |  18 ----
 .github/ISSUE_TEMPLATE/bug_report.yml      | 106 +++++++++++++++++++++
 .github/ISSUE_TEMPLATE/config.yml          |   5 +
 .github/ISSUE_TEMPLATE/feature_request.md  |  14 ---
 .github/ISSUE_TEMPLATE/feature_request.yml |  40 ++++++++
 5 files changed, 151 insertions(+), 32 deletions(-)
 delete mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.yml
 create mode 100644 .github/ISSUE_TEMPLATE/config.yml
 delete mode 100644 .github/ISSUE_TEMPLATE/feature_request.md
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.yml

diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
deleted file mode 100644
index 624cfe3e0..000000000
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ /dev/null
@@ -1,18 +0,0 @@
----
-name: Bug report
-about: Describe a problem
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Read Troubleshoot**
-
-[x] I confirm that I have read the [Troubleshoot](https://github.com/lllyasviel/Fooocus/blob/main/troubleshoot.md) guide before making this issue.
-
-**Describe the problem**
-A clear and concise description of what the bug is.
-
-**Full Console Log**
-Paste the **full** console log here. You will make our job easier if you give a **full** log.
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
new file mode 100644
index 000000000..483e0de14
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -0,0 +1,106 @@
+name: Bug Report
+description: You think something is broken in Fooocus
+title: "[Bug]: "
+labels: ["bug", "triage"]
+
+body:
+  - type: markdown
+    attributes:
+      value: |
+        > The title of the bug report should be short and descriptive.
+        > Use relevant keywords for searchability.
+        > Do not leave it blank, but also do not put an entire error log in it.
+  - type: checkboxes
+    attributes:
+      label: Checklist
+      description: |
+        Please perform basic debugging to see if your configuration is the cause of the issue.
+        Basic debug procedure
+        1. Update Fooocus - sometimes things just need to be updated
+        2. Backup and remove your config.txt - check if the issue is caused by bad configuration
+        3. Try a fresh installation of Fooocus in a different directory - see if a clean installation solves the issue
+        Before making an issue report, please check that the issue hasn't been reported recently.
+      options:
+        - label: The issue exists on a clean installation of Fooocus
+        - label: The issue exists in the current version of Fooocus
+        - label: The issue has not been reported before recently
+        - label: The issue has been reported before but has not been fixed yet
+  - type: markdown
+    attributes:
+      value: |
+        > Please fill this form with as much information as possible. Don't forget to add information about "What browsers" and provide screenshots if possible
+  - type: textarea
+    id: what-did
+    attributes:
+      label: What happened?
+      description: Tell us what happened in a very clear and simple way
+      placeholder: |
+        image generation is not working as intended.
+    validations:
+      required: true
+  - type: textarea
+    id: steps
+    attributes:
+      label: Steps to reproduce the problem
+      description: Please provide us with precise step by step instructions on how to reproduce the bug
+      placeholder: |
+        1. Go to ...
+        2. Press ...
+        3. ...
+    validations:
+      required: true
+  - type: textarea
+    id: what-should
+    attributes:
+      label: What should have happened?
+      description: Tell us what you think the normal behavior should be
+      placeholder: |
+        Fooocus should ...
+    validations:
+      required: true
+  - type: dropdown
+    id: browsers
+    attributes:
+      label: What browsers do you use to access Fooocus?
+      multiple: true
+      options:
+        - Mozilla Firefox
+        - Google Chrome
+        - Brave
+        - Apple Safari
+        - Microsoft Edge
+        - Android
+        - iOS
+        - Other
+  - type: dropdown
+    id: hosting
+    attributes:
+      label: Where are you running Fooocus?
+      multiple: false
+      options:
+        - Locally
+        - Locally with virtualization (e.g. Docker)
+        - Cloud (Google Colab)
+        - Cloud (other)
+  - type: input
+    id: operating-system
+    attributes:
+      label: What operating system are you using?
+      placeholder: |
+        Windows 10
+  - type: textarea
+    id: logs
+    attributes:
+      label: Console logs
+      description: Please provide **full** cmd/terminal logs from the moment you started UI to the end of it, after the bug occurred. If it's very long, provide a link to pastebin or similar service.
+      render: Shell
+    validations:
+      required: true
+  - type: textarea
+    id: misc
+    attributes:
+      label: Additional information
+      description: |
+        Please provide us with any relevant additional info or context.
+        Examples:
+            I have updated my GPU driver recently.
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 000000000..7bbf022a3
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,5 @@
+blank_issues_enabled: false
+contact_links:
+  - name: Ask a question
+    url: https://github.com/lllyasviel/Fooocus/discussions/new?category=q-a
+    about: Ask the community for help
\ No newline at end of file
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md
deleted file mode 100644
index 8101bc369..000000000
--- a/.github/ISSUE_TEMPLATE/feature_request.md
+++ /dev/null
@@ -1,14 +0,0 @@
----
-name: Feature request
-about: Suggest an idea for this project
-title: ''
-labels: ''
-assignees: ''
-
----
-
-**Is your feature request related to a problem? Please describe.**
-A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
-
-**Describe the idea you'd like**
-A clear and concise description of what you want to happen.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
new file mode 100644
index 000000000..90e594e4b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -0,0 +1,40 @@
+name: Feature request
+description: Suggest an idea for this project
+title: "[Feature Request]: "
+labels: ["enhancement", "triage"]
+
+body:
+  - type: checkboxes
+    attributes:
+      label: Is there an existing issue for this?
+      description: Please search to see if an issue already exists for the feature you want, and that it's not implemented in a recent build/commit.
+      options:
+        - label: I have searched the existing issues and checked the recent builds/commits
+          required: true
+  - type: markdown
+    attributes:
+      value: |
+        *Please fill this form with as much information as possible, provide screenshots and/or illustrations of the feature if possible*
+  - type: textarea
+    id: feature
+    attributes:
+      label: What would your feature do?
+      description: Tell us about your feature in a very clear and simple way, and what problem it would solve
+    validations:
+      required: true
+  - type: textarea
+    id: workflow
+    attributes:
+      label: Proposed workflow
+      description: Please provide us with step by step information on how you'd like the feature to be accessed and used
+      value: |
+        1. Go to ....
+        2. Press ....
+        3. ...
+    validations:
+      required: true
+  - type: textarea
+    id: misc
+    attributes:
+      label: Additional information
+      description: Add any other context or screenshots about the feature request here.
\ No newline at end of file

From b5f019fb6237ef0074f3e930f6176e4635456ff3 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sun, 25 Feb 2024 18:41:43 +0100
Subject: [PATCH 10/25] fix: correctly create directory for path_outputs if
 not existing (#1668)

* correctly create directory for outputs if not existing

* feat: add make_directory parameter, checks for list, extract make_directory to util
---
 modules/config.py | 55 ++++++++++++++++++++++++++---------------------
 modules/util.py   |  7 ++++++
 2 files changed, 37 insertions(+), 25 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index d3be1f212..6f7139166 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -107,14 +107,14 @@ def get_path_output() -> str:
     Checking output path argument and overriding default path.
     """
     global config_dict
-    path_output = get_dir_or_set_default('path_outputs', '../outputs/')
+    path_output = get_dir_or_set_default('path_outputs', '../outputs/', make_directory=True)
     if args_manager.args.output_path:
         print(f'[CONFIG] Overriding config value path_outputs with {args_manager.args.output_path}')
         config_dict['path_outputs'] = path_output = args_manager.args.output_path
     return path_output


-def get_dir_or_set_default(key, default_value, as_array=False):
+def get_dir_or_set_default(key, default_value, as_array=False, make_directory=False):
     global config_dict, visited_keys, always_save_keys

     if key not in visited_keys:
@@ -125,18 +125,29 @@ def get_dir_or_set_default(key, default_value):
         always_save_keys.append(key)

     v = config_dict.get(key, None)
-    if isinstance(v, str) and os.path.exists(v) and os.path.isdir(v):
-        return v if not as_array else [v]
-    elif isinstance(v, list) and all([os.path.exists(d) and os.path.isdir(d) for d in v]):
-        return v
-    else:
-        if v is not None:
-            print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.')
-        if isinstance(default_value, list):
-            dp = []
-            for path in default_value:
-                abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path))
-                dp.append(abs_path)
-                os.makedirs(abs_path, exist_ok=True)
-        else:
-            dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value))
-            os.makedirs(dp, exist_ok=True)
-        if as_array:
-            dp = [dp]
-        config_dict[key] = dp
-        return dp
+
+    if isinstance(v, str):
+        if make_directory:
+            makedirs_with_log(v)
+        if os.path.exists(v) and os.path.isdir(v):
+            return v if not as_array else [v]
+    elif isinstance(v, list):
+        if make_directory:
+            for d in v:
+                makedirs_with_log(d)
+        if all([os.path.exists(d) and os.path.isdir(d) for d in v]):
+            return v
+
+    if v is not None:
+        print(f'Failed to load config key: {json.dumps({key:v})} is invalid or does not exist; will use {json.dumps({key:default_value})} instead.')
+    if isinstance(default_value, list):
+        dp = []
+        for path in default_value:
+            abs_path = os.path.abspath(os.path.join(os.path.dirname(__file__), path))
+            dp.append(abs_path)
+            os.makedirs(abs_path, exist_ok=True)
+    else:
+        dp = os.path.abspath(os.path.join(os.path.dirname(__file__), default_value))
+        os.makedirs(dp, exist_ok=True)
+    if as_array:
+        dp = [dp]
+    config_dict[key] = dp
+    return dp

 paths_checkpoints = get_dir_or_set_default('path_checkpoints', ['../models/checkpoints/'], True)
@@ -408,9 +416,6 @@ def add_ratio(x):
                         'and there is no "," before the last "}". \n\n\n')
         json.dump({k: config_dict[k] for k in visited_keys}, json_file, indent=4)

-
-os.makedirs(path_outputs, exist_ok=True)
-
 model_filenames = []
 lora_filenames = []

diff --git a/modules/util.py b/modules/util.py
index 3c23a992c..1b1651159 100644
--- a/modules/util.py
+++ b/modules/util.py
@@ -188,3 +188,10 @@ def get_file_from_folder_list(name, folders):

 def ordinal_suffix(number: int) -> str:
     return 'th' if 10 <= number % 100 <= 20 else {1: 'st', 2: 'nd', 3: 'rd'}.get(number % 10, 'th')
+
+
+def makedirs_with_log(path):
+    try:
+        os.makedirs(path, exist_ok=True)
+    except OSError as error:
+        print(f'Directory {path} could not be created, reason: {error}')

From eebd7752ab7aaa42e4928d1472115bb896468286 Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Sun, 25 Feb 2024 18:44:28 +0100
Subject: [PATCH 11/25] fix: allow path_outputs to be outside of root dir
 (#2332)

allows Gradio to serve outputs when folder has been changed in the config
---
 webui.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/webui.py b/webui.py
index 0d8c3c04c..6d72c67ca 100644
--- a/webui.py
+++ b/webui.py
@@ -635,5 +635,6 @@ def dump_default_english_config():
         server_port=args_manager.args.port,
         share=args_manager.args.share,
         auth=check_auth if (args_manager.args.share or args_manager.args.listen) and auth_enabled else None,
+        allowed_paths=[modules.config.path_outputs],
         blocked_paths=[constants.AUTH_FILENAME]
     )

From 468d704b299e0bf10ace1662506289ccd85be018 Mon Sep 17 00:00:00 2001
From: MindOfMatter <35126123+MindOfMatter@users.noreply.github.com>
Date: Sun, 25 Feb 2024 13:59:28 -0500
Subject: [PATCH 12/25] feat: add button to enable LoRAs (#2210)

* Initial commit

* Update README.md

* sync with original main Fooocus repo

* update with my gitignore setup

* add max lora config feature

* Revert "add max lora config feature"

This reverts commit cfe7463fe25475b6d59f36072ade410a2d8d5124.

* add lora enabler feature

* Update README.md

* Update .gitignore

* update

* merge

* revert changes

* revert

* feat: change width of LoRA columns

* refactor: rename lora_enable to lora_enabled, optimize code
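Behaviour sketch of the new helper (file names illustrative): each LoRA row now
contributes an [enabled, model, weight] triple, and disabled rows are dropped
before the pipeline sees them:

    apply_enabled_loras([[True, 'my_lora.safetensors', 0.5],
                         [False, 'other_lora.safetensors', 1.0]])
    # -> [['my_lora.safetensors', 0.5]]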
---------

Co-authored-by: Manuel Schmid
---
 modules/async_worker.py | 10 +++++++++-
 modules/html.py         | 24 ++++++++++++++++++++++++
 modules/meta_parser.py  |  6 ++++--
 webui.py                |  9 ++++++---
 4 files changed, 43 insertions(+), 6 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index a304e6974..34cd2e5aa 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -115,6 +115,14 @@ def build_image_wall(async_task):
         # must use deep copy otherwise gradio is super laggy. Do not use list.append() .
         async_task.results = async_task.results + [wall]
         return
+
+    def apply_enabled_loras(loras):
+        enabled_loras = []
+        for lora_enabled, lora_model, lora_weight in loras:
+            if lora_enabled:
+                enabled_loras.append([lora_model, lora_weight])
+
+        return enabled_loras

     @torch.no_grad()
     @torch.inference_mode()
@@ -137,7 +145,7 @@ def handler(async_task):
         base_model_name = args.pop()
         refiner_model_name = args.pop()
         refiner_switch = args.pop()
-        loras = [[str(args.pop()), float(args.pop())] for _ in range(5)]
+        loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(5)])
         input_image_checkbox = args.pop()
         current_tab = args.pop()
         uov_method = args.pop()
diff --git a/modules/html.py b/modules/html.py
index 3ec6f2d68..47a1483a5 100644
--- a/modules/html.py
+++ b/modules/html.py
@@ -112,6 +112,30 @@
     margin-left: -5px !important;
 }

+.lora_enable {
+    flex-grow: 1 !important;
+}
+
+.lora_enable label {
+    height: 100%;
+}
+
+.lora_enable label input {
+    margin: auto;
+}
+
+.lora_enable label span {
+    display: none;
+}
+
+.lora_model {
+    flex-grow: 5 !important;
+}
+
+.lora_weight {
+    flex-grow: 5 !important;
+}
+
 '''
 progress_html = '''
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 07b42a160..bd8f555e1 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -139,10 +139,12 @@ def load_parameter_button_click(raw_prompt_txt, is_generating):
         try:
             n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
             w = float(w)
+            results.append(True)
             results.append(n)
             results.append(w)
         except:
-            results.append(gr.update())
-            results.append(gr.update())
+            results.append(True)
+            results.append("None")
+            results.append(1.0)

     return results
diff --git a/webui.py b/webui.py
index 6d72c67ca..1463ff90e 100644
--- a/webui.py
+++ b/webui.py
@@ -322,11 +322,14 @@ def update_history_link():

                     for i, (n, v) in enumerate(modules.config.default_loras):
                         with gr.Row():
+                            lora_enabled = gr.Checkbox(label='Enable', value=True,
+                                                       elem_classes=['lora_enable', 'min_check'])
                             lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
-                                                     choices=['None'] + modules.config.lora_filenames, value=n)
+                                                     choices=['None'] + modules.config.lora_filenames, value=n,
+                                                     elem_classes='lora_model')
                             lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v,
                                                     elem_classes='lora_weight')
-                        lora_ctrls += [lora_model, lora_weight]
+                        lora_ctrls += [lora_enabled, lora_model, lora_weight]

                     with gr.Row():
                         model_refresh = gr.Button(label='Refresh', value='\U0001f504 Refresh All Files',
                                                   variant='secondary', elem_classes='refresh_button')
@@ -471,7 +474,7 @@ def model_refresh_clicked():
         results = []
         results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
         for i in range(5):
-            results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
+            results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update(), gr.update(interactive=True)]

         return results

     model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,

From 18f9f7dc313ee279fd3241784aafad9e948b402b Mon Sep 17 00:00:00 2001
From: MindOfMatter <35126123+MindOfMatter@users.noreply.github.com>
Date: Sun, 25 Feb 2024 15:12:26 -0500
Subject: [PATCH 13/25] feat: make lora number editable in config (#2215)

* Initial commit

* Update README.md

* sync with original main Fooocus repo

* update with my gitignore setup

* add max lora config feature

* Revert "add max lora config feature"

This reverts commit cfe7463fe25475b6d59f36072ade410a2d8d5124.

* add max loras config feature

* Update README.md

* Update .gitignore

* update

* merge

* revert

* refactor: rename default_loras_max_number to default_max_lora_number, validate config for int

* fix: add missing patch_all call and imports again
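Illustrative config.txt entry (any int >= 1 passes the new validator; the
default stays at len(default_loras)):

    {
        "default_max_lora_number": 8
    }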
---------

Co-authored-by: Manuel Schmid
---
 modules/async_worker.py | 7 +++----
 modules/config.py       | 8 +++++++-
 modules/meta_parser.py  | 6 +++---
 webui.py                | 8 ++++----
 4 files changed, 17 insertions(+), 12 deletions(-)

diff --git a/modules/async_worker.py b/modules/async_worker.py
index 34cd2e5aa..47848ad66 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -4,7 +4,6 @@
 patch_all()

-
 class AsyncTask:
     def __init__(self, args):
         self.args = args
@@ -115,13 +114,13 @@ def worker():
         # must use deep copy otherwise gradio is super laggy. Do not use list.append() .
         async_task.results = async_task.results + [wall]
         return
-    
+
     def apply_enabled_loras(loras):
         enabled_loras = []
         for lora_enabled, lora_model, lora_weight in loras:
             if lora_enabled:
                 enabled_loras.append([lora_model, lora_weight])
-    
+
         return enabled_loras

     @torch.no_grad()
@@ -145,7 +144,7 @@ def handler(async_task):
         base_model_name = args.pop()
         refiner_model_name = args.pop()
         refiner_switch = args.pop()
-        loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(5)])
+        loras = apply_enabled_loras([[bool(args.pop()), str(args.pop()), float(args.pop()), ] for _ in range(modules.config.default_max_lora_number)])
         input_image_checkbox = args.pop()
         current_tab = args.pop()
         uov_method = args.pop()
diff --git a/modules/config.py b/modules/config.py
index 6f7139166..bb1ee26ca 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -235,6 +235,11 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
     ],
     validator=lambda x: isinstance(x, list) and all(len(y) == 2 and isinstance(y[0], str) and isinstance(y[1], numbers.Number) for y in x)
 )
+default_max_lora_number = get_config_item_or_set_default(
+    key='default_max_lora_number',
+    default_value=len(default_loras),
+    validator=lambda x: isinstance(x, int) and x >= 1
+)
 default_cfg_scale = get_config_item_or_set_default(
     key='default_cfg_scale',
     default_value=7.0,
@@ -357,13 +362,14 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):

 example_inpaint_prompts = [[x] for x in example_inpaint_prompts]

-config_dict["default_loras"] = default_loras = default_loras[:5] + [['None', 1.0] for _ in range(5 - len(default_loras))]
+config_dict["default_loras"] = default_loras = default_loras[:default_max_lora_number] + [['None', 1.0] for _ in range(default_max_lora_number - len(default_loras))]

 possible_preset_keys = [
     "default_model",
     "default_refiner",
     "default_refiner_switch",
     "default_loras",
+    "default_max_lora_number",
     "default_cfg_scale",
     "default_sample_sharpness",
     "default_sampler",
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index bd8f555e1..061e1f8af 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -135,16 +135,16 @@ def load_parameter_button_click(raw_prompt_txt, is_generating):

     results.append(gr.update(visible=False))

-    for i in range(1, 6):
+    for i in range(1, modules.config.default_max_lora_number + 1):
         try:
-            n, w = loaded_parameter_dict.get(f'LoRA {i}').split(' : ')
+            n, w = loaded_parameter_dict.get(f'LoRA {i}', ' : ').split(' : ')
             w = float(w)
             results.append(True)
             results.append(n)
             results.append(w)
         except:
             results.append(True)
-            results.append("None")
+            results.append('None')
             results.append(1.0)

     return results
diff --git a/webui.py b/webui.py
index 1463ff90e..270f0ffa3 100644
--- a/webui.py
+++ b/webui.py
@@ -471,10 +471,10 @@ def dev_mode_checked(r):

     def model_refresh_clicked():
         modules.config.update_all_model_names()
-        results = []
-        results += [gr.update(choices=modules.config.model_filenames), gr.update(choices=['None'] + modules.config.model_filenames)]
-        for i in range(5):
-            results += [gr.update(choices=['None'] + modules.config.lora_filenames), gr.update(), gr.update(interactive=True)]
+        results = [gr.update(choices=modules.config.model_filenames)]
+        results += [gr.update(choices=['None'] + modules.config.model_filenames)]
+        for i in range(modules.config.default_max_lora_number):
+            results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]

         return results

     model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,

From 3be76ef8a3d503273f3800fa35b8148888bb9d4d Mon Sep 17 00:00:00 2001
From: MindOfMatter <35126123+MindOfMatter@users.noreply.github.com>
Date: Sun, 25 Feb 2024 15:36:25 -0500
Subject: [PATCH 14/25] feat: make lora min max weight editable in config
 (#2216)

* Initial commit

* Update README.md

* sync with original main Fooocus repo

* update with my gitignore setup

* add min max weight configs feature

* add max lora config feature

* Revert "add max lora config feature"

This reverts commit cfe7463fe25475b6d59f36072ade410a2d8d5124.

* Update README.md

* Update .gitignore

* update

* merge

* revert
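Illustrative config.txt override widening the weight slider (both values are
validated as numbers within [-10, 10]):

    {
        "default_loras_min_weight": -5,
        "default_loras_max_weight": 5
    }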
---------

Co-authored-by: Manuel Schmid
---
 modules/config.py | 12 ++++++++++++
 webui.py          |  3 ++-
 2 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/modules/config.py b/modules/config.py
index bb1ee26ca..acf19b607 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -209,6 +209,16 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
     default_value=0.8,
     validator=lambda x: isinstance(x, numbers.Number) and 0 <= x <= 1
 )
+default_loras_min_weight = get_config_item_or_set_default(
+    key='default_loras_min_weight',
+    default_value=-2,
+    validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10
+)
+default_loras_max_weight = get_config_item_or_set_default(
+    key='default_loras_max_weight',
+    default_value=2,
+    validator=lambda x: isinstance(x, numbers.Number) and -10 <= x <= 10
+)
 default_loras = get_config_item_or_set_default(
     key='default_loras',
     default_value=[
@@ -368,6 +378,8 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
     "default_model",
     "default_refiner",
     "default_refiner_switch",
+    "default_loras_min_weight",
+    "default_loras_max_weight",
     "default_loras",
     "default_max_lora_number",
     "default_cfg_scale",
diff --git a/webui.py b/webui.py
index 270f0ffa3..a3cb45e4d 100644
--- a/webui.py
+++ b/webui.py
@@ -327,7 +327,8 @@ def update_history_link():
                             lora_model = gr.Dropdown(label=f'LoRA {i + 1}',
                                                      choices=['None'] + modules.config.lora_filenames, value=n,
                                                      elem_classes='lora_model')
-                            lora_weight = gr.Slider(label='Weight', minimum=-2, maximum=2, step=0.01, value=v,
+                            lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight,
+                                                    maximum=modules.config.default_loras_max_weight, step=0.01, value=v,
                                                     elem_classes='lora_weight')
                             lora_ctrls += [lora_enabled, lora_model, lora_weight]

From c898e6a4dca5199653e0f05e0420c6d158bdd4f2 Mon Sep 17 00:00:00 2001
From: Brian Flannery
Date: Sun, 25 Feb 2024 15:22:49 -0600
Subject: [PATCH 15/25] feat: add array support on main prompt (#1503)

* prompt array support

* update change log

* update change log

* docs: remove 2.1.847 change log

* refactor: rename freeze_seed to disable_seed_increment, move to developer debug mode

* feat: add translation for new labels

* fix: use task_rng based on task_seed, not initial seed
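Example of the new array syntax (illustrative prompt): with image number 4, the
prompt

    a [[red, green]] [[car, bike]]

resolves per image index to "a red car", "a green car", "a red bike" and
"a green bike"; multiple arrays multiply out and the index wraps around the
total number of combinations.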
increment", + "Disable automatic seed increment when image number is > 1.": "Disable automatic seed increment when image number is > 1.", "\ud83d\udcda History Log": "\uD83D\uDCDA History Log", "Image Style": "Image Style", "Fooocus V2": "Fooocus V2", diff --git a/modules/async_worker.py b/modules/async_worker.py index 47848ad66..4fca0966e 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -40,7 +40,7 @@ def worker(): import extras.face_crop import fooocus_version - from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion + from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays from modules.private_logger import log from extras.expansion import safe_str from modules.util import remove_empty_str, HWC3, resize_image, \ @@ -155,6 +155,7 @@ def handler(async_task): inpaint_mask_image_upload = args.pop() disable_preview = args.pop() disable_intermediate_results = args.pop() + disable_seed_increment = args.pop() adm_scaler_positive = args.pop() adm_scaler_negative = args.pop() adm_scaler_end = args.pop() @@ -424,10 +425,14 @@ def handler(async_task): progressbar(async_task, 3, 'Processing prompts ...') tasks = [] for i in range(image_number): - task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not - task_rng = random.Random(task_seed) # may bind to inpaint noise in the future + if disable_seed_increment: + task_seed = seed + else: + task_seed = (seed + i) % (constants.MAX_SEED + 1) # randint is inclusive, % is not + task_rng = random.Random(task_seed) # may bind to inpaint noise in the future task_prompt = apply_wildcards(prompt, task_rng) + task_prompt = apply_arrays(task_prompt, i) task_negative_prompt = apply_wildcards(negative_prompt, task_rng) task_extra_positive_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_positive_prompts] task_extra_negative_prompts = [apply_wildcards(pmt, task_rng) for pmt in extra_negative_prompts] diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index f5bb62765..71afc402f 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -1,6 +1,7 @@ import os import re import json +import math from modules.util import get_files_from_folder @@ -80,3 +81,38 @@ def apply_wildcards(wildcard_text, rng, directory=wildcards_path): print(f'[Wildcards] BFS stack overflow. 
Current text: {wildcard_text}') return wildcard_text + +def get_words(arrays, totalMult, index): + if(len(arrays) == 1): + return [arrays[0].split(',')[index]] + else: + words = arrays[0].split(',') + word = words[index % len(words)] + index -= index % len(words) + index /= len(words) + index = math.floor(index) + return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index) + + + +def apply_arrays(text, index): + arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text) + if len(arrays) == 0: + return text + + print(f'[Arrays] processing: {text}') + mult = 1 + for arr in arrays: + words = arr.split(',') + mult *= len(words) + + index %= mult + chosen_words = get_words(arrays, mult, index) + + i = 0 + for arr in arrays: + text = text.replace(f'[[{arr}]]', chosen_words[i], 1) + i = i+1 + + return text + diff --git a/webui.py b/webui.py index a3cb45e4d..14ba2a1ff 100644 --- a/webui.py +++ b/webui.py @@ -398,6 +398,9 @@ def update_history_link(): value=modules.config.default_performance == 'Extreme Speed', interactive=modules.config.default_performance != 'Extreme Speed', info='Disable intermediate results during generation, only show final gallery.') + disable_seed_increment = gr.Checkbox(label='Disable seed increment', + info='Disable automatic seed increment when image number is > 1.', + value=False) with gr.Tab(label='Control'): debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False, @@ -538,7 +541,7 @@ def inpaint_mode_change(mode): ctrls += [input_image_checkbox, current_tab] ctrls += [uov_method, uov_input_image] ctrls += [outpaint_selections, inpaint_input_image, inpaint_additional_prompt, inpaint_mask_image] - ctrls += [disable_preview, disable_intermediate_results] + ctrls += [disable_preview, disable_intermediate_results, disable_seed_increment] ctrls += [adm_scaler_positive, adm_scaler_negative, adm_scaler_end, adaptive_cfg] ctrls += [sampler_name, scheduler_name] ctrls += [overwrite_step, overwrite_switch, overwrite_width, overwrite_height, overwrite_vary_strength] From d3113f5c3f6aa266cf9cae498690cece6f3784c3 Mon Sep 17 00:00:00 2001 From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Date: Sun, 25 Feb 2024 22:56:38 +0100 Subject: [PATCH 16/25] feat: use consistent file name in gradio (#1932) * feat: use consistent file name in gradio returns and uses filepaths instead of numpy image by saving to temp dir uses double the temp dir file storage on disk as it saves to temp dir and gradio temp dir when displaying the image, but reuses logged output image * feat: delete temp images after yielding to gradio * feat: use args temp path if given * chore: code cleanup, remove redundant if statement --- args_manager.py | 6 ++++++ ldm_patched/modules/args_parser.py | 1 - modules/async_worker.py | 9 +++++---- modules/private_logger.py | 16 +++++++++------- webui.py | 5 +++++ 5 files changed, 25 insertions(+), 12 deletions(-) diff --git a/args_manager.py b/args_manager.py index eeb38e1f9..1675c31db 100644 --- a/args_manager.py +++ b/args_manager.py @@ -1,5 +1,7 @@ import ldm_patched.modules.args_parser as args_parser +import os +from tempfile import gettempdir args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.") args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.") @@ -40,7 +42,11 @@ if args_parser.args.disable_analytics: import os os.environ["GRADIO_ANALYTICS_ENABLED"] = "False" + if args_parser.args.disable_in_browser: args_parser.args.in_browser 
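Flow sketch after this change (file name illustrative): log() now always writes
the image first and returns its path, so the worker yields file paths instead
of numpy arrays:

    img_path = log(img, d)   # e.g. <temp_path>/2024-02-25/2024-02-25_1234.png
    yield_result(async_task, [img_path])
    # with --disable-image-log the file lands in the temp path and is
    # removed again in generate_clicked after gradio has copied it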
---
 args_manager.py                    |  6 ++++++
 ldm_patched/modules/args_parser.py |  1 -
 modules/async_worker.py            |  9 +++++----
 modules/private_logger.py          | 16 +++++++++-------
 webui.py                           |  5 +++++
 5 files changed, 25 insertions(+), 12 deletions(-)

diff --git a/args_manager.py b/args_manager.py
index eeb38e1f9..1675c31db 100644
--- a/args_manager.py
+++ b/args_manager.py
@@ -1,5 +1,7 @@
 import ldm_patched.modules.args_parser as args_parser
+import os
+from tempfile import gettempdir

 args_parser.parser.add_argument("--share", action='store_true', help="Set whether to share on Gradio.")
 args_parser.parser.add_argument("--preset", type=str, default=None, help="Apply specified UI preset.")
@@ -40,7 +42,11 @@
 if args_parser.args.disable_analytics:
     import os
     os.environ["GRADIO_ANALYTICS_ENABLED"] = "False"
+
 if args_parser.args.disable_in_browser:
     args_parser.args.in_browser = False

+if args_parser.args.temp_path is None:
+    args_parser.args.temp_path = os.path.join(gettempdir(), 'Fooocus')
+
 args = args_parser.args
diff --git a/ldm_patched/modules/args_parser.py b/ldm_patched/modules/args_parser.py
index 272deb83a..0c6165a7b 100644
--- a/ldm_patched/modules/args_parser.py
+++ b/ldm_patched/modules/args_parser.py
@@ -102,7 +102,6 @@ class LatentPreviewMethod(enum.Enum):
 vram_group.add_argument("--always-no-vram", action="store_true")
 vram_group.add_argument("--always-cpu", type=int, nargs="?", metavar="CPU_NUM_THREADS", const=-1)

-
 parser.add_argument("--always-offload-from-vram", action="store_true")
 parser.add_argument("--pytorch-deterministic", action="store_true")
diff --git a/modules/async_worker.py b/modules/async_worker.py
index 4fca0966e..2a31aae1e 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -563,8 +563,8 @@ def handler(async_task):

             if direct_return:
                 d = [('Upscale (Fast)', '2x')]
-                log(uov_input_image, d)
-                yield_result(async_task, uov_input_image, do_not_show_finished_images=True)
+                uov_input_image_path = log(uov_input_image, d)
+                yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True)
                 return

             tiled = True
@@ -828,6 +828,7 @@ def callback(step, x0, x, total_steps, y):
             if inpaint_worker.current_task is not None:
                 imgs = [inpaint_worker.current_task.post_process(x) for x in imgs]

+            img_paths = []
             for x in imgs:
                 d = [
                     ('Prompt', task['log_positive_prompt']),
@@ -853,9 +854,9 @@ def callback(step, x0, x, total_steps, y):
                     if n != 'None':
                         d.append((f'LoRA {li + 1}', f'{n} : {w}'))
                 d.append(('Version', 'v' + fooocus_version.version))
-                log(x, d)
+                img_paths.append(log(x, d))

-            yield_result(async_task, imgs, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
+            yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
         except ldm_patched.modules.model_management.InterruptProcessingException as e:
             if async_task.last_stop == 'skip':
                 print('User skipped')
diff --git a/modules/private_logger.py b/modules/private_logger.py
index 49f17dcad..506b10555 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -6,7 +6,7 @@
 from PIL import Image
 from modules.util import generate_temp_filename
-
+from tempfile import gettempdir

 log_cache = {}
@@ -18,13 +18,15 @@ def get_current_html_path():
     return html_name


-def log(img, dic):
-    if args_manager.args.disable_image_log:
-        return
-
-    date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs, extension='png')
+def log(img, dic) -> str:
+    path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs
+    date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension='png')
     os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
     Image.fromarray(img).save(local_temp_filename)
+
+    if args_manager.args.disable_image_log:
+        return local_temp_filename
+
     html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html')

     css_styles = (
@@ -105,4 +107,4 @@ def log(img, dic):

     log_cache[html_name] = middle_part

-    return
+    return local_temp_filename
diff --git a/webui.py b/webui.py
index 14ba2a1ff..a3663dfa8 100644
--- a/webui.py
+++ b/webui.py
@@ -72,6 +72,11 @@ def generate_clicked(task):
                 gr.update(visible=True, value=product)
             finished = True

+            # delete Fooocus temp images, only keep gradio temp images
+            if args_manager.args.disable_image_log:
+                for filepath in product:
+                    os.remove(filepath)
+
     execution_time = time.perf_counter() - execution_start_time
     print(f'Total time: {execution_time:.2f} seconds')
     return

From ba9eadbcda33839b3f6f12b21ce7b10a4c90a93c Mon Sep 17 00:00:00 2001
From: Manuel Schmid <9307310+mashb1t@users.noreply.github.com>
Date: Mon, 26 Feb 2024 14:27:57 +0100
Subject: [PATCH 17/25] feat: add metadata to images (#1940)

* feat: add metadata logging for images

inspired by https://github.com/MoonRide303/Fooocus-MRE

* feat: add config and checkbox for save_metadata_to_images

* feat: add argument disable_metadata

* feat: add support for A1111 metadata schema

https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cf2772fab0af5573da775e7437e6acdca424f26e/modules/processing.py#L672

* feat: add model hash support for a1111

* feat: use resolved prompts with included expansion and styles for a1111 metadata

* fix: code cleanup and resolved prompt fixes

* feat: add config metadata_created_by

* fix: use string instead of quote wrap for A1111 created_by

* fix: correctly hide/show metadata schema on app start

* fix: do not generate hashes when arg --disable-metadata is used

* refactor: rename metadata_schema to metadata_scheme

* fix: use pnginfo "parameters" instead of "Comments"

see https://github.com/RupertAvery/DiffusionToolkit/issues/202 and https://github.com/AUTOMATIC1111/stable-diffusion-webui/blob/cf2772fab0af5573da775e7437e6acdca424f26e/modules/processing.py#L939

* feat: add resolved prompts to metadata

* fix: use correct default value in metadata check for created_by

* wip: add metadata mapping, reading and writing

applying data after reading currently not functional for A1111

* feat: rename metadata tab and import button label

* feat: map basic information for scheme A1111

* wip: optimize handling for metadata in Gradio calls

* feat: add enums for Performance, Steps and StepsUOV

also move MetadataSchema enum to prevent circular dependency

* fix: correctly map resolution, use empty styles for A1111

* chore: code cleanup

* feat: add A1111 prompt style detection

only detects one style as Fooocus doesn't wrap {prompt} with the whole style, but has a separate prompt string for each style

* wip: add prompt style extraction for A1111 scheme

* feat: sort styles after metadata import

* refactor: use central flag for LoRA count

* refactor: use central flag for ControlNet image count

* fix: use correct LoRA mapping, add fallback for backwards compatibility

* feat: add created_by again

* feat: add prefix "Fooocus" to version

* wip: code cleanup, update todos

* fix: use correct order to read LoRA in meta parser

* wip: code cleanup, update todos

* feat: make sha256 with length 10 default

* feat: add lora handling to A1111 scheme

* feat: override existing LoRA values when importing, would cause images to differ

* fix: correctly extract prompt style when only prompt expansion is selected

* feat: allow model / LoRA loading from subfolders

* feat: code cleanup, do not queue metadata preview on image upload

* refactor: add flag for refiner_swap_method

* feat: add metadata handling for all non-img2img parameters

* refactor: code cleanup

* chore: use str as return type in calculate_sha256

* feat: add hash cache to metadata

* chore: code cleanup

* feat: add method get_scheme to Metadata

* fix: align handling for scheme Fooocus by removing lcm lora from json parsing

* refactor: add step before parsing to set data in parser

- add constructor for MetadataSchema class
- remove showable and copyable from log output
- add functional hash cache (model hashing takes about 5 seconds, only required once per model, using hash lazy loading)

* feat: sort metadata attributes before writing to image

* feat: add translations and hint for image prompt parameters

* chore: check and remove ToDo's

* refactor: merge metadata.py into meta_parser.py

* fix: add missing refiner in A1111 parse_json

* wip: add TODO for multiline prompt style resolution

* fix: remove sorting for A1111, change performance key position

fixes https://github.com/lllyasviel/Fooocus/pull/1940#issuecomment-1924444633

* fix: add workaround for multiline prompts

* feat: add sampler mapping

* feat: prevent config reset by renaming metadata_scheme to match config options

* chore: remove remaining todos after analysis

refiner is added when set
restoring multiline prompts has been resolved by using separate parameters "raw_prompt" and "raw_negative_prompt"

* chore: specify too broad exception types

* feat: add mapping for _gpu samplers to cpu samplers

gpu samplers are less deterministic than cpu but in general similar, see https://www.reddit.com/r/comfyui/comments/15hayzo/comment/juqcpep/

* feat: add better handling for image import with empty metadata

* fix: parse adaptive_cfg as float instead of string

* chore: loosen strict type for parse_json, fix indent

* chore: make steps enums more strict

* feat: only override steps if metadata value is not in steps enum or in steps enum and performance is not the same

* fix: handle empty strings in metadata

e.g. raw negative prompt when none is set
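Serialization sketch for scheme a1111 (values illustrative, format follows the
linked A1111 processing.py): the pnginfo "parameters" text ends up roughly as

    a photo of a cat, cinematic
    Negative prompt: lowres, bad anatomy
    Steps: 30, Sampler: DPM++ 2M SDE Karras, CFG scale: 4.0, Seed: 12345, Size: 1152x896, Model: juggernautXL_version6Rundiffusion, Model hash: 1fe481ea4c, Version: Fooocus v2.1.865

whereas scheme fooocus stores the same key/value pairs as one JSON object;
re_param in meta_parser.py splits the "Key: value" list back apart on import.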
---
 args_manager.py           |   5 +-
 language/en.json          |   9 +-
 modules/async_worker.py   | 101 ++++----
 modules/config.py         |  22 +-
 modules/flags.py          |  91 ++++++-
 modules/meta_parser.py    | 515 ++++++++++++++++++++++++++++++++------
 modules/private_logger.py |  28 ++-
 modules/util.py           | 169 ++++++++++++-
 webui.py                  | 102 +++++---
 9 files changed, 864 insertions(+), 178 deletions(-)

diff --git a/args_manager.py b/args_manager.py
index 1675c31db..c7c1b7ab1 100644
--- a/args_manager.py
+++ b/args_manager.py
@@ -20,7 +20,10 @@
                                 help="Prevent writing images and logs to hard drive.")

 args_parser.parser.add_argument("--disable-analytics", action='store_true',
-                                help="Disables analytics for Gradio", default=False)
+                                help="Disables analytics for Gradio.")
+
+args_parser.parser.add_argument("--disable-metadata", action='store_true',
+                                help="Disables saving metadata to images.")

 args_parser.parser.add_argument("--disable-preset-download", action='store_true',
                                 help="Disables downloading models for presets", default=False)
diff --git a/language/en.json b/language/en.json
index a3e47c1a1..cb5603f92 100644
--- a/language/en.json
+++ b/language/en.json
@@ -374,5 +374,12 @@
     "* Powered by Fooocus Inpaint Engine (beta)": "* Powered by Fooocus Inpaint Engine (beta)",
     "Fooocus Enhance": "Fooocus Enhance",
     "Fooocus Cinematic": "Fooocus Cinematic",
-    "Fooocus Sharp": "Fooocus Sharp"
+    "Fooocus Sharp": "Fooocus Sharp",
+    "Drag any image generated by Fooocus here": "Drag any image generated by Fooocus here",
+    "Metadata": "Metadata",
+    "Apply Metadata": "Apply Metadata",
+    "Metadata Scheme": "Metadata Scheme",
+    "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.",
+    "fooocus (json)": "fooocus (json)",
+    "a1111 (plain text)": "a1111 (plain text)"
 }
\ No newline at end of file
diff --git a/modules/async_worker.py b/modules/async_worker.py
index 2a31aae1e..677cf4691 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -19,6 +19,7 @@ def __init__(self, args):
 def worker():
     global async_tasks

+    import os
     import traceback
     import math
     import numpy as np
@@ -39,6 +40,7 @@ def worker():
     import extras.ip_adapter as ip_adapter
     import extras.face_crop
     import fooocus_version
+    import args_manager

     from modules.sdxl_styles import apply_style, apply_wildcards, fooocus_expansion, apply_arrays
     from modules.private_logger import log
@@ -46,6 +48,8 @@ def worker():
     from modules.util import remove_empty_str, HWC3, resize_image, \
         get_image_shape_ceil, set_image_shape_ceil, get_shape_ceil, resample_image, erode_or_dilate, ordinal_suffix
     from modules.upscaler import perform_upscale
+    from modules.flags import Performance
+    from modules.meta_parser import get_metadata_parser, MetadataScheme

     pid = os.getpid()
     print(f'Started worker with PID {pid}')
@@ -135,7 +139,7 @@ def handler(async_task):
         prompt = args.pop()
         negative_prompt = args.pop()
         style_selections = args.pop()
-        performance_selection = args.pop()
+        performance_selection = Performance(args.pop())
         aspect_ratios_selection = args.pop()
         image_number = args.pop()
         image_seed = args.pop()
@@ -153,6 +157,7 @@ def handler(async_task):
         inpaint_input_image = args.pop()
         inpaint_additional_prompt = args.pop()
         inpaint_mask_image_upload = args.pop()
+
         disable_preview = args.pop()
         disable_intermediate_results = args.pop()
         disable_seed_increment = args.pop()
@@ -190,8 +195,11 @@ def handler(async_task):
         invert_mask_checkbox = args.pop()
         inpaint_erode_or_dilate = args.pop()

+        save_metadata_to_images = args.pop() if not args_manager.args.disable_metadata else False
+        metadata_scheme = MetadataScheme(args.pop()) if not args_manager.args.disable_metadata else MetadataScheme.FOOOCUS
+
         cn_tasks = {x: [] for x in flags.ip_list}
-        for _ in range(4):
+        for _ in range(flags.controlnet_image_count):
             cn_img = args.pop()
             cn_stop = args.pop()
             cn_weight = args.pop()
@@ -216,17 +224,9 @@ def handler(async_task):
                 print(f'Refiner disabled because base model and refiner are same.')
                 refiner_model_name = 'None'

-        assert performance_selection in ['Speed', 'Quality', 'Extreme Speed']
-
-        steps = 30
-
-        if performance_selection == 'Speed':
-            steps = 30
-
-        if performance_selection == 'Quality':
-            steps = 60
+        steps = performance_selection.steps()

-        if performance_selection == 'Extreme Speed':
+        if performance_selection == Performance.EXTREME_SPEED:
             print('Enter LCM mode.')
             progressbar(async_task, 1, 'Downloading LCM components ...')
             loras += [(modules.config.downloading_sdxl_lcm_lora(), 1.0)]
@@ -244,7 +244,6 @@ def handler(async_task):
             adm_scaler_positive = 1.0
             adm_scaler_negative = 1.0
             adm_scaler_end = 0.0
-            steps = 8

         print(f'[Parameters] Adaptive CFG = {adaptive_cfg}')
         print(f'[Parameters] Sharpness = {sharpness}')
@@ -305,16 +304,7 @@ def handler(async_task):
             if 'fast' in uov_method:
                 skip_prompt_processing = True
             else:
-                steps = 18
-
-                if performance_selection == 'Speed':
-                    steps = 18
-
-                if performance_selection == 'Quality':
-                    steps = 36
-
-                if performance_selection == 'Extreme Speed':
-                    steps = 8
+                steps = performance_selection.steps_uov()

                 progressbar(async_task, 1, 'Downloading upscale models ...')
                 modules.config.downloading_upscale_model()
@@ -830,31 +820,50 @@ def callback(step, x0, x, total_steps, y):

             img_paths = []
             for x in imgs:
-                d = [
-                    ('Prompt', task['log_positive_prompt']),
-                    ('Negative Prompt', task['log_negative_prompt']),
-                    ('Fooocus V2 Expansion', task['expansion']),
-                    ('Styles', str(raw_style_selections)),
-                    ('Performance', performance_selection),
-                    ('Resolution', str((width, height))),
-                    ('Sharpness', sharpness),
-                    ('Guidance Scale', guidance_scale),
-                    ('ADM Guidance', str((
-                        modules.patch.patch_settings[pid].positive_adm_scale,
-                        modules.patch.patch_settings[pid].negative_adm_scale,
-                        modules.patch.patch_settings[pid].adm_scaler_end))),
-                    ('Base Model', base_model_name),
-                    ('Refiner Model', refiner_model_name),
-                    ('Refiner Switch', refiner_switch),
-                    ('Sampler', sampler_name),
-                    ('Scheduler', scheduler_name),
-                    ('Seed', task['task_seed']),
-                ]
+                d = [('Prompt', 'prompt', task['log_positive_prompt']),
+                     ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']),
+                     ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']),
+                     ('Styles', 'styles', str(raw_style_selections)),
+                     ('Performance', 'performance', performance_selection.value),
+                     ('Resolution', 'resolution', str((width, height))),
+                     ('Guidance Scale', 'guidance_scale', guidance_scale),
+                     ('Sharpness', 'sharpness', sharpness),
+                     ('ADM Guidance', 'adm_guidance', str((
+                         modules.patch.patch_settings[pid].positive_adm_scale,
+                         modules.patch.patch_settings[pid].negative_adm_scale,
+                         modules.patch.patch_settings[pid].adm_scaler_end))),
+                     ('Base Model', 'base_model', base_model_name),
+                     ('Refiner Model', 'refiner_model', refiner_model_name),
+                     ('Refiner Switch', 'refiner_switch', refiner_switch)]
+
+                if refiner_model_name != 'None':
+                    if overwrite_switch > 0:
+                        d.append(('Overwrite Switch', 'overwrite_switch', overwrite_switch))
+                    if refiner_swap_method != flags.refiner_swap_method:
+                        d.append(('Refiner Swap Method', 'refiner_swap_method', refiner_swap_method))
+                if modules.patch.patch_settings[pid].adaptive_cfg != modules.config.default_cfg_tsnr:
+                    d.append(('CFG Mimicking from TSNR', 'adaptive_cfg', modules.patch.patch_settings[pid].adaptive_cfg))
+
+                d.append(('Sampler', 'sampler', sampler_name))
+                d.append(('Scheduler', 'scheduler', scheduler_name))
+                d.append(('Seed', 'seed', task['task_seed']))
+
+                if freeu_enabled:
+                    d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))
+
+                metadata_parser = None
+                if save_metadata_to_images:
+                    metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
+                    metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
+                                             task['log_negative_prompt'], task['negative'],
+                                             steps, base_model_name, refiner_model_name, loras)
+
                 for li, (n, w) in enumerate(loras):
                     if n != 'None':
-                        d.append((f'LoRA {li + 1}', f'{n} : {w}'))
-                d.append(('Version', 'v' + fooocus_version.version))
-                img_paths.append(log(x, d))
+                        d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))
+
+                d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version))
+                img_paths.append(log(x, d, metadata_parser))

             yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results)
         except ldm_patched.modules.model_management.InterruptProcessingException as e:
diff --git a/modules/config.py b/modules/config.py
index acf19b607..a393e24cc 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -8,7 +8,7 @@

 from modules.model_loader import load_file_from_url
 from modules.util import get_files_from_folder, makedirs_with_log
-
+from modules.flags import Performance, MetadataScheme

 config_path = os.path.abspath("./config.txt")
 config_example_path = os.path.abspath("config_modification_tutorial.txt")
@@ -293,8 +293,8 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
 )
 default_performance = get_config_item_or_set_default(
     key='default_performance',
-    default_value='Speed',
-    validator=lambda x: x in modules.flags.performance_selections
+    default_value=Performance.SPEED.value,
+    validator=lambda x: x in Performance.list()
 )
 default_advanced_checkbox = get_config_item_or_set_default(
     key='default_advanced_checkbox',
@@ -369,6 +369,21 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False):
     ],
     validator=lambda x: isinstance(x, list) and all(isinstance(v, str) for v in x)
 )
+default_save_metadata_to_images = get_config_item_or_set_default(
+    key='default_save_metadata_to_images',
+    default_value=False,
+    validator=lambda x: isinstance(x, bool)
+)
+default_metadata_scheme = get_config_item_or_set_default(
+    key='default_metadata_scheme',
+    default_value=MetadataScheme.FOOOCUS.value,
+    validator=lambda x: x in [y[1] for y in modules.flags.metadata_scheme if y[1] == x]
+)
+metadata_created_by = get_config_item_or_set_default(
+    key='metadata_created_by',
+    default_value='',
+    validator=lambda x: isinstance(x, str)
+)

 example_inpaint_prompts = [[x] for x in example_inpaint_prompts]

@@ -391,6 +406,7 @@ def add_ratio(x):
     "default_prompt_negative",
     "default_styles",
     "default_aspect_ratio",
+    "default_save_metadata_to_images",
     "checkpoint_downloads",
     "embeddings_downloads",
     "lora_downloads",
diff --git a/modules/flags.py b/modules/flags.py
index 27f2d7166..206f51218 100644
--- a/modules/flags.py
+++ b/modules/flags.py
@@ -1,3 +1,5 @@
+from enum import IntEnum, Enum
+
 disabled = 'Disabled'
 enabled = 'Enabled'
 subtle_variation = 'Vary (Subtle)'
@@ -10,16 +12,49 @@
     disabled, subtle_variation, strong_variation, upscale_15, upscale_2, upscale_fast
 ]

-KSAMPLER_NAMES = ["euler", "euler_ancestral", "heun", "heunpp2","dpm_2", "dpm_2_ancestral",
-                  "lms", "dpm_fast", "dpm_adaptive", "dpmpp_2s_ancestral", "dpmpp_sde", "dpmpp_sde_gpu",
-                  "dpmpp_2m", "dpmpp_2m_sde", "dpmpp_2m_sde_gpu", "dpmpp_3m_sde", "dpmpp_3m_sde_gpu", "ddpm", "lcm"]
+CIVITAI_NO_KARRAS = ["euler", "euler_ancestral", "heun", "dpm_fast", "dpm_adaptive", "ddim", "uni_pc"]
+
+# fooocus: a1111 (Civitai)
+KSAMPLER = {
+    "euler": "Euler",
+    "euler_ancestral": "Euler a",
+    "heun": "Heun",
+    "heunpp2": "",
+    "dpm_2": "DPM2",
+    "dpm_2_ancestral": "DPM2 a",
+    "lms": "LMS",
+    "dpm_fast": "DPM fast",
+    "dpm_adaptive": "DPM adaptive",
+    "dpmpp_2s_ancestral": "DPM++ 2S a",
+    "dpmpp_sde": "DPM++ SDE",
+    "dpmpp_sde_gpu": "DPM++ SDE",
+    "dpmpp_2m": "DPM++ 2M",
+    "dpmpp_2m_sde": "DPM++ 2M SDE",
+    "dpmpp_2m_sde_gpu": "DPM++ 2M SDE",
+    "dpmpp_3m_sde": "",
+    "dpmpp_3m_sde_gpu": "",
+    "ddpm": "",
+    "lcm": "LCM"
+}
+
+SAMPLER_EXTRA = {
+    "ddim": "DDIM",
+    "uni_pc": "UniPC",
+    "uni_pc_bh2": ""
+}
+
+SAMPLERS = KSAMPLER | SAMPLER_EXTRA
+
+KSAMPLER_NAMES = list(KSAMPLER.keys())

 SCHEDULER_NAMES = ["normal", "karras", "exponential", "sgm_uniform", "simple", "ddim_uniform", "lcm", "turbo"]
-SAMPLER_NAMES = KSAMPLER_NAMES + ["ddim", "uni_pc", "uni_pc_bh2"]
+SAMPLER_NAMES = KSAMPLER_NAMES + list(SAMPLER_EXTRA.keys())

 sampler_list = SAMPLER_NAMES
 scheduler_list = SCHEDULER_NAMES

+refiner_swap_method = 'joint'
+
 cn_ip = "ImagePrompt"
 cn_ip_face = "FaceSwap"
 cn_canny = "PyraCanny"
@@ -33,8 +68,6 @@
 }  # stop, weight

 inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6']
-performance_selections = ['Speed', 'Quality', 'Extreme Speed']
-
 inpaint_option_default = 'Inpaint or Outpaint (default)'
 inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)'
 inpaint_option_modify = 'Modify Content (add objects, change background, etc.)'
@@ -42,3 +75,49 @@

 desc_type_photo = 'Photograph'
 desc_type_anime = 'Art/Anime'
+
+
+class MetadataScheme(Enum):
+    FOOOCUS = 'fooocus'
+    A1111 = 'a1111'
+
+
+metadata_scheme = [
+    (f'{MetadataScheme.FOOOCUS.value} (json)', MetadataScheme.FOOOCUS.value),
+    (f'{MetadataScheme.A1111.value} (plain text)', MetadataScheme.A1111.value),
+]
+
+lora_count = 5
+
+controlnet_image_count = 4
+
+
+class Steps(IntEnum):
+    QUALITY = 60
+    SPEED = 30
+    EXTREME_SPEED = 8
+
+
+class StepsUOV(IntEnum):
+    QUALITY = 36
+    SPEED = 18
+    EXTREME_SPEED = 8
+
+
+class Performance(Enum):
+    QUALITY = 'Quality'
+    SPEED = 'Speed'
+    EXTREME_SPEED = 'Extreme Speed'
+
+    @classmethod
+    def list(cls) -> list:
+        return list(map(lambda c: c.value, cls))
+
+    def steps(self) -> int | None:
+        return Steps[self.name].value if Steps[self.name] else None
+
+    def steps_uov(self) -> int | None:
+        return StepsUOV[self.name].value if Steps[self.name] else None
+
+
+performance_selections = Performance.list()
loaded_parameter_dict, results) + get_str('refiner_model', 'Refiner Model', loaded_parameter_dict, results) + get_float('refiner_switch', 'Refiner Switch', loaded_parameter_dict, results) + get_str('sampler', 'Sampler', loaded_parameter_dict, results) + get_str('scheduler', 'Scheduler', loaded_parameter_dict, results) + get_seed('seed', 'Seed', loaded_parameter_dict, results) + + if is_generating: results.append(gr.update()) + else: + results.append(gr.update(visible=True)) + + results.append(gr.update(visible=False)) + + get_freeu('freeu', 'FreeU', loaded_parameter_dict, results) + + for i in range(modules.config.default_max_lora_number): + get_lora(f'lora_combined_{i + 1}', f'LoRA {i + 1}', loaded_parameter_dict, results) + + return results + +def get_str(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Negative Prompt', None) + h = source_dict.get(key, source_dict.get(fallback, default)) assert isinstance(h, str) results.append(h) except: results.append(gr.update()) + +def get_list(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Styles', None) + h = source_dict.get(key, source_dict.get(fallback, default)) h = eval(h) assert isinstance(h, list) results.append(h) except: results.append(gr.update()) + +def get_float(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Performance', None) - assert isinstance(h, str) + h = source_dict.get(key, source_dict.get(fallback, default)) + assert h is not None + h = float(h) results.append(h) except: results.append(gr.update()) + +def get_steps(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Resolution', None) + h = source_dict.get(key, source_dict.get(fallback, default)) + assert h is not None + h = int(h) + # if not in steps or in steps and performance is not the same + if h not in iter(Steps) or Steps(h).name.casefold() != source_dict.get('performance', '').replace(' ', '_').casefold(): + results.append(h) + return + results.append(-1) + except: + results.append(-1) + + +def get_resolution(key: str, fallback: str | None, source_dict: dict, results: list, default=None): + try: + h = source_dict.get(key, source_dict.get(fallback, default)) width, height = eval(h) formatted = modules.config.add_ratio(f'{width}*{height}') if formatted in modules.config.available_aspect_ratios: @@ -55,24 +123,22 @@ def load_parameter_button_click(raw_prompt_txt, is_generating): results.append(gr.update()) results.append(gr.update()) - try: - h = loaded_parameter_dict.get('Sharpness', None) - assert h is not None - h = float(h) - results.append(h) - except: - results.append(gr.update()) +def get_seed(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Guidance Scale', None) + h = source_dict.get(key, source_dict.get(fallback, default)) assert h is not None - h = float(h) + h = int(h) + results.append(False) results.append(h) except: results.append(gr.update()) + results.append(gr.update()) + +def get_adm_guidance(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('ADM Guidance', None) + h = source_dict.get(key, source_dict.get(fallback, default)) p, n, e = eval(h) results.append(float(p)) results.append(float(n)) @@ -82,69 +148,368 @@ def 
load_parameter_button_click(raw_prompt_txt, is_generating): results.append(gr.update()) results.append(gr.update()) - try: - h = loaded_parameter_dict.get('Base Model', None) - assert isinstance(h, str) - results.append(h) - except: - results.append(gr.update()) +def get_freeu(key: str, fallback: str | None, source_dict: dict, results: list, default=None): try: - h = loaded_parameter_dict.get('Refiner Model', None) - assert isinstance(h, str) - results.append(h) + h = source_dict.get(key, source_dict.get(fallback, default)) + b1, b2, s1, s2 = eval(h) + results.append(True) + results.append(float(b1)) + results.append(float(b2)) + results.append(float(s1)) + results.append(float(s2)) except: + results.append(False) + results.append(gr.update()) results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Refiner Switch', None) - assert h is not None - h = float(h) - results.append(h) - except: results.append(gr.update()) - - try: - h = loaded_parameter_dict.get('Sampler', None) - assert isinstance(h, str) - results.append(h) - except: results.append(gr.update()) + +def get_lora(key: str, fallback: str | None, source_dict: dict, results: list): try: - h = loaded_parameter_dict.get('Scheduler', None) - assert isinstance(h, str) - results.append(h) + n, w = source_dict.get(key, source_dict.get(fallback)).split(' : ') + w = float(w) + results.append(True) + results.append(n) + results.append(w) except: - results.append(gr.update()) + results.append(True) + results.append('None') + results.append(1) + + +def get_sha256(filepath): + global hash_cache + + if filepath not in hash_cache: + hash_cache[filepath] = calculate_sha256(filepath) + + return hash_cache[filepath] + + +class MetadataParser(ABC): + def __init__(self): + self.raw_prompt: str = '' + self.full_prompt: str = '' + self.raw_negative_prompt: str = '' + self.full_negative_prompt: str = '' + self.steps: int = 30 + self.base_model_name: str = '' + self.base_model_hash: str = '' + self.refiner_model_name: str = '' + self.refiner_model_hash: str = '' + self.loras: list = [] + + @abstractmethod + def get_scheme(self) -> MetadataScheme: + raise NotImplementedError + + @abstractmethod + def parse_json(self, metadata: dict | str) -> dict: + raise NotImplementedError + + @abstractmethod + def parse_string(self, metadata: dict) -> str: + raise NotImplementedError + + def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras): + self.raw_prompt = raw_prompt + self.full_prompt = full_prompt + self.raw_negative_prompt = raw_negative_prompt + self.full_negative_prompt = full_negative_prompt + self.steps = steps + self.base_model_name = Path(base_model_name).stem + + base_model_path = get_file_from_folder_list(base_model_name, modules.config.paths_checkpoints) + self.base_model_hash = get_sha256(base_model_path) + + if refiner_model_name not in ['', 'None']: + self.refiner_model_name = Path(refiner_model_name).stem + refiner_model_path = get_file_from_folder_list(refiner_model_name, modules.config.paths_checkpoints) + self.refiner_model_hash = get_sha256(refiner_model_path) + + self.loras = [] + for (lora_name, lora_weight) in loras: + if lora_name != 'None': + lora_path = get_file_from_folder_list(lora_name, modules.config.paths_loras) + lora_hash = get_sha256(lora_path) + self.loras.append((Path(lora_name).stem, lora_weight, lora_hash)) + + +class A1111MetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.A1111 + + 
fooocus_to_a1111 = { + 'raw_prompt': 'Raw prompt', + 'raw_negative_prompt': 'Raw negative prompt', + 'negative_prompt': 'Negative prompt', + 'styles': 'Styles', + 'performance': 'Performance', + 'steps': 'Steps', + 'sampler': 'Sampler', + 'scheduler': 'Scheduler', + 'guidance_scale': 'CFG scale', + 'seed': 'Seed', + 'resolution': 'Size', + 'sharpness': 'Sharpness', + 'adm_guidance': 'ADM Guidance', + 'refiner_swap_method': 'Refiner Swap Method', + 'adaptive_cfg': 'Adaptive CFG', + 'overwrite_switch': 'Overwrite Switch', + 'freeu': 'FreeU', + 'base_model': 'Model', + 'base_model_hash': 'Model hash', + 'refiner_model': 'Refiner', + 'refiner_model_hash': 'Refiner hash', + 'lora_hashes': 'Lora hashes', + 'lora_weights': 'Lora weights', + 'created_by': 'User', + 'version': 'Version' + } + + def parse_json(self, metadata: str) -> dict: + metadata_prompt = '' + metadata_negative_prompt = '' + + done_with_prompt = False + + *lines, lastline = metadata.strip().split("\n") + if len(re_param.findall(lastline)) < 3: + lines.append(lastline) + lastline = '' + + for line in lines: + line = line.strip() + if line.startswith(f"{self.fooocus_to_a1111['negative_prompt']}:"): + done_with_prompt = True + line = line[len(f"{self.fooocus_to_a1111['negative_prompt']}:"):].strip() + if done_with_prompt: + metadata_negative_prompt += ('' if metadata_negative_prompt == '' else "\n") + line + else: + metadata_prompt += ('' if metadata_prompt == '' else "\n") + line + + found_styles, prompt, negative_prompt = extract_styles_from_prompt(metadata_prompt, metadata_negative_prompt) + + data = { + 'prompt': prompt, + 'negative_prompt': negative_prompt + } + + for k, v in re_param.findall(lastline): + try: + if v != '' and v[0] == '"' and v[-1] == '"': + v = unquote(v) + + m = re_imagesize.match(v) + if m is not None: + data['resolution'] = str((m.group(1), m.group(2))) + else: + data[list(self.fooocus_to_a1111.keys())[list(self.fooocus_to_a1111.values()).index(k)]] = v + except Exception: + print(f"Error parsing \"{k}: {v}\"") + + # workaround for multiline prompts + if 'raw_prompt' in data: + data['prompt'] = data['raw_prompt'] + raw_prompt = data['raw_prompt'].replace("\n", ', ') + if metadata_prompt != raw_prompt and modules.sdxl_styles.fooocus_expansion not in found_styles: + found_styles.append(modules.sdxl_styles.fooocus_expansion) + + if 'raw_negative_prompt' in data: + data['negative_prompt'] = data['raw_negative_prompt'] + + data['styles'] = str(found_styles) + + # try to load performance based on steps, fallback for direct A1111 imports + if 'steps' in data and 'performance' not in data: + try: + data['performance'] = Performance[Steps(int(data['steps'])).name].value + except ValueError | KeyError: + pass + + if 'sampler' in data: + data['sampler'] = data['sampler'].replace(' Karras', '') + # get key + for k, v in SAMPLERS.items(): + if v == data['sampler']: + data['sampler'] = k + break + + for key in ['base_model', 'refiner_model']: + if key in data: + for filename in modules.config.model_filenames: + path = Path(filename) + if data[key] == path.stem: + data[key] = filename + break + + if 'lora_hashes' in data: + lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + for li, lora in enumerate(data['lora_hashes'].split(', ')): + lora_name, lora_hash, lora_weight = lora.split(': ') + for filename in lora_filenames: + path = Path(filename) + if lora_name == path.stem: + data[f'lora_combined_{li + 1}'] = f'{filename} : {lora_weight}' + break + + 
return data + + def parse_string(self, metadata: dict) -> str: + data = {k: v for _, k, v in metadata} + + width, height = eval(data['resolution']) + + sampler = data['sampler'] + scheduler = data['scheduler'] + if sampler in SAMPLERS and SAMPLERS[sampler] != '': + sampler = SAMPLERS[sampler] + if sampler not in CIVITAI_NO_KARRAS and scheduler == 'karras': + sampler += f' Karras' + + generation_params = { + self.fooocus_to_a1111['steps']: self.steps, + self.fooocus_to_a1111['sampler']: sampler, + self.fooocus_to_a1111['seed']: data['seed'], + self.fooocus_to_a1111['resolution']: f'{width}x{height}', + self.fooocus_to_a1111['guidance_scale']: data['guidance_scale'], + self.fooocus_to_a1111['sharpness']: data['sharpness'], + self.fooocus_to_a1111['adm_guidance']: data['adm_guidance'], + self.fooocus_to_a1111['base_model']: Path(data['base_model']).stem, + self.fooocus_to_a1111['base_model_hash']: self.base_model_hash, + + self.fooocus_to_a1111['performance']: data['performance'], + self.fooocus_to_a1111['scheduler']: scheduler, + # workaround for multiline prompts + self.fooocus_to_a1111['raw_prompt']: self.raw_prompt, + self.fooocus_to_a1111['raw_negative_prompt']: self.raw_negative_prompt, + } + + if self.refiner_model_name not in ['', 'None']: + generation_params |= { + self.fooocus_to_a1111['refiner_model']: self.refiner_model_name, + self.fooocus_to_a1111['refiner_model_hash']: self.refiner_model_hash + } + + for key in ['adaptive_cfg', 'overwrite_switch', 'refiner_swap_method', 'freeu']: + if key in data: + generation_params[self.fooocus_to_a1111[key]] = data[key] + + lora_hashes = [] + for index, (lora_name, lora_weight, lora_hash) in enumerate(self.loras): + # workaround for Fooocus not knowing LoRA name in LoRA metadata + lora_hashes.append(f'{lora_name}: {lora_hash}: {lora_weight}') + lora_hashes_string = ', '.join(lora_hashes) + + generation_params |= { + self.fooocus_to_a1111['lora_hashes']: lora_hashes_string, + self.fooocus_to_a1111['version']: data['version'] + } + + if modules.config.metadata_created_by != '': + generation_params[self.fooocus_to_a1111['created_by']] = modules.config.metadata_created_by + + generation_params_text = ", ".join( + [k if k == v else f'{k}: {quote(v)}' for k, v in generation_params.items() if + v is not None]) + positive_prompt_resolved = ', '.join(self.full_prompt) + negative_prompt_resolved = ', '.join(self.full_negative_prompt) + negative_prompt_text = f"\nNegative prompt: {negative_prompt_resolved}" if negative_prompt_resolved else "" + return f"{positive_prompt_resolved}{negative_prompt_text}\n{generation_params_text}".strip() + + +class FooocusMetadataParser(MetadataParser): + def get_scheme(self) -> MetadataScheme: + return MetadataScheme.FOOOCUS + + def parse_json(self, metadata: dict) -> dict: + model_filenames = modules.config.model_filenames.copy() + lora_filenames = modules.config.lora_filenames.copy() + lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora()) + + for key, value in metadata.items(): + if value in ['', 'None']: + continue + if key in ['base_model', 'refiner_model']: + metadata[key] = self.replace_value_with_filename(key, value, model_filenames) + elif key.startswith('lora_combined_'): + metadata[key] = self.replace_value_with_filename(key, value, lora_filenames) + else: + continue + + return metadata + + def parse_string(self, metadata: list) -> str: + for li, (label, key, value) in enumerate(metadata): + # remove model folder paths from metadata + if key.startswith('lora_combined_'): + name, weight = 
value.split(' : ') + name = Path(name).stem + value = f'{name} : {weight}' + metadata[li] = (label, key, value) + + res = {k: v for _, k, v in metadata} + + res['full_prompt'] = self.full_prompt + res['full_negative_prompt'] = self.full_negative_prompt + res['steps'] = self.steps + res['base_model'] = self.base_model_name + res['base_model_hash'] = self.base_model_hash + + if self.refiner_model_name not in ['', 'None']: + res['refiner_model'] = self.refiner_model_name + res['refiner_model_hash'] = self.refiner_model_hash + + res['loras'] = self.loras + + if modules.config.metadata_created_by != '': + res['created_by'] = modules.config.metadata_created_by + + return json.dumps(dict(sorted(res.items()))) + + @staticmethod + def replace_value_with_filename(key, value, filenames): + for filename in filenames: + path = Path(filename) + if key.startswith('lora_combined_'): + name, weight = value.split(' : ') + if name == path.stem: + return f'{filename} : {weight}' + elif value == path.stem: + return filename + + +def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser: + match metadata_scheme: + case MetadataScheme.FOOOCUS: + return FooocusMetadataParser() + case MetadataScheme.A1111: + return A1111MetadataParser() + case _: + raise NotImplementedError + + +def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]: + with Image.open(filepath) as image: + items = (image.info or {}).copy() + + parameters = items.pop('parameters', None) + if parameters is not None and is_json(parameters): + parameters = json.loads(parameters) try: - h = loaded_parameter_dict.get('Seed', None) - assert h is not None - h = int(h) - results.append(False) - results.append(h) - except: - results.append(gr.update()) - results.append(gr.update()) + metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None)) + except ValueError: + metadata_scheme = None - if is_generating: - results.append(gr.update()) - else: - results.append(gr.update(visible=True)) - - results.append(gr.update(visible=False)) + # broad fallback + if isinstance(parameters, dict): + metadata_scheme = MetadataScheme.FOOOCUS - for i in range(1, modules.config.default_max_lora_number + 1): - try: - n, w = loaded_parameter_dict.get(f'LoRA {i}', ' : ').split(' : ') - w = float(w) - results.append(True) - results.append(n) - results.append(w) - except: - results.append(True) - results.append('None') - results.append(1.0) + if isinstance(parameters, str): + metadata_scheme = MetadataScheme.A1111 - return results + return parameters, items, metadata_scheme diff --git a/modules/private_logger.py b/modules/private_logger.py index 506b10555..2213cbbab 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -5,7 +5,9 @@ import urllib.parse from PIL import Image +from PIL.PngImagePlugin import PngInfo from modules.util import generate_temp_filename +from modules.meta_parser import MetadataParser from tempfile import gettempdir log_cache = {} @@ -18,11 +20,21 @@ def get_current_html_path(): return html_name -def log(img, dic) -> str: +def log(img, metadata, metadata_parser: MetadataParser | None = None) -> str: path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension='png') os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) - Image.fromarray(img).save(local_temp_filename) + + parsed_parameters = 
metadata_parser.parse_string(metadata) if metadata_parser is not None else '' + image = Image.fromarray(img) + + if parsed_parameters != '': + pnginfo = PngInfo() + pnginfo.add_text('parameters', parsed_parameters) + pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value) + else: + pnginfo = None + image.save(local_temp_filename, pnginfo=pnginfo) if args_manager.args.disable_image_log: return local_temp_filename @@ -34,7 +46,7 @@ def log(img, dic) -> str: "body { background-color: #121212; color: #E0E0E0; } " "a { color: #BB86FC; } " ".metadata { border-collapse: collapse; width: 100%; } " - ".metadata .key { width: 15%; } " + ".metadata .label { width: 15%; } " ".metadata .value { width: 85%; font-weight: bold; } " ".metadata th, .metadata td { border: 1px solid #4d4d4d; padding: 4px; } " ".image-container img { height: auto; max-width: 512px; display: block; padding-right:10px; } " @@ -87,13 +99,13 @@ def log(img, dic) -> str: item = f"

\n" item += f"" item += "" item += "
{only_name}
" - for key, value in dic: - value_txt = str(value).replace('\n', '
') - item += f"\n" + for label, key, value in metadata: + value_txt = str(value).replace('\n', '
') + item += f"\n" item += "" - js_txt = urllib.parse.quote(json.dumps({k: v for k, v in dic}, indent=0), safe='') - item += f"
" + js_txt = urllib.parse.quote(json.dumps({k: v for _, k, v in metadata}, indent=0), safe='') + item += f"
" item += "
\n\n" diff --git a/modules/util.py b/modules/util.py index 1b1651159..29d48696b 100644 --- a/modules/util.py +++ b/modules/util.py @@ -1,15 +1,20 @@ +import typing + import numpy as np import datetime import random import math import os import cv2 +import json from PIL import Image +from hashlib import sha256 +import modules.sdxl_styles LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) - +HASH_SHA256_LENGTH = 10 def erode_or_dilate(x, k): k = int(k) @@ -170,13 +175,173 @@ def get_files_from_folder(folder_path, exensions=None, name_filter=None): relative_path = "" for filename in sorted(files, key=lambda s: s.casefold()): _, file_extension = os.path.splitext(filename) - if (exensions == None or file_extension.lower() in exensions) and (name_filter == None or name_filter in _): + if (exensions is None or file_extension.lower() in exensions) and (name_filter is None or name_filter in _): path = os.path.join(relative_path, filename) filenames.append(path) return filenames +def calculate_sha256(filename, length=HASH_SHA256_LENGTH) -> str: + hash_sha256 = sha256() + blksize = 1024 * 1024 + + with open(filename, "rb") as f: + for chunk in iter(lambda: f.read(blksize), b""): + hash_sha256.update(chunk) + + res = hash_sha256.hexdigest() + return res[:length] if length else res + + +def quote(text): + if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text): + return text + + return json.dumps(text, ensure_ascii=False) + + +def unquote(text): + if len(text) == 0 or text[0] != '"' or text[-1] != '"': + return text + + try: + return json.loads(text) + except Exception: + return text + + +def unwrap_style_text_from_prompt(style_text, prompt): + """ + Checks the prompt to see if the style text is wrapped around it. If so, + returns True plus the prompt text without the style text. Otherwise, returns + False with the original prompt. + + Note that the "cleaned" version of the style text is only used for matching + purposes here. It isn't returned; the original style text is not modified. + """ + stripped_prompt = prompt + stripped_style_text = style_text + if "{prompt}" in stripped_style_text: + # Work out whether the prompt is wrapped in the style text. If so, we + # return True and the "inner" prompt text that isn't part of the style. + try: + left, right = stripped_style_text.split("{prompt}", 2) + except ValueError as e: + # If the style text has multple "{prompt}"s, we can't split it into + # two parts. This is an error, but we can't do anything about it. + print(f"Unable to compare style text to prompt:\n{style_text}") + print(f"Error: {e}") + return False, prompt, '' + + left_pos = stripped_prompt.find(left) + right_pos = stripped_prompt.find(right) + if 0 <= left_pos < right_pos: + real_prompt = stripped_prompt[left_pos + len(left):right_pos] + prompt = stripped_prompt.replace(left + real_prompt + right, '', 1) + if prompt.startswith(", "): + prompt = prompt[2:] + if prompt.endswith(", "): + prompt = prompt[:-2] + return True, prompt, real_prompt + else: + # Work out whether the given prompt starts with the style text. If so, we + # return True and the prompt text up to where the style text starts. 
+ if stripped_prompt.endswith(stripped_style_text): + prompt = stripped_prompt[: len(stripped_prompt) - len(stripped_style_text)] + if prompt.endswith(", "): + prompt = prompt[:-2] + return True, prompt, prompt + + return False, prompt, '' + + +def extract_original_prompts(style, prompt, negative_prompt): + """ + Takes a style and compares it to the prompt and negative prompt. If the style + matches, returns True plus the prompt and negative prompt with the style text + removed. Otherwise, returns False with the original prompt and negative prompt. + """ + if not style.prompt and not style.negative_prompt: + return False, prompt, negative_prompt + + match_positive, extracted_positive, real_prompt = unwrap_style_text_from_prompt( + style.prompt, prompt + ) + if not match_positive: + return False, prompt, negative_prompt, '' + + match_negative, extracted_negative, _ = unwrap_style_text_from_prompt( + style.negative_prompt, negative_prompt + ) + if not match_negative: + return False, prompt, negative_prompt, '' + + return True, extracted_positive, extracted_negative, real_prompt + + +def extract_styles_from_prompt(prompt, negative_prompt): + extracted = [] + applicable_styles = [] + + for style_name, (style_prompt, style_negative_prompt) in modules.sdxl_styles.styles.items(): + applicable_styles.append(PromptStyle(name=style_name, prompt=style_prompt, negative_prompt=style_negative_prompt)) + + real_prompt = '' + + while True: + found_style = None + + for style in applicable_styles: + is_match, new_prompt, new_neg_prompt, new_real_prompt = extract_original_prompts( + style, prompt, negative_prompt + ) + if is_match: + found_style = style + prompt = new_prompt + negative_prompt = new_neg_prompt + if real_prompt == '' and new_real_prompt != '' and new_real_prompt != prompt: + real_prompt = new_real_prompt + break + + if not found_style: + break + + applicable_styles.remove(found_style) + extracted.append(found_style.name) + + # add prompt expansion if not all styles could be resolved + if prompt != '': + if real_prompt != '': + extracted.append(modules.sdxl_styles.fooocus_expansion) + else: + # find real_prompt when only prompt expansion is selected + first_word = prompt.split(', ')[0] + first_word_positions = [i for i in range(len(prompt)) if prompt.startswith(first_word, i)] + if len(first_word_positions) > 1: + real_prompt = prompt[:first_word_positions[-1]] + extracted.append(modules.sdxl_styles.fooocus_expansion) + if real_prompt.endswith(', '): + real_prompt = real_prompt[:-2] + + return list(reversed(extracted)), real_prompt, negative_prompt + + +class PromptStyle(typing.NamedTuple): + name: str + prompt: str + negative_prompt: str + + +def is_json(data: str) -> bool: + try: + loaded_json = json.loads(data) + assert isinstance(loaded_json, dict) + except (ValueError, AssertionError): + return False + return True + + def get_file_from_folder_list(name, folders): for folder in folders: filename = os.path.abspath(os.path.realpath(os.path.join(folder, name))) diff --git a/webui.py b/webui.py index a3663dfa8..7020438e6 100644 --- a/webui.py +++ b/webui.py @@ -20,6 +20,7 @@ from modules.private_logger import get_current_html_path from modules.ui_gradio_extensions import reload_javascript from modules.auth import auth_enabled, check_auth +from modules.util import is_json def get_task(*args): args = list(args) @@ -158,7 +159,7 @@ def skip_clicked(currentTask): ip_weights = [] ip_ctrls = [] ip_ad_cols = [] - for _ in range(4): + for _ in range(flags.controlnet_image_count): with gr.Column(): 
ip_image = grh.Image(label='Image', source='upload', type='numpy', show_label=False, height=300) ip_images.append(ip_image) @@ -216,6 +217,30 @@ def ip_advance_checked(x): value=flags.desc_type_photo) desc_btn = gr.Button(value='Describe this Image into Prompt') gr.HTML('\U0001F4D4 Document') + with gr.TabItem(label='Metadata') as load_tab: + with gr.Column(): + metadata_input_image = grh.Image(label='Drag any image generated by Fooocus here', source='upload', type='filepath') + metadata_json = gr.JSON(label='Metadata') + metadata_import_button = gr.Button(value='Apply Metadata') + + def trigger_metadata_preview(filepath): + parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + + results = {} + if parameters is not None: + results['parameters'] = parameters + + if items: + results['items'] = items + + if isinstance(metadata_scheme, flags.MetadataScheme): + results['metadata_scheme'] = metadata_scheme.value + + return results + + metadata_input_image.upload(trigger_metadata_preview, inputs=metadata_input_image, + outputs=metadata_json, queue=False, show_progress=True) + switch_js = "(x) => {if(x){viewer_to_bottom(100);viewer_to_bottom(500);}else{viewer_to_top();} return x;}" down_js = "() => {viewer_to_bottom();}" @@ -359,7 +384,7 @@ def update_history_link(): step=0.001, value=0.3, info='When to end the guidance from positive/negative ADM. ') - refiner_swap_method = gr.Dropdown(label='Refiner swap method', value='joint', + refiner_swap_method = gr.Dropdown(label='Refiner swap method', value=flags.refiner_swap_method, choices=['joint', 'separate', 'vae']) adaptive_cfg = gr.Slider(label='CFG Mimicking from TSNR', minimum=1.0, maximum=30.0, step=0.01, @@ -407,6 +432,16 @@ def update_history_link(): info='Disable automatic seed increment when image number is > 1.', value=False) + if not args_manager.args.disable_metadata: + save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, + info='Adds parameters to generated images allowing manual regeneration.') + metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme, + info='Image Prompt parameters are not included. 
Use a1111 for compatibility with Civitai.', + visible=modules.config.default_save_metadata_to_images) + + save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme], + queue=False, show_progress=False) + with gr.Tab(label='Control'): debugging_cn_preprocessor = gr.Checkbox(label='Debug Preprocessors', value=False, info='See the results from preprocessors.') @@ -484,7 +519,6 @@ def model_refresh_clicked(): results += [gr.update(choices=['None'] + modules.config.model_filenames)] for i in range(modules.config.default_max_lora_number): results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()] - return results model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls, queue=False, show_progress=False) @@ -555,20 +589,18 @@ def inpaint_mode_change(mode): ctrls += [refiner_swap_method, controlnet_softness] ctrls += freeu_ctrls ctrls += inpaint_ctrls + + if not args_manager.args.disable_metadata: + ctrls += [save_metadata_to_images, metadata_scheme] + ctrls += ip_ctrls state_is_generating = gr.State(False) def parse_meta(raw_prompt_txt, is_generating): loaded_json = None - try: - if '{' in raw_prompt_txt: - if '}' in raw_prompt_txt: - if ':' in raw_prompt_txt: - loaded_json = json.loads(raw_prompt_txt) - assert isinstance(loaded_json, dict) - except: - loaded_json = None + if is_json(raw_prompt_txt): + loaded_json = json.loads(raw_prompt_txt) if loaded_json is None: if is_generating: @@ -580,31 +612,29 @@ def parse_meta(raw_prompt_txt, is_generating): prompt.input(parse_meta, inputs=[prompt, state_is_generating], outputs=[prompt, generate_button, load_parameter_button], queue=False, show_progress=False) - load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=[ - advanced_checkbox, - image_number, - prompt, - negative_prompt, - style_selections, - performance_selection, - aspect_ratios_selection, - overwrite_width, - overwrite_height, - sharpness, - guidance_scale, - adm_scaler_positive, - adm_scaler_negative, - adm_scaler_end, - base_model, - refiner_model, - refiner_switch, - sampler_name, - scheduler_name, - seed_random, - image_seed, - generate_button, - load_parameter_button - ] + lora_ctrls, queue=False, show_progress=False) + load_data_outputs = [advanced_checkbox, image_number, prompt, negative_prompt, style_selections, + performance_selection, overwrite_step, overwrite_switch, aspect_ratios_selection, + overwrite_width, overwrite_height, guidance_scale, sharpness, adm_scaler_positive, + adm_scaler_negative, adm_scaler_end, refiner_swap_method, adaptive_cfg, base_model, + refiner_model, refiner_switch, sampler_name, scheduler_name, seed_random, image_seed, + generate_button, load_parameter_button] + freeu_ctrls + lora_ctrls + + load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) + + def trigger_metadata_import(filepath, state_is_generating): + parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + if parameters is None: + print('Could not find metadata in the image!') + parsed_parameters = {} + else: + metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme) + parsed_parameters = metadata_parser.parse_json(parameters) + + return modules.meta_parser.load_parameter_button_click(parsed_parameters, 
state_is_generating) + + + metadata_import_button.click(trigger_metadata_import, inputs=[metadata_input_image, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=True) \ + .then(style_sorter.sort_styles, inputs=style_selections, outputs=style_selections, queue=False, show_progress=False) generate_button.click(lambda: (gr.update(visible=True, interactive=True), gr.update(visible=True, interactive=True), gr.update(visible=False, interactive=False), [], True), outputs=[stop_button, skip_button, generate_button, gallery, state_is_generating]) \ From b6d23670d87277f4ae237c499237d4eb3b9d9903 Mon Sep 17 00:00:00 2001 From: Manuel Schmid Date: Mon, 26 Feb 2024 15:31:32 +0100 Subject: [PATCH 18/25] feat: add jpg and webp support, add exif data handling for metadata (#1863) * feature: added flag, config and ui update for image extension change #1789 * moved function to config module * moved image extension to webui via async worker. Passing as parameter to log and get_current_html_path functions per feedback * check flag before displaying image extension radio button * disabled if image log flag is passed in * fix: add missing image_extension parameter to log call * refactor: change label * feat: add webp to image_extensions supported image extemsions: see https://pillow.readthedocs.io/en/stable/handbook/image-file-formats.html * feat: use consistent file name in gradio returns and uses filepaths instead of numpy image by saving to temp dir uses double the temp dir file storage on disk as it saves to temp dir and gradio temp dir when displaying the image, but reuses logged output image * feat: delete temp images after yielding to gradio * feat: use args temp path if given * chore: code cleanup, remove redundant if statement * feat: always show image_extension element this is now possible due to image extension support in gradio via https://github.com/lllyasviel/Fooocus/pull/1932 * refactor: rename image_extension to image_file_extension * feat: use optimized jpg parameters when saving the image quality=95 optimize=True progressive=True * refactor: rename image_file_extension to output_format * feat: add exif handling * refactor: code cleanup, remove items from metadata output --------- Co-authored-by: Manuel Schmid Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid Co-authored by: eddyizm --- modules/async_worker.py | 7 +++-- modules/config.py | 5 +++ modules/flags.py | 2 ++ modules/meta_parser.py | 66 ++++++++++++++++++++++++++++++++++++--- modules/private_logger.py | 32 ++++++++++++------- webui.py | 20 +++++++----- 6 files changed, 104 insertions(+), 28 deletions(-) diff --git a/modules/async_worker.py b/modules/async_worker.py index 677cf4691..2c029cfbe 100644 --- a/modules/async_worker.py +++ b/modules/async_worker.py @@ -1,5 +1,4 @@ import threading -import os from modules.patch import PatchSettings, patch_settings, patch_all patch_all() @@ -142,6 +141,7 @@ def handler(async_task): performance_selection = Performance(args.pop()) aspect_ratios_selection = args.pop() image_number = args.pop() + output_format = args.pop() image_seed = args.pop() sharpness = args.pop() guidance_scale = args.pop() @@ -414,6 +414,7 @@ def handler(async_task): progressbar(async_task, 3, 'Processing prompts ...') tasks = [] + for i in range(image_number): if disable_seed_increment: task_seed = seed @@ -553,7 +554,7 @@ def handler(async_task): if direct_return: d = [('Upscale (Fast)', '2x')] - uov_input_image_path = log(uov_input_image, d) + 
uov_input_image_path = log(uov_input_image, d, output_format) yield_result(async_task, uov_input_image_path, do_not_show_finished_images=True) return @@ -863,7 +864,7 @@ def callback(step, x0, x, total_steps, y): d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}')) d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version)) - img_paths.append(log(x, d, metadata_parser)) + img_paths.append(log(x, d, metadata_parser, output_format)) yield_result(async_task, img_paths, do_not_show_finished_images=len(tasks) == 1 or disable_intermediate_results) except ldm_patched.modules.model_management.InterruptProcessingException as e: diff --git a/modules/config.py b/modules/config.py index a393e24cc..6800c0042 100644 --- a/modules/config.py +++ b/modules/config.py @@ -306,6 +306,11 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_ default_value=32, validator=lambda x: isinstance(x, int) and x >= 1 ) +default_output_format = get_config_item_or_set_default( + key='default_output_format', + default_value='png', + validator=lambda x: x in modules.flags.output_formats +) default_image_number = get_config_item_or_set_default( key='default_image_number', default_value=2, diff --git a/modules/flags.py b/modules/flags.py index 206f51218..6f12bc8f3 100644 --- a/modules/flags.py +++ b/modules/flags.py @@ -67,6 +67,8 @@ cn_ip: (0.5, 0.6), cn_ip_face: (0.9, 0.75), cn_canny: (0.5, 1.0), cn_cpds: (0.5, 1.0) } # stop, weight +output_formats = ['png', 'jpg', 'webp'] + inpaint_engine_versions = ['None', 'v1', 'v2.5', 'v2.6'] inpaint_option_default = 'Inpaint or Outpaint (default)' inpaint_option_detail = 'Improve Detail (face, hand, eyes, etc.)' diff --git a/modules/meta_parser.py b/modules/meta_parser.py index e9f1d0332..9b2dadb32 100644 --- a/modules/meta_parser.py +++ b/modules/meta_parser.py @@ -7,6 +7,7 @@ import gradio as gr from PIL import Image +import fooocus_version import modules.config import modules.sdxl_styles from modules.flags import MetadataScheme, Performance, Steps @@ -181,13 +182,43 @@ def get_lora(key: str, fallback: str | None, source_dict: dict, results: list): def get_sha256(filepath): global hash_cache - if filepath not in hash_cache: hash_cache[filepath] = calculate_sha256(filepath) return hash_cache[filepath] +def parse_meta_from_preset(preset_content): + assert isinstance(preset_content, dict) + preset_prepared = {} + items = preset_content + + for settings_key, meta_key in modules.config.possible_preset_keys.items(): + if settings_key == "default_loras": + loras = getattr(modules.config, settings_key) + if settings_key in items: + loras = items[settings_key] + for index, lora in enumerate(loras[:5]): + preset_prepared[f'lora_combined_{index + 1}'] = ' : '.join(map(str, lora)) + elif settings_key == "default_aspect_ratio": + if settings_key in items and items[settings_key] is not None: + default_aspect_ratio = items[settings_key] + width, height = default_aspect_ratio.split('*') + else: + default_aspect_ratio = getattr(modules.config, settings_key) + width, height = default_aspect_ratio.split('×') + height = height[:height.index(" ")] + preset_prepared[meta_key] = (width, height) + else: + preset_prepared[meta_key] = items[settings_key] if settings_key in items and items[ + settings_key] is not None else getattr(modules.config, settings_key) + + if settings_key == "default_styles" or settings_key == "default_aspect_ratio": + preset_prepared[meta_key] = str(preset_prepared[meta_key]) + + return preset_prepared + + class 
MetadataParser(ABC): def __init__(self): self.raw_prompt: str = '' @@ -213,7 +244,8 @@ def parse_json(self, metadata: dict | str) -> dict: def parse_string(self, metadata: dict) -> str: raise NotImplementedError - def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, refiner_model_name, loras): + def set_data(self, raw_prompt, full_prompt, raw_negative_prompt, full_negative_prompt, steps, base_model_name, + refiner_model_name, loras): self.raw_prompt = raw_prompt self.full_prompt = full_prompt self.raw_negative_prompt = raw_negative_prompt @@ -492,16 +524,28 @@ def get_metadata_parser(metadata_scheme: MetadataScheme) -> MetadataParser: raise NotImplementedError -def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | None]: +def read_info_from_image(filepath) -> tuple[str | None, MetadataScheme | None]: with Image.open(filepath) as image: items = (image.info or {}).copy() parameters = items.pop('parameters', None) + metadata_scheme = items.pop('fooocus_scheme', None) + exif = items.pop('exif', None) + if parameters is not None and is_json(parameters): parameters = json.loads(parameters) + elif exif is not None: + exif = image.getexif() + # 0x9286 = UserComment + parameters = exif.get(0x9286, None) + # 0x927C = MakerNote + metadata_scheme = exif.get(0x927C, None) + + if is_json(parameters): + parameters = json.loads(parameters) try: - metadata_scheme = MetadataScheme(items.pop('fooocus_scheme', None)) + metadata_scheme = MetadataScheme(metadata_scheme) except ValueError: metadata_scheme = None @@ -512,4 +556,16 @@ def read_info_from_image(filepath) -> tuple[str | None, dict, MetadataScheme | N if isinstance(parameters, str): metadata_scheme = MetadataScheme.A1111 - return parameters, items, metadata_scheme + return parameters, metadata_scheme + + +def get_exif(metadata: str | None, metadata_scheme: str): + exif = Image.Exif() + # tags see see https://github.com/python-pillow/Pillow/blob/9.2.x/src/PIL/ExifTags.py + # 0x9286 = UserComment + exif[0x9286] = metadata + # 0x0131 = Software + exif[0x0131] = 'Fooocus v' + fooocus_version.version + # 0x927C = MakerNote + exif[0x927C] = metadata_scheme + return exif \ No newline at end of file diff --git a/modules/private_logger.py b/modules/private_logger.py index 2213cbbab..8fa5f73c6 100644 --- a/modules/private_logger.py +++ b/modules/private_logger.py @@ -7,34 +7,42 @@ from PIL import Image from PIL.PngImagePlugin import PngInfo from modules.util import generate_temp_filename -from modules.meta_parser import MetadataParser -from tempfile import gettempdir +from modules.meta_parser import MetadataParser, get_exif log_cache = {} -def get_current_html_path(): +def get_current_html_path(output_format=None): + output_format = output_format if output_format else modules.config.default_output_format date_string, local_temp_filename, only_name = generate_temp_filename(folder=modules.config.path_outputs, - extension='png') + extension=output_format) html_name = os.path.join(os.path.dirname(local_temp_filename), 'log.html') return html_name -def log(img, metadata, metadata_parser: MetadataParser | None = None) -> str: +def log(img, metadata, metadata_parser: MetadataParser | None = None, output_format=None) -> str: path_outputs = args_manager.args.temp_path if args_manager.args.disable_image_log else modules.config.path_outputs - date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension='png') + output_format = output_format if 
output_format else modules.config.default_output_format + date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format) os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True) parsed_parameters = metadata_parser.parse_string(metadata) if metadata_parser is not None else '' image = Image.fromarray(img) - if parsed_parameters != '': - pnginfo = PngInfo() - pnginfo.add_text('parameters', parsed_parameters) - pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value) + if output_format == 'png': + if parsed_parameters != '': + pnginfo = PngInfo() + pnginfo.add_text('parameters', parsed_parameters) + pnginfo.add_text('fooocus_scheme', metadata_parser.get_scheme().value) + else: + pnginfo = None + image.save(local_temp_filename, pnginfo=pnginfo) + elif output_format == 'jpg': + image.save(local_temp_filename, quality=95, optimize=True, progressive=True, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) + elif output_format == 'webp': + image.save(local_temp_filename, quality=95, lossless=False, exif=get_exif(parsed_parameters, metadata_parser.get_scheme().value) if metadata_parser else Image.Exif()) else: - pnginfo = None - image.save(local_temp_filename, pnginfo=pnginfo) + image.save(local_temp_filename) if args_manager.args.disable_image_log: return local_temp_filename diff --git a/webui.py b/webui.py index 7020438e6..5e8853ede 100644 --- a/webui.py +++ b/webui.py @@ -224,15 +224,12 @@ def ip_advance_checked(x): metadata_import_button = gr.Button(value='Apply Metadata') def trigger_metadata_preview(filepath): - parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) results = {} if parameters is not None: results['parameters'] = parameters - if items: - results['items'] = items - if isinstance(metadata_scheme, flags.MetadataScheme): results['metadata_scheme'] = metadata_scheme.value @@ -263,6 +260,11 @@ def trigger_metadata_preview(filepath): value=modules.config.default_aspect_ratio, info='width × height', elem_classes='aspect_ratios') image_number = gr.Slider(label='Image Number', minimum=1, maximum=modules.config.default_max_image_number, step=1, value=modules.config.default_image_number) + + output_format = gr.Radio(label='Output Format', + choices=modules.flags.output_formats, + value=modules.config.default_output_format) + negative_prompt = gr.Textbox(label='Negative Prompt', show_label=True, placeholder="Type prompt here.", info='Describing what you do not want to see.', lines=2, elem_id='negative_prompt', @@ -292,7 +294,7 @@ def update_history_link(): if args_manager.args.disable_image_log: return gr.update(value='') - return gr.update(value=f'\U0001F4DA History Log') + return gr.update(value=f'\U0001F4DA History Log') history_link = gr.HTML() shared.gradio_root.load(update_history_link, outputs=history_link, queue=False, show_progress=False) @@ -532,7 +534,9 @@ def model_refresh_clicked(): adm_scaler_negative, refiner_switch, refiner_model, sampler_name, scheduler_name, adaptive_cfg, refiner_swap_method, negative_prompt, disable_intermediate_results ], queue=False, show_progress=False) - + + output_format.input(lambda x: gr.update(output_format=x), inputs=output_format) + advanced_checkbox.change(lambda x: gr.update(visible=x), advanced_checkbox, advanced_column, queue=False, show_progress=False) \ .then(fn=lambda: None, 
_js='refresh_grid_delayed', queue=False, show_progress=False) @@ -573,7 +577,7 @@ def inpaint_mode_change(mode): ctrls = [currentTask, generate_image_grid] ctrls += [ prompt, negative_prompt, style_selections, - performance_selection, aspect_ratios_selection, image_number, image_seed, sharpness, guidance_scale + performance_selection, aspect_ratios_selection, image_number, output_format, image_seed, sharpness, guidance_scale ] ctrls += [base_model, refiner_model, refiner_switch] + lora_ctrls @@ -622,7 +626,7 @@ def parse_meta(raw_prompt_txt, is_generating): load_parameter_button.click(modules.meta_parser.load_parameter_button_click, inputs=[prompt, state_is_generating], outputs=load_data_outputs, queue=False, show_progress=False) def trigger_metadata_import(filepath, state_is_generating): - parameters, items, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) + parameters, metadata_scheme = modules.meta_parser.read_info_from_image(filepath) if parameters is None: print('Could not find metadata in the image!') parsed_parameters = {} From f4a6350300e03fb0aa7619d02a4704434dd5e89c Mon Sep 17 00:00:00 2001 From: whitehara <58582589+whitehara@users.noreply.github.com> Date: Mon, 26 Feb 2024 18:30:05 +0200 Subject: [PATCH 19/25] feat: add docker files (#1418) * Add docker files * Add python precompiled cache file in the image * Add Notes in docker.md * Create docker-publish.yml * Modify docker-compose.yml not to use the bind mount * Update torch version * Change --share to --listen * Update torch version * Change '--share' to '--listen` * adjust code comments * Update requirements-docker.txt * chore: code cleanup - default_model env var isn't necessary as model is included in default preset, same for speed - ENV CMDARGS --listen is now synched with docker-compose.yml file - remove * Change entry_with_update.py to launch.py in entrypoint.sh * Change CMD in Dockerfile * Change default CMDARGS to --listen in Dockerfile * Modify CMD in Dockerfile * Fix docker-compose.yml * Import files from models,outputs * docs: change wording in docker.md, change git clone URL, add quotes to port mapping * docs: remove docker publish github action, remove pre-built image from docs * Modify modules versions for linux/arm64 * docs: update docker readme --------- Co-authored-by: Manuel Schmid <9307310+mashb1t@users.noreply.github.com> Co-authored-by: Manuel Schmid Co-authored-by: Manuel Schmid --- .dockerignore | 1 + Dockerfile | 29 ++++++++++++++++++ docker-compose.yml | 38 ++++++++++++++++++++++++ docker.md | 66 +++++++++++++++++++++++++++++++++++++++++ entrypoint.sh | 33 +++++++++++++++++++++ modules/config.py | 25 +++++++++++++--- modules/util.py | 2 +- readme.md | 4 +++ requirements_docker.txt | 5 ++++ 9 files changed, 198 insertions(+), 5 deletions(-) create mode 100644 .dockerignore create mode 100644 Dockerfile create mode 100644 docker-compose.yml create mode 100644 docker.md create mode 100755 entrypoint.sh create mode 100644 requirements_docker.txt diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 000000000..485dee64b --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.idea diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..2aea28106 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,29 @@ +FROM nvidia/cuda:12.3.1-base-ubuntu22.04 +ENV DEBIAN_FRONTEND noninteractive +ENV CMDARGS --listen + +RUN apt-get update -y && \ + apt-get install -y curl libgl1 libglib2.0-0 python3-pip python-is-python3 git && \ + apt-get clean && \ + rm -rf /var/lib/apt/lists/* + 
+COPY requirements_docker.txt requirements_versions.txt /tmp/ +RUN pip install --no-cache-dir -r /tmp/requirements_docker.txt -r /tmp/requirements_versions.txt && \ + rm -f /tmp/requirements_docker.txt /tmp/requirements_versions.txt +RUN pip install --no-cache-dir xformers==0.0.22 --no-dependencies +RUN curl -fsL -o /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 https://cdn-media.huggingface.co/frpc-gradio-0.2/frpc_linux_amd64 && \ + chmod +x /usr/local/lib/python3.10/dist-packages/gradio/frpc_linux_amd64_v0.2 + +RUN adduser --disabled-password --gecos '' user && \ + mkdir -p /content/app /content/data + +COPY entrypoint.sh /content/ +RUN chown -R user:user /content + +WORKDIR /content +USER user + +RUN git clone https://github.com/lllyasviel/Fooocus /content/app +RUN mv /content/app/models /content/app/models.org + +CMD [ "sh", "-c", "/content/entrypoint.sh ${CMDARGS}" ] diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 000000000..dee7b3e7c --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,38 @@ +version: '3.9' + +volumes: + fooocus-data: + +services: + app: + build: . + image: fooocus + ports: + - "7865:7865" + environment: + - CMDARGS=--listen # Arguments for launch.py. + - DATADIR=/content/data # Directory which stores models, outputs dir + - config_path=/content/data/config.txt + - config_example_path=/content/data/config_modification_tutorial.txt + - path_checkpoints=/content/data/models/checkpoints/ + - path_loras=/content/data/models/loras/ + - path_embeddings=/content/data/models/embeddings/ + - path_vae_approx=/content/data/models/vae_approx/ + - path_upscale_models=/content/data/models/upscale_models/ + - path_inpaint=/content/data/models/inpaint/ + - path_controlnet=/content/data/models/controlnet/ + - path_clip_vision=/content/data/models/clip_vision/ + - path_fooocus_expansion=/content/data/models/prompt_expansion/fooocus_expansion/ + - path_outputs=/content/app/outputs/ # Warning: If it is not located under '/content/app', you can't see history log! + volumes: + - fooocus-data:/content/data + #- ./models:/import/models # Once you import files, you don't need to mount again. + #- ./outputs:/import/outputs # Once you import files, you don't need to mount again. + tty: true + deploy: + resources: + reservations: + devices: + - driver: nvidia + device_ids: ['0'] + capabilities: [compute, utility] diff --git a/docker.md b/docker.md new file mode 100644 index 000000000..36cfa632a --- /dev/null +++ b/docker.md @@ -0,0 +1,66 @@ +# Fooocus on Docker + +The docker image is based on NVIDIA CUDA 12.3 and PyTorch 2.0, see [Dockerfile](Dockerfile) and [requirements_docker.txt](requirements_docker.txt) for details. + +## Quick start + +**This is just an easy way for testing. Please find more information in the [notes](#notes).** + +1. Clone this repository +2. Build the image with `docker compose build` +3. Run the docker container with `docker compose up`. Building the image takes some time. + +When you see the message `Use the app with http://0.0.0.0:7865/` in the console, you can access the URL in your browser. + +Your models and outputs are stored in the `fooocus-data` volume, which, depending on OS, is stored in `/var/lib/docker/volumes`. + +## Details + +### Update the container manually + +When you are using `docker compose up` continuously, the container is not updated to the latest version of Fooocus automatically. 
+Run `git pull` before executing `docker compose build --no-cache` to build an image with the latest Fooocus version. +You can then start it with `docker compose up` + +### Import models, outputs +If you want to import files from models or the outputs folder, you can uncomment the following settings in the [docker-compose.yml](docker-compose.yml): +``` +#- ./models:/import/models # Once you import files, you don't need to mount again. +#- ./outputs:/import/outputs # Once you import files, you don't need to mount again. +``` +After running `docker compose up`, your files will be copied into `/content/data/models` and `/content/data/outputs` +Since `/content/data` is a persistent volume folder, your files will be persisted even when you re-run `docker compose up --build` without above volume settings. + + +### Paths inside the container + +|Path|Details| +|-|-| +|/content/app|The application stored folder| +|/content/app/models.org|Original 'models' folder.
Files are copied to the '/content/app/models' which is symlinked to '/content/data/models' every time the container boots. (Existing files will not be overwritten.) | +|/content/data|Persistent volume mount point| +|/content/data/models|The folder is symlinked to '/content/app/models'| +|/content/data/outputs|The folder is symlinked to '/content/app/outputs'| + +### Environments + +You can change `config.txt` parameters by using environment variables. +**The priority of using the environments is higher than the values defined in `config.txt`, and they will be saved to the `config_modification_tutorial.txt`** + +Docker specified environments are there. They are used by 'entrypoint.sh' +|Environment|Details| +|-|-| +|DATADIR|'/content/data' location.| +|CMDARGS|Arguments for [entry_with_update.py](entry_with_update.py) which is called by [entrypoint.sh](entrypoint.sh)| +|config_path|'config.txt' location| +|config_example_path|'config_modification_tutorial.txt' location| + +You can also use the same json key names and values explained in the 'config_modification_tutorial.txt' as the environments. +See examples in the [docker-compose.yml](docker-compose.yml) + +## Notes + +- Please keep 'path_outputs' under '/content/app'. Otherwise, you may get an error when you open the history log. +- Docker on Mac/Windows still has issues in the form of slow volume access when you use "bind mount" volumes. Please refer to [this article](https://docs.docker.com/storage/volumes/#use-a-volume-with-docker-compose) for not using "bind mount". +- The MPS backend (Metal Performance Shaders, Apple Silicon M1/M2/etc.) is not yet supported in Docker, see https://github.com/pytorch/pytorch/issues/81224 +- You can also use `docker compose up -d` to start the container detached and connect to the logs with `docker compose logs -f`. This way you can also close the terminal and keep the container running. \ No newline at end of file diff --git a/entrypoint.sh b/entrypoint.sh new file mode 100755 index 000000000..d0dba09c2 --- /dev/null +++ b/entrypoint.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +ORIGINALDIR=/content/app +# Use predefined DATADIR if it is defined +[[ x"${DATADIR}" == "x" ]] && DATADIR=/content/data + +# Make persistent dir from original dir +function mklink () { + mkdir -p $DATADIR/$1 + ln -s $DATADIR/$1 $ORIGINALDIR +} + +# Copy old files from import dir +function import () { + (test -d /import/$1 && cd /import/$1 && cp -Rpn . $DATADIR/$1/) +} + +cd $ORIGINALDIR + +# models +mklink models +# Copy original files +(cd $ORIGINALDIR/models.org && cp -Rpn . 
$ORIGINALDIR/models/) +# Import old files +import models + +# outputs +mklink outputs +# Import old files +import outputs + +# Start application +python launch.py $* diff --git a/modules/config.py b/modules/config.py index 6800c0042..328878ccb 100644 --- a/modules/config.py +++ b/modules/config.py @@ -10,8 +10,16 @@ from modules.util import get_files_from_folder, makedirs_with_log from modules.flags import Performance, MetadataScheme -config_path = os.path.abspath("./config.txt") -config_example_path = os.path.abspath("config_modification_tutorial.txt") +def get_config_path(key, default_value): + env = os.getenv(key) + if env is not None and isinstance(env, str): + print(f"Environment: {key} = {env}") + return env + else: + return os.path.abspath(default_value) + +config_path = get_config_path('config_path', "./config.txt") +config_example_path = get_config_path('config_example_path', "config_modification_tutorial.txt") config_dict = {} always_save_keys = [] visited_keys = [] @@ -123,7 +131,12 @@ def get_dir_or_set_default(key, default_value, as_array=False, make_directory=Fa if key not in always_save_keys: always_save_keys.append(key) - v = config_dict.get(key, None) + v = os.getenv(key) + if v is not None: + print(f"Environment: {key} = {v}") + config_dict[key] = v + else: + v = config_dict.get(key, None) if isinstance(v, str): if make_directory: @@ -165,13 +178,17 @@ def get_dir_or_set_default(key, default_value, as_array=False, make_directory=Fa path_fooocus_expansion = get_dir_or_set_default('path_fooocus_expansion', '../models/prompt_expansion/fooocus_expansion') path_outputs = get_path_output() - def get_config_item_or_set_default(key, default_value, validator, disable_empty_as_none=False): global config_dict, visited_keys if key not in visited_keys: visited_keys.append(key) + v = os.getenv(key) + if v is not None: + print(f"Environment: {key} = {v}") + config_dict[key] = v + if key not in config_dict: config_dict[key] = default_value return default_value diff --git a/modules/util.py b/modules/util.py index 29d48696b..c7923ec82 100644 --- a/modules/util.py +++ b/modules/util.py @@ -160,7 +160,7 @@ def generate_temp_filename(folder='./outputs/', extension='png'): random_number = random.randint(1000, 9999) filename = f"{time_string}_{random_number}.{extension}" result = os.path.join(folder, date_string, filename) - return date_string, os.path.abspath(os.path.realpath(result)), filename + return date_string, os.path.abspath(result), filename def get_files_from_folder(folder_path, exensions=None, name_filter=None): diff --git a/readme.md b/readme.md index 18b48f3ac..a1e62fa47 100644 --- a/readme.md +++ b/readme.md @@ -237,6 +237,10 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition. +### Docker + +See [docker.md](docker.md) + ### Download Previous Version See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405). 
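The environment override added to `modules/config.py` above is what lets the `environment:` block in docker-compose.yml steer the app: any config key set as an environment variable takes precedence over `config.txt`, which in turn beats the built-in default. Here is a minimal, self-contained sketch of that precedence chain; the helper name `resolve_setting` and the sample values are illustrative, not the module's real API:

```python
import os

config_dict = {'default_output_format': 'png'}  # stand-in for values loaded from config.txt

def resolve_setting(key, default_value):
    # 1) an environment variable wins, mirroring get_config_item_or_set_default()
    v = os.getenv(key)
    if v is not None:
        print(f"Environment: {key} = {v}")
        config_dict[key] = v
        return v
    # 2) otherwise config.txt, 3) otherwise the built-in default
    return config_dict.get(key, default_value)

os.environ['path_outputs'] = '/content/app/outputs/'    # as injected by docker-compose.yml
print(resolve_setting('path_outputs', './outputs/'))    # -> /content/app/outputs/
print(resolve_setting('default_output_format', 'jpg'))  # -> png (config.txt beats the default)
```

Keeping `path_outputs` under `/content/app` matters because the history log references images relative to that folder, as docker.md notes above.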
diff --git a/readme.md b/readme.md
index 18b48f3ac..a1e62fa47 100644
--- a/readme.md
+++ b/readme.md
@@ -237,6 +237,10 @@ You can install Fooocus on Apple Mac silicon (M1 or M2) with macOS 'Catalina' or
 
 Use `python entry_with_update.py --preset anime` or `python entry_with_update.py --preset realistic` for Fooocus Anime/Realistic Edition.
 
+### Docker
+
+See [docker.md](docker.md)
+
 ### Download Previous Version
 
 See the guidelines [here](https://github.com/lllyasviel/Fooocus/discussions/1405).
diff --git a/requirements_docker.txt b/requirements_docker.txt
new file mode 100644
index 000000000..3cf4aa89d
--- /dev/null
+++ b/requirements_docker.txt
@@ -0,0 +1,5 @@
+torch==2.0.1
+torchvision==0.15.2
+torchaudio==2.0.2
+torchtext==0.15.2
+torchdata==0.6.1

From 4e526e255ea52ea7420b2b85897bd36430915b57 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 26 Feb 2024 17:39:29 +0100
Subject: [PATCH 20/25] docs: add missing release notes for 2.1.865

---
 update_log.md | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/update_log.md b/update_log.md
index e052d24c0..79b523a4f 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,3 +1,8 @@
+# 2.1.865
+
+* Various bugfixes
+* Add authentication to --listen
+
 # 2.1.864
 
 * New model list. See also discussions.

From 692beadbdcf36e4e9f04d21eafba3090448915ee Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 26 Feb 2024 17:41:29 +0100
Subject: [PATCH 21/25] docs: bump version number to 2.2.0-rc1

easier debugging and issue handling
---
 fooocus_version.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fooocus_version.py b/fooocus_version.py
index 91c2ddda2..de51863ab 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.1.865'
+version = '2.2.0-rc1'

From 9c30961efda2c63726c0aede238b16f666b1dfaa Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 26 Feb 2024 21:12:27 +0100
Subject: [PATCH 22/25] fix: add missing return statement in model_refresh_clicked

---
 webui.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/webui.py b/webui.py
index 5e8853ede..180c7d2ba 100644
--- a/webui.py
+++ b/webui.py
@@ -521,6 +521,7 @@ def model_refresh_clicked():
     results += [gr.update(choices=['None'] + modules.config.model_filenames)]
     for i in range(modules.config.default_max_lora_number):
         results += [gr.update(interactive=True), gr.update(choices=['None'] + modules.config.lora_filenames), gr.update()]
+    return results
 
 model_refresh.click(model_refresh_clicked, [], [base_model, refiner_model] + lora_ctrls,
                     queue=False, show_progress=False)

From 4f4d23f4e3e6896daeb16025b483f9de50f8cdc6 Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Mon, 26 Feb 2024 21:14:11 +0100
Subject: [PATCH 23/25] fix: use filename instead of download function call for lcm lora

do not require lcm lora to be downloaded for metadata parsing
---
 modules/config.py      | 5 +++--
 modules/meta_parser.py | 6 ++++--
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/modules/config.py b/modules/config.py
index 328878ccb..09c8fd7c5 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -474,6 +474,7 @@ def add_ratio(x):
 
 model_filenames = []
 lora_filenames = []
+sdxl_lcm_lora = 'sdxl_lcm_lora.safetensors'
 
 
 def get_model_filenames(folder_paths, name_filter=None):
@@ -533,9 +534,9 @@ def downloading_sdxl_lcm_lora():
     load_file_from_url(
         url='https://huggingface.co/lllyasviel/misc/resolve/main/sdxl_lcm_lora.safetensors',
         model_dir=paths_loras[0],
-        file_name='sdxl_lcm_lora.safetensors'
+        file_name=sdxl_lcm_lora
     )
-    return 'sdxl_lcm_lora.safetensors'
+    return sdxl_lcm_lora
 
 
 def downloading_controlnet_canny():
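The membership check added in the meta_parser.py hunks below means `list.remove` can no longer raise a `ValueError` for a missing entry, and the filename constant replaces a call to `downloading_sdxl_lcm_lora()`, which would trigger a download just to obtain the name. A minimal demonstration of the guard (the filenames are illustrative):

```python
lora_filenames = ["my_style.safetensors"]  # LCM LoRA not present

try:
    lora_filenames.remove("sdxl_lcm_lora.safetensors")
except ValueError as e:
    print(f"unguarded remove fails: {e}")

# The patched code checks membership first, so metadata parsing no longer
# depends on the LCM LoRA file existing locally.
if "sdxl_lcm_lora.safetensors" in lora_filenames:
    lora_filenames.remove("sdxl_lcm_lora.safetensors")
```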
diff --git a/modules/meta_parser.py b/modules/meta_parser.py
index 9b2dadb32..da8c70b21 100644
--- a/modules/meta_parser.py
+++ b/modules/meta_parser.py
@@ -379,7 +379,8 @@ def parse_json(self, metadata: str) -> dict:
 
         if 'lora_hashes' in data:
             lora_filenames = modules.config.lora_filenames.copy()
-            lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
+            if modules.config.sdxl_lcm_lora in lora_filenames:
+                lora_filenames.remove(modules.config.sdxl_lcm_lora)
             for li, lora in enumerate(data['lora_hashes'].split(', ')):
                 lora_name, lora_hash, lora_weight = lora.split(': ')
                 for filename in lora_filenames:
@@ -460,7 +461,8 @@ def get_scheme(self) -> MetadataScheme:
     def parse_json(self, metadata: dict) -> dict:
         model_filenames = modules.config.model_filenames.copy()
         lora_filenames = modules.config.lora_filenames.copy()
-        lora_filenames.remove(modules.config.downloading_sdxl_lcm_lora())
+        if modules.config.sdxl_lcm_lora in lora_filenames:
+            lora_filenames.remove(modules.config.sdxl_lcm_lora)
 
         for key, value in metadata.items():
             if value in ['', 'None']:

From 41e88a4e8d9acd20dd088e2a13e09d3ae2fd0500 Mon Sep 17 00:00:00 2001
From: Gianluca Teti <51110452+gteti@users.noreply.github.com>
Date: Thu, 29 Feb 2024 16:10:34 +0100
Subject: [PATCH 24/25] docs: fix typo in readme (#2368)

---
 readme.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/readme.md b/readme.md
index a1e62fa47..0bfee5b4c 100644
--- a/readme.md
+++ b/readme.md
@@ -297,7 +297,7 @@ In both ways the access is unauthenticated by default. You can add basic authent
 
 The below things are already inside the software, and **users do not need to do anything about these**.
 
-1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processsing and "raw" mode, or the LeonardoAI's Prompt Magic).
+1. GPT2-based [prompt expansion as a dynamic style "Fooocus V2".](https://github.com/lllyasviel/Fooocus/discussions/117#raw) (similar to Midjourney's hidden pre-processing and "raw" mode, or the LeonardoAI's Prompt Magic).
 2. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted, and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures seamless, native, and continuous swap in a refiner setup. (Update Aug 13: Actually, I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged]( https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
 3. Negative ADM guidance. Because the highest resolution level of XL Base does not have cross attentions, the positive and negative signals for XL's highest resolution level cannot receive enough contrasts during the CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since the XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the adm on the positive/negative side to compensate for the lack of CFG contrast in the highest resolution level. (Update Aug 16, the iOS app [Draw Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
 4. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set very low, but this is Fooocus's final guarantee to make sure that the XL will never yield an overly smooth or plastic appearance (examples [here](https://github.com/lllyasviel/Fooocus/discussions/117#sharpness)). This can almost eliminate all cases in which XL still occasionally produces overly smooth results, even with negative ADM guidance. (Update 2023 Aug 18, the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)

From 6db14acf8e2c383d6e33f689646ebf9599f83e9a Mon Sep 17 00:00:00 2001
From: Manuel Schmid
Date: Sat, 2 Mar 2024 16:25:31 +0100
Subject: [PATCH 25/25] docs: update version and changelog

---
 fooocus_version.py |  2 +-
 update_log.md      | 10 +++++++++-
 2 files changed, 10 insertions(+), 2 deletions(-)

diff --git a/fooocus_version.py b/fooocus_version.py
index de51863ab..d4b750f9f 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.2.0-rc1'
+version = '2.2.0'
diff --git a/update_log.md b/update_log.md
index 79b523a4f..b0192d0d8 100644
--- a/update_log.md
+++ b/update_log.md
@@ -1,4 +1,12 @@
-# 2.1.865
+# [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0)
+
+* Isolate every image generation to truly allow multi-user usage
+* Add prompt array support: the main prompt cycles through the bracketed values as the image number increases. Syntax: `[[red, green, blue]] flower`
+* Add optional metadata to images, allowing you to regenerate and modify them later with the same parameters
+* Now supports native PNG, JPG and WEBP image generation
+* Add Docker support
+
+# [2.1.865](https://github.com/lllyasviel/Fooocus/releases/tag/2.1.865)
 
 * Various bugfixes
 * Add authentication to --listen
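The array syntax from the 2.2.0 notes above distributes the bracketed values across the generated images. A rough sketch of the expansion behaviour, assuming a hypothetical `expand_arrays` helper and round-robin reuse of values (neither is taken from the actual implementation):

```python
import re

def expand_arrays(prompt: str, image_number: int) -> list[str]:
    """Expand '[[a, b, c]] ...' into one prompt per image (illustrative only)."""
    match = re.search(r"\[\[(.*?)\]\]", prompt)
    if match is None:
        return [prompt] * image_number
    values = [v.strip() for v in match.group(1).split(",")]
    # Cycle through the values across the requested number of images.
    return [prompt.replace(match.group(0), values[i % len(values)])
            for i in range(image_number)]

print(expand_arrays("[[red, green, blue]] flower", 3))
# ['red flower', 'green flower', 'blue flower']
```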