Skip to content

Commit

Permalink
update pipelines and xyzgrid
Browse files Browse the repository at this point in the history
  • Loading branch information
vladmandic committed Nov 9, 2023
1 parent fd3f971 commit 6564e99
Show file tree
Hide file tree
Showing 4 changed files with 41 additions and 9 deletions.
18 changes: 13 additions & 5 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,16 +2,24 @@

## Update for 2023-11-08

- **Diffusers**
- **LCM** support for any *SD 1.5* or *SD-XL* model!
- download [lcm-lora-sd15](https://huggingface.co/latent-consistency/lcm-lora-sdv1-5/tree/main) and/or [lcm-lora-sdxl](https://huggingface.co/latent-consistency/lcm-lora-sdxl/tree/main)
    - load your favorite *SD 1.5* or *SD-XL* model
- load **lcm lora**
- set **sampler** to **LCM**
    - set number of steps to a low value; for SD-XL, 6-7 steps is normally sufficient
      note: the LCM scheduler does not support more than 50 steps
- Add additional pipeline types for manual model loads when loading from `safetensors`
- Updated logic for calculating **steps** when using base/hires/refiner workflows
- Safe model offloading for non-standard models
- Fix **DPM SDE** scheduler
- **Extra networks**
- Use multi-threading for 5x load speedup
- **General**:
- Reworked parser when pasting previously generated images/prompts
includes all `txt2img`, `img2img` and `override` params
- **Diffusers**
- Add additional pipeline types for manual model loads when loading from `safetensors`
- Updated logic for calculating steps when using base/hires/refiner workflows
- Safe model offloading for non-standard models
- Fix DPM SDE scheduler
- Add refiner options to XYZ Grid
- **Fixes**
- Fix inpaint
- Fix manual grid image save
Expand Down
8 changes: 8 additions & 0 deletions modules/sd_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -646,6 +646,14 @@ def detect_pipeline(f: str, op: str = 'model'):
guess = 'Stable Diffusion XL Instruct'
else:
guess = 'Stable Diffusion'
if 'LCM_' in f or 'LCM-' in f:
if shared.backend == shared.Backend.ORIGINAL:
shared.log.warning(f'Model detected as LCM model, but attempting to load using backend=original: {op}={f} size={size} MB')
guess = 'Latent Consistency Model'
if 'PixArt' in f:
if shared.backend == shared.Backend.ORIGINAL:
shared.log.warning(f'Model detected as PixArt Alpha model, but attempting to load using backend=original: {op}={f} size={size} MB')
guess = 'PixArt Alpha'
pipeline = shared_items.get_pipelines().get(guess, None)
shared.log.info(f'Autodetect: {op}="{guess}" class={pipeline.__name__} file="{f}" size={size}MB')
except Exception as e:
Expand Down
6 changes: 5 additions & 1 deletion modules/sd_samplers_diffusers.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,7 +60,7 @@
sd_samplers_common.SamplerData('Euler', lambda model: DiffusionSampler('Euler', EulerDiscreteScheduler, model), [], {}),
sd_samplers_common.SamplerData('Euler a', lambda model: DiffusionSampler('Euler a', EulerAncestralDiscreteScheduler, model), [], {}),
sd_samplers_common.SamplerData('Heun', lambda model: DiffusionSampler('Heun', HeunDiscreteScheduler, model), [], {}),
sd_samplers_common.SamplerData('LCM', lambda model: DiffusionSampler('Heun', LCMScheduler, model), [], {}),
sd_samplers_common.SamplerData('LCM', lambda model: DiffusionSampler('LCM', LCMScheduler, model), [], {}),
]

class DiffusionSampler:
Expand All @@ -73,18 +73,22 @@ def __init__(self, name, constructor, model, **kwargs):
return
for key, value in config.get('All', {}).items(): # apply global defaults
self.config[key] = value
shared.log.debug(f'Sampler: name={name} type=all config={self.config}')
for key, value in config.get(name, {}).items(): # apply diffusers per-scheduler defaults
self.config[key] = value
shared.log.debug(f'Sampler: name={name} type=scheduler config={self.config}')
if hasattr(model.scheduler, 'scheduler_config'): # find model defaults
orig_config = model.scheduler.scheduler_config
else:
orig_config = model.scheduler.config
for key, value in orig_config.items(): # apply model defaults
if key in self.config:
self.config[key] = value
shared.log.debug(f'Sampler: name={name} type=model config={self.config}')
for key, value in kwargs.items(): # apply user args, if any
if key in self.config:
self.config[key] = value
shared.log.debug(f'Sampler: name={name} type=user config={self.config}')
# finally apply user preferences
if shared.opts.schedulers_prediction_type != 'default':
self.config['prediction_type'] = shared.opts.schedulers_prediction_type
Expand Down
18 changes: 15 additions & 3 deletions scripts/xyz_grid.py
Original file line number Diff line number Diff line change
Expand Up @@ -86,6 +86,17 @@ def apply_checkpoint(p, x, xs):
p.override_settings['sd_model_checkpoint'] = info.name


def apply_refiner(p, x, xs):
    """XYZ grid axis handler: switch the active refiner model to checkpoint *x*.

    Skips the reload when *x* is already the configured refiner; warns and
    leaves state untouched when *x* does not match any known checkpoint.
    """
    if x == shared.opts.sd_model_refiner:
        return  # requested refiner is already active
    info = sd_models.get_closet_checkpoint_match(x)
    if info is None:
        shared.log.warning(f"XYZ grid: apply refiner unknown checkpoint: {x}")
        return  # guard clause replaces the original else-branch
    sd_models.reload_model_weights(shared.sd_refiner, info)
    p.override_settings['sd_model_refiner'] = info.name


def apply_dict(p, x, xs):
if x == shared.opts.sd_model_dict:
return
Expand Down Expand Up @@ -240,11 +251,12 @@ def __init__(self, *args, **kwargs):
AxisOption("[Second pass] upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
AxisOption("[Second pass] sampler", str, apply_latent_sampler, fmt=format_value, confirm=confirm_samplers, choices=lambda: [x.name for x in sd_samplers.samplers]),
AxisOption("[Second pass] denoising Strength", float, apply_field("denoising_strength")),
AxisOption("[Second pass] steps", int, apply_field("hr_second_pass_steps")),
AxisOption("[Second pass] hires steps", int, apply_field("hr_second_pass_steps")),
AxisOption("[Second pass] CFG scale", float, apply_field("image_cfg_scale")),
AxisOption("[Second pass] guidance rescale", float, apply_field("diffusers_guidance_rescale")),
AxisOption("[Second pass] refiner start", float, apply_field("refiner_start")),
AxisOption("[Second pass] refiner start", float, apply_field("refiner_start")),
AxisOption("[Refiner] model", str, apply_refiner, fmt=format_value, cost=1.0, choices=lambda: sorted(sd_models.checkpoints_list)),
AxisOption("[Refiner] refiner start", float, apply_field("refiner_start")),
AxisOption("[Refiner] refiner steps", float, apply_field("refiner_steps")),
AxisOption("[TOME] Token merging ratio (txt2img)", float, apply_override('token_merging_ratio')),
AxisOption("[TOME] Token merging ratio (hires)", float, apply_override('token_merging_ratio_hr')),
AxisOption("[FreeU] 1st stage backbone factor", float, apply_setting('freeu_b1')),
Expand Down

0 comments on commit 6564e99

Please sign in to comment.