Commit b6af0a3

Merge branch 'release_candidate'

2 parents 20ae71f + 8c3e64f · commit b6af0a3

11 files changed
Lines changed: 36 additions & 16 deletions

‎CHANGELOG.md‎

Lines changed: 14 additions & 0 deletions

@@ -1,3 +1,17 @@
+## 1.3.1
+
+### Features:
+ * revert default cross attention optimization to Doggettx
+
+### Bug Fixes:
+ * fix bug: LoRA don't apply on dropdown list sd_lora
+ * fix png info always added even if setting is not enabled
+ * fix some fields not applying in xyz plot
+ * fix "hires. fix" prompt sharing same labels with txt2img_prompt
+ * fix lora hashes not being added properly to infotex if there is only one lora
+ * fix --use-cpu failing to work properly at startup
+ * make --disable-opt-split-attention command line option work again
+
 ## 1.3.0

 ### Features:

‎modules/cmd_args.py‎

Lines changed: 1 addition & 1 deletion

@@ -62,7 +62,7 @@
 parser.add_argument("--opt-split-attention-v1", action='store_true', help="prefer older version of split attention optimization for automatic choice of optimization")
 parser.add_argument("--opt-sdp-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization for automatic choice of optimization; requires PyTorch 2.*")
 parser.add_argument("--opt-sdp-no-mem-attention", action='store_true', help="prefer scaled dot product cross-attention layer optimization without memory efficient attention for automatic choice of optimization, makes image generation deterministic; requires PyTorch 2.*")
-parser.add_argument("--disable-opt-split-attention", action='store_true', help="does not do anything")
+parser.add_argument("--disable-opt-split-attention", action='store_true', help="prefer no cross-attention layer optimization for automatic choice of optimization")
 parser.add_argument("--disable-nan-check", action='store_true', help="do not check if produced images/latent spaces have nans; useful for running without a checkpoint in CI")
 parser.add_argument("--use-cpu", nargs='+', help="use CPU as torch device for specified modules", default=[], type=str.lower)
 parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
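For reference, a flag declared with `action='store_true'` defaults to `False` and flips to `True` only when passed, and the dashes become underscores on the parsed namespace. A minimal, self-contained sketch (a stand-in parser, not the webui's real one) showing how the restored flag parses:

```python
import argparse

# Hypothetical stand-in for the webui's parser; only the flag touched by this commit.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--disable-opt-split-attention",
    action="store_true",
    help="prefer no cross-attention layer optimization for automatic choice of optimization",
)

args = parser.parse_args(["--disable-opt-split-attention"])
print(args.disable_opt_split_attention)   # True; False when the flag is omitted
```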

‎modules/extra_networks.py‎

Lines changed: 1 addition & 1 deletion

@@ -26,7 +26,7 @@ def __init__(self, items=None):
         self.named = {}

         for item in self.items:
-            parts = item.split('=', 2)
+            parts = item.split('=', 2) if isinstance(item, str) else [item]
             if len(parts) == 2:
                 self.named[parts[0]] = parts[1]
             else:
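The `isinstance` guard lets non-string items (for example numeric weights) fall through to the positional list instead of crashing on `.split`. A simplified, standalone sketch of the same parsing split; the helper name `parse_params` and the sample values are illustrative, not part of the webui API:

```python
def parse_params(items):
    """Split extra-network parameters into named ('key=value') and positional entries."""
    named, positional = {}, []
    for item in items:
        # Only strings can carry a 'key=value' pair; anything else stays positional.
        parts = item.split('=', 2) if isinstance(item, str) else [item]
        if len(parts) == 2:
            named[parts[0]] = parts[1]
        else:
            positional.append(item)
    return named, positional

print(parse_params(["my_lora", "te=0.5", 0.8]))
# -> ({'te': '0.5'}, ['my_lora', 0.8])
```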

‎modules/generation_parameters_copypaste.py‎

Lines changed: 1 addition & 1 deletion

@@ -35,7 +35,7 @@ def reset():


 def quote(text):
-    if ',' not in str(text) and '\n' not in str(text):
+    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
         return text

     return json.dumps(text, ensure_ascii=False)
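Adding `':'` to the trigger characters means any value containing a colon is JSON-quoted when written into the generation-parameters text, presumably so it cannot be confused with the `key: value` separators of the infotext format. A standalone sketch of the changed function, with illustrative inputs:

```python
import json

def quote(text):
    # Commas, newlines, and now colons would break the 'key: value, key: value'
    # infotext layout, so such values are emitted as JSON strings instead.
    if ',' not in str(text) and '\n' not in str(text) and ':' not in str(text):
        return text
    return json.dumps(text, ensure_ascii=False)

print(quote("DPM++ 2M Karras"))      # returned unchanged
print(quote("my_lora: aabbccdd"))    # -> "my_lora: aabbccdd" (quoted because of the colon)
```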

‎modules/images.py‎

Lines changed: 6 additions & 3 deletions

@@ -493,9 +493,12 @@ def save_image_with_geninfo(image, geninfo, filename, extension=None, existing_p
         existing_pnginfo['parameters'] = geninfo

     if extension.lower() == '.png':
-        pnginfo_data = PngImagePlugin.PngInfo()
-        for k, v in (existing_pnginfo or {}).items():
-            pnginfo_data.add_text(k, str(v))
+        if opts.enable_pnginfo:
+            pnginfo_data = PngImagePlugin.PngInfo()
+            for k, v in (existing_pnginfo or {}).items():
+                pnginfo_data.add_text(k, str(v))
+        else:
+            pnginfo_data = None

         image.save(filename, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
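This addresses the changelog entry "fix png info always added even if setting is not enabled": PNG text chunks are attached only when `opts.enable_pnginfo` is on, otherwise the file is written with `pnginfo=None`. A minimal Pillow sketch of the same pattern, with a local boolean standing in for the webui's `opts` object:

```python
from PIL import Image, PngImagePlugin

enable_pnginfo = True  # stand-in for opts.enable_pnginfo
metadata = {"parameters": "a cat, Steps: 20, Sampler: Euler a"}  # illustrative geninfo

if enable_pnginfo:
    pnginfo_data = PngImagePlugin.PngInfo()
    for k, v in metadata.items():
        pnginfo_data.add_text(k, str(v))
else:
    pnginfo_data = None  # Pillow writes no text chunks in this case

image = Image.new("RGB", (64, 64))
image.save("out.png", format="PNG", pnginfo=pnginfo_data)

# The metadata round-trips only when the flag was enabled.
print(Image.open("out.png").info.get("parameters"))
```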

‎modules/processing.py‎

Lines changed: 2 additions & 3 deletions

@@ -321,14 +321,13 @@ def get_conds_with_caching(self, function, required_prompts, steps, cache):
         have been used before. The second element is where the previously
         computed result is stored.
         """
-
-        if cache[0] is not None and (required_prompts, steps) == cache[0]:
+        if cache[0] is not None and (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info) == cache[0]:
             return cache[1]

         with devices.autocast():
             cache[1] = function(shared.sd_model, required_prompts, steps)

-        cache[0] = (required_prompts, steps)
+        cache[0] = (required_prompts, steps, opts.CLIP_stop_at_last_layers, shared.sd_model.sd_checkpoint_info)
         return cache[1]

     def setup_conds(self):
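Widening the cache key means a cached conditioning is reused only while the prompts, step count, CLIP-skip setting, and loaded checkpoint are all unchanged; before this, switching checkpoints or CLIP skip could return a stale result. A simplified, standalone sketch of the same one-slot caching pattern (`compute_conds` is an illustrative stand-in for the expensive model call):

```python
# One-slot cache: cache[0] holds the key tuple, cache[1] the cached result.
cache = [None, None]

def get_conds_with_caching(function, prompts, steps, clip_skip, checkpoint, cache):
    key = (prompts, steps, clip_skip, checkpoint)
    if cache[0] is not None and key == cache[0]:
        return cache[1]                # everything that influenced the result is unchanged
    cache[1] = function(prompts, steps)
    cache[0] = key
    return cache[1]

def compute_conds(prompts, steps):
    print("recomputing")               # stand-in for the real conditioning computation
    return (prompts, steps)

get_conds_with_caching(compute_conds, ("a cat",), 20, 1, "model-A", cache)   # recomputes
get_conds_with_caching(compute_conds, ("a cat",), 20, 1, "model-A", cache)   # cache hit
get_conds_with_caching(compute_conds, ("a cat",), 20, 2, "model-A", cache)   # CLIP skip changed, recomputes
```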

‎modules/sd_hijack.py‎

Lines changed: 2 additions & 0 deletions

@@ -68,6 +68,8 @@ def apply_optimizations():

     if selection == "None":
         matching_optimizer = None
+    elif selection == "Automatic" and shared.cmd_opts.disable_opt_split_attention:
+        matching_optimizer = None
     elif matching_optimizer is None:
         matching_optimizer = optimizers[0]
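With this branch, choosing "Automatic" while `--disable-opt-split-attention` is passed now resolves to no optimizer at all, which is what the restored flag promises. A hedged sketch of the selection fall-through; the function below is an illustrative reduction of the hunk, not the full `apply_optimizations`:

```python
def pick_optimizer(selection, optimizers, disable_opt_split_attention):
    """Illustrative reduction of the selection logic touched by this hunk."""
    # 'optimizers' is assumed to be sorted so the preferred choice comes first.
    matching_optimizer = next((o for o in optimizers if o == selection), None)

    if selection == "None":
        matching_optimizer = None
    elif selection == "Automatic" and disable_opt_split_attention:
        matching_optimizer = None           # the restored flag forces "no optimization"
    elif matching_optimizer is None:
        matching_optimizer = optimizers[0]  # fall back to the highest-priority optimizer

    return matching_optimizer

print(pick_optimizer("Automatic", ["Doggettx", "sdp-no-mem", "sdp"], disable_opt_split_attention=True))   # None
print(pick_optimizer("Automatic", ["Doggettx", "sdp-no-mem", "sdp"], disable_opt_split_attention=False))  # Doggettx
```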

‎modules/sd_hijack_optimizations.py‎

Lines changed: 3 additions & 3 deletions

@@ -59,7 +59,7 @@ class SdOptimizationSdpNoMem(SdOptimization):
     name = "sdp-no-mem"
     label = "scaled dot product without memory efficient attention"
     cmd_opt = "opt_sdp_no_mem_attention"
-    priority = 90
+    priority = 80

     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)

@@ -73,7 +73,7 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):
     name = "sdp"
     label = "scaled dot product"
     cmd_opt = "opt_sdp_attention"
-    priority = 80
+    priority = 70

     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward

@@ -116,7 +116,7 @@ def apply(self):
 class SdOptimizationDoggettx(SdOptimization):
     name = "Doggettx"
     cmd_opt = "opt_split_attention"
-    priority = 20
+    priority = 90

     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
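Raising Doggettx to priority 90 while demoting the two SDP variants to 80 and 70 is what implements the changelog's "revert default cross attention optimization to Doggettx": the automatic choice takes the available optimization with the highest priority. A small sketch of that ordering, assuming a priority-sorted selection like the one implied by the hunks above; only the three priorities from this diff are used:

```python
from dataclasses import dataclass

@dataclass
class Optimization:
    name: str
    priority: int
    available: bool = True

# Priorities after this commit (illustrative subset of the real optimizer classes).
optimizations = [
    Optimization("Doggettx", 90),
    Optimization("sdp-no-mem", 80),
    Optimization("sdp", 70),
]

def automatic_choice(optimizations):
    candidates = [o for o in optimizations if o.available]
    return max(candidates, key=lambda o: o.priority, default=None)

print(automatic_choice(optimizations).name)  # Doggettx (the restored default)
```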

‎modules/sd_models.py‎

Lines changed: 0 additions & 2 deletions

@@ -313,8 +313,6 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer

         timer.record("apply half()")

-    devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
-    devices.dtype_vae = torch.float32 if shared.cmd_opts.no_half or shared.cmd_opts.no_half_vae else torch.float16
     devices.dtype_unet = model.model.diffusion_model.dtype
     devices.unet_needs_upcast = shared.cmd_opts.upcast_sampling and devices.dtype == torch.float16 and devices.dtype_unet == torch.float16

‎modules/shared.py‎

Lines changed: 4 additions & 0 deletions

@@ -6,6 +6,7 @@
 import time

 import gradio as gr
+import torch
 import tqdm

 import modules.interrogate

@@ -76,6 +77,9 @@
 devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \
     (devices.cpu if any(y in cmd_opts.use_cpu for y in [x, 'all']) else devices.get_optimal_device() for x in ['sd', 'interrogate', 'gfpgan', 'esrgan', 'codeformer'])

+devices.dtype = torch.float32 if cmd_opts.no_half else torch.float16
+devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
+
 device = devices.device
 weight_load_location = None if cmd_opts.lowram else "cpu"
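Together with the sd_models.py hunk above, this moves the `devices.dtype` / `devices.dtype_vae` assignments from model-load time into module startup in shared.py, so the dtypes are already derived from the command-line flags before any checkpoint is loaded. A minimal sketch of the flag-to-dtype mapping, with plain booleans standing in for `cmd_opts`:

```python
import torch

no_half = False      # stand-in for cmd_opts.no_half
no_half_vae = True   # stand-in for cmd_opts.no_half_vae

dtype = torch.float32 if no_half else torch.float16
dtype_vae = torch.float32 if no_half or no_half_vae else torch.float16

print(dtype, dtype_vae)   # torch.float16 torch.float32; the VAE can stay in fp32 independently
```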
