Skip to content

Commit 68f336b

Browse files
committed
Merge branch 'release_candidate'
2 parents a3ddf46 + 50973ec commit 68f336b

16 files changed

Lines changed: 140 additions & 164 deletions

‎CHANGELOG.md‎

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,28 @@
1+
## 1.5.1
2+
3+
### Minor:
4+
* support parsing text encoder blocks in some new LoRAs
5+
* delete scale checker script due to user demand
6+
7+
### Extensions and API:
8+
* add postprocess_batch_list script callback
9+
10+
### Bug Fixes:
11+
* fix TI training for SD1
12+
* fix reload altclip model error
13+
* prepend the pythonpath instead of overriding it
14+
* fix typo in SD_WEBUI_RESTARTING
15+
* if txt2img/img2img raises an exception, finally call state.end()
16+
* fix composable diffusion weight parsing
17+
* restyle Startup profile for users of the dark theme
18+
* fix webui not launching with --nowebui
19+
* catch exception for non git extensions
20+
* fix some options missing from /sdapi/v1/options
21+
* fix for extension update status always saying "unknown"
22+
* fix display of extra network cards that have `<>` in the name
23+
* update lora extension to work with python 3.8
24+
25+
126
## 1.5.0
227

328
### Features:

‎extensions-builtin/Lora/network.py‎

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
from __future__ import annotations
12
import os
23
from collections import namedtuple
34
import enum

‎extensions-builtin/Lora/networks.py‎

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,11 @@ def load_network(name, network_on_disk):
163163
key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
164164
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
165165

166+
# some SD1 Loras also have correct compvis keys
167+
if sd_module is None:
168+
key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
169+
sd_module = shared.sd_model.network_layer_mapping.get(key, None)
170+
166171
if sd_module is None:
167172
keys_failed_to_match[key_network] = key
168173
continue

‎javascript/badScaleChecker.js‎

Lines changed: 0 additions & 108 deletions
This file was deleted.

‎modules/api/api.py‎

Lines changed: 22 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -333,14 +333,16 @@ def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
333333
p.outpath_grids = opts.outdir_txt2img_grids
334334
p.outpath_samples = opts.outdir_txt2img_samples
335335

336-
shared.state.begin(job="scripts_txt2img")
337-
if selectable_scripts is not None:
338-
p.script_args = script_args
339-
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
340-
else:
341-
p.script_args = tuple(script_args) # Need to pass args as tuple here
342-
processed = process_images(p)
343-
shared.state.end()
336+
try:
337+
shared.state.begin(job="scripts_txt2img")
338+
if selectable_scripts is not None:
339+
p.script_args = script_args
340+
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
341+
else:
342+
p.script_args = tuple(script_args) # Need to pass args as tuple here
343+
processed = process_images(p)
344+
finally:
345+
shared.state.end()
344346

345347
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
346348

@@ -390,14 +392,16 @@ def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
390392
p.outpath_grids = opts.outdir_img2img_grids
391393
p.outpath_samples = opts.outdir_img2img_samples
392394

393-
shared.state.begin(job="scripts_img2img")
394-
if selectable_scripts is not None:
395-
p.script_args = script_args
396-
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
397-
else:
398-
p.script_args = tuple(script_args) # Need to pass args as tuple here
399-
processed = process_images(p)
400-
shared.state.end()
395+
try:
396+
shared.state.begin(job="scripts_img2img")
397+
if selectable_scripts is not None:
398+
p.script_args = script_args
399+
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
400+
else:
401+
p.script_args = tuple(script_args) # Need to pass args as tuple here
402+
processed = process_images(p)
403+
finally:
404+
shared.state.end()
401405

402406
b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []
403407

@@ -720,9 +724,9 @@ def get_memory(self):
720724
cuda = {'error': f'{err}'}
721725
return models.MemoryResponse(ram=ram, cuda=cuda)
722726

723-
def launch(self, server_name, port):
727+
def launch(self, server_name, port, root_path):
724728
self.app.include_router(self.router)
725-
uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive)
729+
uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)
726730

727731
def kill_webui(self):
728732
restart.stop_program()

‎modules/api/models.py‎

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -208,11 +208,9 @@ class PreprocessResponse(BaseModel):
208208
fields = {}
209209
for key, metadata in opts.data_labels.items():
210210
value = opts.data.get(key)
211-
optType = opts.typemap.get(type(metadata.default), type(metadata.default))
211+
optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any
212212

213-
if metadata.default is None:
214-
pass
215-
elif metadata is not None:
213+
if metadata is not None:
216214
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
217215
else:
218216
fields.update({key: (Optional[optType], Field())})

‎modules/extensions.py‎

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -56,10 +56,12 @@ def read_from_repo():
5656
self.do_read_info_from_repo()
5757

5858
return self.to_dict()
59-
60-
d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
61-
self.from_dict(d)
62-
self.status = 'unknown'
59+
try:
60+
d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
61+
self.from_dict(d)
62+
except FileNotFoundError:
63+
pass
64+
self.status = 'unknown' if self.status == '' else self.status
6365

6466
def do_read_info_from_repo(self):
6567
repo = None

‎modules/launch_utils.py‎

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -196,7 +196,7 @@ def run_extension_installer(extension_dir):
196196

197197
try:
198198
env = os.environ.copy()
199-
env['PYTHONPATH'] = os.path.abspath(".")
199+
env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
200200

201201
print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
202202
except Exception as e:
@@ -233,7 +233,7 @@ def run_extensions_installers(settings_file):
233233
re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")
234234

235235

236-
def requrements_met(requirements_file):
236+
def requirements_met(requirements_file):
237237
"""
238238
Does a simple parse of a requirements.txt file to determine if all rerqirements in it
239239
are already installed. Returns True if so, False if not installed or parsing fails.
@@ -293,7 +293,7 @@ def prepare_environment():
293293
try:
294294
# the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
295295
os.remove(os.path.join(script_path, "tmp", "restart"))
296-
os.environ.setdefault('SD_WEBUI_RESTARTING ', '1')
296+
os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
297297
except OSError:
298298
pass
299299

@@ -354,7 +354,7 @@ def prepare_environment():
354354
if not os.path.isfile(requirements_file):
355355
requirements_file = os.path.join(script_path, requirements_file)
356356

357-
if not requrements_met(requirements_file):
357+
if not requirements_met(requirements_file):
358358
run_pip(f"install -r \"{requirements_file}\"", "requirements")
359359

360360
run_extensions_installers(settings_file=args.ui_settings_file)

‎modules/processing.py‎

Lines changed: 26 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -600,8 +600,12 @@ def program_version():
600600
return res
601601

602602

603-
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
604-
index = position_in_batch + iteration * p.batch_size
603+
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
604+
if index is None:
605+
index = position_in_batch + iteration * p.batch_size
606+
607+
if all_negative_prompts is None:
608+
all_negative_prompts = p.all_negative_prompts
605609

606610
clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
607611
enable_hr = getattr(p, 'enable_hr', False)
@@ -617,12 +621,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
617621
"Sampler": p.sampler_name,
618622
"CFG scale": p.cfg_scale,
619623
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
620-
"Seed": all_seeds[index],
624+
"Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
621625
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
622626
"Size": f"{p.width}x{p.height}",
623627
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
624628
"Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
625-
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
629+
"Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
626630
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
627631
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
628632
"Denoising strength": getattr(p, 'denoising_strength', None),
@@ -642,7 +646,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
642646
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])
643647

644648
prompt_text = p.prompt if use_main_prompt else all_prompts[index]
645-
negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""
649+
negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""
646650

647651
return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()
648652

@@ -716,9 +720,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
716720
else:
717721
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]
718722

719-
def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
720-
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt)
721-
722723
if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
723724
model_hijack.embedding_db.load_textual_inversion_embeddings()
724725

@@ -806,6 +807,16 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
806807
if p.scripts is not None:
807808
p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)
808809

810+
p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
811+
p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]
812+
813+
batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
814+
p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
815+
x_samples_ddim = batch_params.images
816+
817+
def infotext(index=0, use_main_prompt=False):
818+
return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)
819+
809820
for i, x_sample in enumerate(x_samples_ddim):
810821
p.batch_index = i
811822

@@ -814,7 +825,7 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
814825

815826
if p.restore_faces:
816827
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
817-
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
828+
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")
818829

819830
devices.torch_gc()
820831

@@ -831,15 +842,15 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
831842
if p.color_corrections is not None and i < len(p.color_corrections):
832843
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
833844
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
834-
images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
845+
images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
835846
image = apply_color_correction(p.color_corrections[i], image)
836847

837848
image = apply_overlay(image, p.paste_to, i, p.overlay_images)
838849

839850
if opts.samples_save and not p.do_not_save_samples:
840-
images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p)
851+
images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)
841852

842-
text = infotext(n, i)
853+
text = infotext(i)
843854
infotexts.append(text)
844855
if opts.enable_pnginfo:
845856
image.info["parameters"] = text
@@ -850,10 +861,10 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
850861
image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')
851862

852863
if opts.save_mask:
853-
images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
864+
images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")
854865

855866
if opts.save_mask_composite:
856-
images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
867+
images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")
857868

858869
if opts.return_mask:
859870
output_images.append(image_mask)
@@ -894,7 +905,7 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
894905
p,
895906
images_list=output_images,
896907
seed=p.all_seeds[0],
897-
info=infotext(),
908+
info=infotexts[0],
898909
comments="".join(f"{comment}\n" for comment in comments),
899910
subseed=p.all_subseeds[0],
900911
index_of_first_image=index_of_first_image,

0 commit comments

Comments
 (0)