Skip to content

Commit 96d6ca4

Browse files
committed
manual fixes for ruff
1 parent 762265e commit 96d6ca4

22 files changed

Lines changed: 129 additions & 129 deletions

‎extensions-builtin/LDSR/ldsr_model_arch.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -243,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize
243243
x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True)
244244
log["sample_noquant"] = x_sample_noquant
245245
log["sample_diff"] = torch.abs(x_sample_noquant - x_sample)
246-
except:
246+
except Exception:
247247
pass
248248

249249
log["sample"] = x_sample

‎extensions-builtin/LDSR/scripts/ldsr_model.py‎

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,8 @@
77
from modules.upscaler import Upscaler, UpscalerData
88
from ldsr_model_arch import LDSR
99
from modules import shared, script_callbacks
10-
import sd_hijack_autoencoder, sd_hijack_ddpm_v1
10+
import sd_hijack_autoencoder
11+
import sd_hijack_ddpm_v1
1112

1213

1314
class UpscalerLDSR(Upscaler):

‎extensions-builtin/LDSR/sd_hijack_autoencoder.py‎

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,16 +1,21 @@
11
# The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo
22
# The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo
33
# As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder
4-
4+
import numpy as np
55
import torch
66
import pytorch_lightning as pl
77
import torch.nn.functional as F
88
from contextlib import contextmanager
9+
10+
from torch.optim.lr_scheduler import LambdaLR
11+
12+
from ldm.modules.ema import LitEma
913
from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer
1014
from ldm.modules.diffusionmodules.model import Encoder, Decoder
1115
from ldm.util import instantiate_from_config
1216

1317
import ldm.models.autoencoder
18+
from packaging import version
1419

1520
class VQModel(pl.LightningModule):
1621
def __init__(self,
@@ -249,7 +254,8 @@ def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs):
249254
if plot_ema:
250255
with self.ema_scope():
251256
xrec_ema, _ = self(x)
252-
if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema)
257+
if x.shape[1] > 3:
258+
xrec_ema = self.to_rgb(xrec_ema)
253259
log["reconstructions_ema"] = xrec_ema
254260
return log
255261

‎extensions-builtin/LDSR/sd_hijack_ddpm_v1.py‎

Lines changed: 10 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -450,7 +450,7 @@ def __init__(self,
450450
self.cond_stage_key = cond_stage_key
451451
try:
452452
self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1
453-
except:
453+
except Exception:
454454
self.num_downs = 0
455455
if not scale_by_std:
456456
self.scale_factor = scale_factor
@@ -877,16 +877,6 @@ def forward(self, x, c, *args, **kwargs):
877877
c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float()))
878878
return self.p_losses(x, c, t, *args, **kwargs)
879879

880-
def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset
881-
def rescale_bbox(bbox):
882-
x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2])
883-
y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3])
884-
w = min(bbox[2] / crop_coordinates[2], 1 - x0)
885-
h = min(bbox[3] / crop_coordinates[3], 1 - y0)
886-
return x0, y0, w, h
887-
888-
return [rescale_bbox(b) for b in bboxes]
889-
890880
def apply_model(self, x_noisy, t, cond, return_ids=False):
891881

892882
if isinstance(cond, dict):
@@ -1157,8 +1147,10 @@ def progressive_denoising(self, cond, shape, verbose=True, callback=None, quanti
11571147

11581148
if i % log_every_t == 0 or i == timesteps - 1:
11591149
intermediates.append(x0_partial)
1160-
if callback: callback(i)
1161-
if img_callback: img_callback(img, i)
1150+
if callback:
1151+
callback(i)
1152+
if img_callback:
1153+
img_callback(img, i)
11621154
return img, intermediates
11631155

11641156
@torch.no_grad()
@@ -1205,8 +1197,10 @@ def p_sample_loop(self, cond, shape, return_intermediates=False,
12051197

12061198
if i % log_every_t == 0 or i == timesteps - 1:
12071199
intermediates.append(img)
1208-
if callback: callback(i)
1209-
if img_callback: img_callback(img, i)
1200+
if callback:
1201+
callback(i)
1202+
if img_callback:
1203+
img_callback(img, i)
12101204

12111205
if return_intermediates:
12121206
return img, intermediates
@@ -1322,7 +1316,7 @@ def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=
13221316

13231317
if inpaint:
13241318
# make a simple center square
1325-
b, h, w = z.shape[0], z.shape[2], z.shape[3]
1319+
h, w = z.shape[2], z.shape[3]
13261320
mask = torch.ones(N, h, w).to(self.device)
13271321
# zeros will be filled in
13281322
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.

‎extensions-builtin/ScuNET/scunet_model_arch.py‎

Lines changed: 6 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -61,7 +61,9 @@ def forward(self, x):
6161
Returns:
6262
output: tensor shape [b h w c]
6363
"""
64-
if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
64+
if self.type != 'W':
65+
x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
66+
6567
x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
6668
h_windows = x.size(1)
6769
w_windows = x.size(2)
@@ -85,8 +87,9 @@ def forward(self, x):
8587
output = self.linear(output)
8688
output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
8789

88-
if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2),
89-
dims=(1, 2))
90+
if self.type != 'W':
91+
output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2))
92+
9093
return output
9194

9295
def relative_embedding(self):

‎extensions-builtin/SwinIR/scripts/swinir_model.py‎

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -45,7 +45,7 @@ def do_upscale(self, img, model_file):
4545
img = upscale(img, model)
4646
try:
4747
torch.cuda.empty_cache()
48-
except:
48+
except Exception:
4949
pass
5050
return img
5151

0 commit comments

Comments (0)