diff --git a/modules/processing.py b/modules/processing.py
index bc7e5311..accb31d1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -740,7 +740,6 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.denoising_strength: float = denoising_strength
         self.init_latent = None
         self.image_mask = mask
-        #self.image_unblurred_mask = None
         self.latent_mask = None
         self.mask_for_overlay = None
         self.mask_blur = mask_blur
@@ -756,36 +755,36 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
         self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
         crop_region = None
 
-        if self.image_mask is not None:
-            self.image_mask = self.image_mask.convert('L')
+        image_mask = self.image_mask
+
+        if image_mask is not None:
+            image_mask = image_mask.convert('L')
 
             if self.inpainting_mask_invert:
-                self.image_mask = ImageOps.invert(self.image_mask)
-
-            #self.image_unblurred_mask = self.image_mask
+                image_mask = ImageOps.invert(image_mask)
 
             if self.mask_blur > 0:
-                self.image_mask = self.image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
+                image_mask = image_mask.filter(ImageFilter.GaussianBlur(self.mask_blur))
 
             if self.inpaint_full_res:
-                self.mask_for_overlay = self.image_mask
-                mask = self.image_mask.convert('L')
+                self.mask_for_overlay = image_mask
+                mask = image_mask.convert('L')
                 crop_region = masking.get_crop_region(np.array(mask), self.inpaint_full_res_padding)
                 crop_region = masking.expand_crop_region(crop_region, self.width, self.height, mask.width, mask.height)
                 x1, y1, x2, y2 = crop_region
 
                 mask = mask.crop(crop_region)
-                self.image_mask = images.resize_image(2, mask, self.width, self.height)
+                image_mask = images.resize_image(2, mask, self.width, self.height)
                 self.paste_to = (x1, y1, x2-x1, y2-y1)
             else:
-                self.image_mask = images.resize_image(self.resize_mode, self.image_mask, self.width, self.height)
-                np_mask = np.array(self.image_mask)
+                image_mask = images.resize_image(self.resize_mode, image_mask, self.width, self.height)
+                np_mask = np.array(image_mask)
                 np_mask = np.clip((np_mask.astype(np.float32)) * 2, 0, 255).astype(np.uint8)
                 self.mask_for_overlay = Image.fromarray(np_mask)
 
             self.overlay_images = []
 
-        latent_mask = self.latent_mask if self.latent_mask is not None else self.image_mask
+        latent_mask = self.latent_mask if self.latent_mask is not None else image_mask
 
         add_color_corrections = opts.img2img_color_correction and self.color_corrections is None
         if add_color_corrections:
@@ -797,7 +796,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             if crop_region is None:
                 image = images.resize_image(self.resize_mode, image, self.width, self.height)
 
-            if self.image_mask is not None:
+            if image_mask is not None:
                 image_masked = Image.new('RGBa', (image.width, image.height))
                 image_masked.paste(image.convert("RGBA").convert("RGBa"), mask=ImageOps.invert(self.mask_for_overlay.convert('L')))
 
@@ -807,7 +806,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
                 image = image.crop(crop_region)
                 image = images.resize_image(2, image, self.width, self.height)
 
-            if self.image_mask is not None:
+            if image_mask is not None:
                 if self.inpainting_fill != 1:
                     image = masking.fill(image, latent_mask)
 
@@ -839,7 +838,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
 
         self.init_latent = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image))
 
-        if self.image_mask is not None:
+        if image_mask is not None:
             init_mask = latent_mask
             latmask = init_mask.convert('RGB').resize((self.init_latent.shape[3], self.init_latent.shape[2]))
             latmask = np.moveaxis(np.array(latmask, dtype=np.float32), 2, 0) / 255
@@ -856,7 +855,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
             elif self.inpainting_fill == 3:
                 self.init_latent = self.init_latent * self.mask
 
-        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, self.image_mask)
+        self.image_conditioning = self.img2img_image_conditioning(image, self.init_latent, image_mask)
 
     def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
         x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)