diff --git a/.eslintignore b/.eslintignore new file mode 100644 index 00000000..1cfd9487 --- /dev/null +++ b/.eslintignore @@ -0,0 +1,4 @@ +extensions +extensions-disabled +repositories +venv \ No newline at end of file diff --git a/.eslintrc.js b/.eslintrc.js new file mode 100644 index 00000000..78275554 --- /dev/null +++ b/.eslintrc.js @@ -0,0 +1,89 @@ +module.exports = { + env: { + browser: true, + es2021: true, + }, + extends: "eslint:recommended", + parserOptions: { + ecmaVersion: "latest", + }, + rules: { + "arrow-spacing": "error", + "block-spacing": "error", + "brace-style": "error", + "comma-dangle": ["error", "only-multiline"], + "comma-spacing": "error", + "comma-style": ["error", "last"], + "curly": ["error", "multi-line", "consistent"], + "eol-last": "error", + "func-call-spacing": "error", + "function-call-argument-newline": ["error", "consistent"], + "function-paren-newline": ["error", "consistent"], + "indent": ["error", 4], + "key-spacing": "error", + "keyword-spacing": "error", + "linebreak-style": ["error", "unix"], + "no-extra-semi": "error", + "no-mixed-spaces-and-tabs": "error", + "no-trailing-spaces": "error", + "no-whitespace-before-property": "error", + "object-curly-newline": ["error", {consistent: true, multiline: true}], + "quote-props": ["error", "consistent-as-needed"], + "semi": ["error", "always"], + "semi-spacing": "error", + "semi-style": ["error", "last"], + "space-before-blocks": "error", + "space-before-function-paren": ["error", "never"], + "space-in-parens": ["error", "never"], + "space-infix-ops": "error", + "space-unary-ops": "error", + "switch-colon-spacing": "error", + "template-curly-spacing": ["error", "never"], + "unicode-bom": "error", + "no-multi-spaces": "error", + "object-curly-spacing": ["error", "never"], + "operator-linebreak": ["error", "after"], + "no-unused-vars": "off", + "no-redeclare": "off", + }, + globals: { + // this file + module: "writable", + // script.js + gradioApp: "writable", + onUiLoaded: "writable", + onUiUpdate: "writable", + onOptionsChanged: "writable", + uiCurrentTab: "writable", + uiElementIsVisible: "writable", + executeCallbacks: "writable", + // ui.js + opts: "writable", + all_gallery_buttons: "writable", + selected_gallery_button: "writable", + selected_gallery_index: "writable", + args_to_array: "writable", + switch_to_txt2img: "writable", + switch_to_img2img_tab: "writable", + switch_to_img2img: "writable", + switch_to_sketch: "writable", + switch_to_inpaint: "writable", + switch_to_inpaint_sketch: "writable", + switch_to_extras: "writable", + get_tab_index: "writable", + create_submit_args: "writable", + restart_reload: "writable", + updateInput: "writable", + // extraNetworks.js + requestGet: "writable", + popup: "writable", + // from python + localization: "writable", + // progressbar.js + randomId: "writable", + requestProgress: "writable", + // imageviewer.js + modalPrevImage: "writable", + modalNextImage: "writable", + } +}; diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml index 7d435297..3a8b9953 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.yml +++ b/.github/ISSUE_TEMPLATE/bug_report.yml @@ -47,6 +47,15 @@ body: description: Which commit are you running ? (Do not write *Latest version/repo/commit*, as this means nothing and will have changed by the time we read your issue. Rather, copy the **Commit** link at the bottom of the UI, or from the cmd/terminal if you can't launch it.) 
validations: required: true + - type: dropdown + id: py-version + attributes: + label: What Python version are you running on? + multiple: false + options: + - Python 3.10.x + - Python 3.11.x (or above, not supported yet) + - Python 3.9.x (or below, not recommended) - type: dropdown id: platforms attributes: @@ -59,6 +68,18 @@ body: - iOS - Android - Other/Cloud + - type: dropdown + id: device + attributes: + label: What device are you running WebUI on? + multiple: true + options: + - Nvidia GPUs (RTX 20 and above) + - Nvidia GPUs (GTX 16 and below) + - AMD GPUs (RX 6000 and above) + - AMD GPUs (RX 5000 and below) + - CPU + - Other GPUs - type: dropdown id: browsers attributes: diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index a168be5b..7b7219fd 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -1,39 +1,34 @@ -# See https://github.com/actions/starter-workflows/blob/1067f16ad8a1eac328834e4b0ae24f7d206f810d/ci/pylint.yml for original reference file name: Run Linting/Formatting on Pull Requests on: - push - pull_request - # See https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions#onpull_requestpull_request_targetbranchesbranches-ignore for syntax docs - # if you want to filter out branches, delete the `- pull_request` and uncomment these lines : - # pull_request: - # branches: - # - master - # branches-ignore: - # - development jobs: - lint: + lint-python: runs-on: ubuntu-latest steps: - name: Checkout Code uses: actions/checkout@v3 - - name: Set up Python 3.10 - uses: actions/setup-python@v4 + - uses: actions/setup-python@v4 with: - python-version: 3.10.6 - cache: pip - cache-dependency-path: | - **/requirements*txt - - name: Install PyLint - run: | - python -m pip install --upgrade pip - pip install pylint - # This lets PyLint check to see if it can resolve imports - - name: Install dependencies - run: | - export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit" - python launch.py - - name: Analysing the code with pylint - run: | - pylint $(git ls-files '*.py') + python-version: 3.11 + # NB: there's no cache: pip here since we're not installing anything + # from the requirements.txt file(s) in the repository; it's faster + # not to have GHA download an (at the time of writing) 4 GB cache + # of PyTorch and other dependencies. + - name: Install Ruff + run: pip install ruff==0.0.265 + - name: Run Ruff + run: ruff . 
+ lint-js: + runs-on: ubuntu-latest + steps: + - name: Checkout Code + uses: actions/checkout@v3 + - name: Install Node.js + uses: actions/setup-node@v3 + with: + node-version: 18 + - run: npm i --ci + - run: npm run lint diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index 9a0b8d22..0708398b 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -17,8 +17,14 @@ jobs: cache: pip cache-dependency-path: | **/requirements*txt + launch.py - name: Run tests run: python launch.py --tests test --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test + env: + PIP_DISABLE_PIP_VERSION_CHECK: "1" + PIP_PROGRESS_BAR: "off" + TORCH_INDEX_URL: https://download.pytorch.org/whl/cpu + WEBUI_LAUNCH_LIVE_OUTPUT: "1" - name: Upload main app stdout-stderr uses: actions/upload-artifact@v3 if: always() diff --git a/.gitignore b/.gitignore index 7328401f..46654d83 100644 --- a/.gitignore +++ b/.gitignore @@ -34,3 +34,5 @@ notification.mp3 /test/stderr.txt /cache.json* /config_states/ +/node_modules +/package-lock.json \ No newline at end of file diff --git a/README.md b/README.md index 67a1a83a..79089e52 100644 --- a/README.md +++ b/README.md @@ -99,6 +99,12 @@ Alternatively, use online services (like Google Colab): - [List of Online Services](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Online-Services) +### Installation on Windows 10/11 with NVidia GPUs using the release package +1. Download `sd.webui.zip` from [v1.0.0-pre](https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre) and extract its contents. +2. Run `update.bat`. +3. Run `run.bat`. +> For more details, see [Install-and-Run-on-NVidia-GPUs](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) ### Automatic Installation on Windows 1. Install [Python 3.10.6](https://www.python.org/downloads/release/python-3106/) (Newer versions of Python do not support torch), checking "Add Python to PATH". 2. Install [git](https://git-scm.com/download/win). @@ -158,5 +164,6 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - Instruct pix2pix - Tim Brooks (star), Aleksander Holynski (star), Alexei A. Efros (no star) - https://github.com/timothybrooks/instruct-pix2pix - Security advice - RyotaK - UniPC sampler - Wenliang Zhao - https://github.com/wl-zhao/UniPC +- TAESD - Ollin Boer Bohan - https://github.com/madebyollin/taesd - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. 
- (You) diff --git a/extensions-builtin/LDSR/ldsr_model_arch.py b/extensions-builtin/LDSR/ldsr_model_arch.py index bc11cc6e..7f450086 100644 --- a/extensions-builtin/LDSR/ldsr_model_arch.py +++ b/extensions-builtin/LDSR/ldsr_model_arch.py @@ -88,7 +88,7 @@ class LDSR: x_t = None logs = None - for n in range(n_runs): + for _ in range(n_runs): if custom_shape is not None: x_t = torch.randn(1, custom_shape[1], custom_shape[2], custom_shape[3]).to(model.device) x_t = repeat(x_t, '1 c h w -> b c h w', b=custom_shape[0]) @@ -110,7 +110,6 @@ class LDSR: diffusion_steps = int(steps) eta = 1.0 - down_sample_method = 'Lanczos' gc.collect() if torch.cuda.is_available: @@ -131,11 +130,11 @@ class LDSR: im_og = im_og.resize((width_downsampled_pre, height_downsampled_pre), Image.LANCZOS) else: print(f"Down sample rate is 1 from {target_scale} / 4 (Not downsampling)") - + # pad width and height to multiples of 64, pads with the edge values of image to avoid artifacts pad_w, pad_h = np.max(((2, 2), np.ceil(np.array(im_og.size) / 64).astype(int)), axis=0) * 64 - im_og.size im_padded = Image.fromarray(np.pad(np.array(im_og), ((0, pad_h), (0, pad_w), (0, 0)), mode='edge')) - + logs = self.run(model["model"], im_padded, diffusion_steps, eta) sample = logs["sample"] @@ -158,7 +157,7 @@ class LDSR: def get_cond(selected_path): - example = dict() + example = {} up_f = 4 c = selected_path.convert('RGB') c = torch.unsqueeze(torchvision.transforms.ToTensor()(c), 0) @@ -196,7 +195,7 @@ def convsample_ddim(model, cond, steps, shape, eta=1.0, callback=None, normals_s @torch.no_grad() def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize_x0=False, custom_shape=None, temperature=1., noise_dropout=0., corrector=None, corrector_kwargs=None, x_T=None, ddim_use_x0_pred=False): - log = dict() + log = {} z, c, x, xrec, xc = model.get_input(batch, model.first_stage_key, return_first_stage_outputs=True, @@ -244,7 +243,7 @@ def make_convolutional_sample(batch, model, custom_steps=None, eta=1.0, quantize x_sample_noquant = model.decode_first_stage(sample, force_not_quantize=True) log["sample_noquant"] = x_sample_noquant log["sample_diff"] = torch.abs(x_sample_noquant - x_sample) - except: + except Exception: pass log["sample"] = x_sample diff --git a/extensions-builtin/LDSR/scripts/ldsr_model.py b/extensions-builtin/LDSR/scripts/ldsr_model.py index da19cff1..fbbe9005 100644 --- a/extensions-builtin/LDSR/scripts/ldsr_model.py +++ b/extensions-builtin/LDSR/scripts/ldsr_model.py @@ -7,7 +7,8 @@ from basicsr.utils.download_util import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from ldsr_model_arch import LDSR from modules import shared, script_callbacks -import sd_hijack_autoencoder, sd_hijack_ddpm_v1 +import sd_hijack_autoencoder # noqa: F401 +import sd_hijack_ddpm_v1 # noqa: F401 class UpscalerLDSR(Upscaler): diff --git a/extensions-builtin/LDSR/sd_hijack_autoencoder.py b/extensions-builtin/LDSR/sd_hijack_autoencoder.py index 8e03c7f8..81c5101b 100644 --- a/extensions-builtin/LDSR/sd_hijack_autoencoder.py +++ b/extensions-builtin/LDSR/sd_hijack_autoencoder.py @@ -1,16 +1,21 @@ # The content of this file comes from the ldm/models/autoencoder.py file of the compvis/stable-diffusion repo # The VQModel & VQModelInterface were subsequently removed from ldm/models/autoencoder.py when we moved to the stability-ai/stablediffusion repo # As the LDSR upscaler relies on VQModel & VQModelInterface, the hijack aims to put them back into the ldm.models.autoencoder - +import numpy as np 
import torch import pytorch_lightning as pl import torch.nn.functional as F from contextlib import contextmanager + +from torch.optim.lr_scheduler import LambdaLR + +from ldm.modules.ema import LitEma from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer from ldm.modules.diffusionmodules.model import Encoder, Decoder from ldm.util import instantiate_from_config import ldm.models.autoencoder +from packaging import version class VQModel(pl.LightningModule): def __init__(self, @@ -19,7 +24,7 @@ class VQModel(pl.LightningModule): n_embed, embed_dim, ckpt_path=None, - ignore_keys=[], + ignore_keys=None, image_key="image", colorize_nlabels=None, monitor=None, @@ -57,7 +62,7 @@ class VQModel(pl.LightningModule): print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or []) self.scheduler_config = scheduler_config self.lr_g_factor = lr_g_factor @@ -76,11 +81,11 @@ class VQModel(pl.LightningModule): if context is not None: print(f"{context}: Restored training weights") - def init_from_ckpt(self, path, ignore_keys=list()): + def init_from_ckpt(self, path, ignore_keys=None): sd = torch.load(path, map_location="cpu")["state_dict"] keys = list(sd.keys()) for k in keys: - for ik in ignore_keys: + for ik in ignore_keys or []: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] @@ -165,7 +170,7 @@ class VQModel(pl.LightningModule): def validation_step(self, batch, batch_idx): log_dict = self._validation_step(batch, batch_idx) with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") + self._validation_step(batch, batch_idx, suffix="_ema") return log_dict def _validation_step(self, batch, batch_idx, suffix=""): @@ -232,7 +237,7 @@ class VQModel(pl.LightningModule): return self.decoder.conv_out.weight def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() + log = {} x = self.get_input(batch, self.image_key) x = x.to(self.device) if only_inputs: @@ -249,7 +254,8 @@ class VQModel(pl.LightningModule): if plot_ema: with self.ema_scope(): xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + if x.shape[1] > 3: + xrec_ema = self.to_rgb(xrec_ema) log["reconstructions_ema"] = xrec_ema return log @@ -264,7 +270,7 @@ class VQModel(pl.LightningModule): class VQModelInterface(VQModel): def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) + super().__init__(*args, embed_dim=embed_dim, **kwargs) self.embed_dim = embed_dim def encode(self, x): @@ -282,5 +288,5 @@ class VQModelInterface(VQModel): dec = self.decoder(quant) return dec -setattr(ldm.models.autoencoder, "VQModel", VQModel) -setattr(ldm.models.autoencoder, "VQModelInterface", VQModelInterface) +ldm.models.autoencoder.VQModel = VQModel +ldm.models.autoencoder.VQModelInterface = VQModelInterface diff --git a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py index 5c0488e5..631a08ef 100644 --- a/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py +++ b/extensions-builtin/LDSR/sd_hijack_ddpm_v1.py @@ -48,7 +48,7 @@ class DDPMV1(pl.LightningModule): beta_schedule="linear", loss_type="l2", ckpt_path=None, - ignore_keys=[], + ignore_keys=None, load_only_unet=False, monitor="val/loss", use_ema=True, @@ -100,7 +100,7 @@ class DDPMV1(pl.LightningModule): if monitor is not None: self.monitor 
= monitor if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys or [], only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) @@ -182,13 +182,13 @@ class DDPMV1(pl.LightningModule): if context is not None: print(f"{context}: Restored training weights") - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + def init_from_ckpt(self, path, ignore_keys=None, only_model=False): sd = torch.load(path, map_location="cpu") if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: - for ik in ignore_keys: + for ik in ignore_keys or []: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] @@ -375,7 +375,7 @@ class DDPMV1(pl.LightningModule): @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() + log = {} x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) @@ -383,7 +383,7 @@ class DDPMV1(pl.LightningModule): log["inputs"] = x # get diffusion row - diffusion_row = list() + diffusion_row = [] x_start = x[:n_row] for t in range(self.num_timesteps): @@ -444,13 +444,13 @@ class LatentDiffusionV1(DDPMV1): conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + super().__init__(*args, conditioning_key=conditioning_key, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: + except Exception: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor @@ -460,7 +460,7 @@ class LatentDiffusionV1(DDPMV1): self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False - self.bbox_tokenizer = None + self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: @@ -792,7 +792,7 @@ class LatentDiffusionV1(DDPMV1): z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. 
apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): + if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] @@ -877,16 +877,6 @@ class LatentDiffusionV1(DDPMV1): c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): @@ -900,7 +890,7 @@ class LatentDiffusionV1(DDPMV1): if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids + assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) @@ -1126,7 +1116,7 @@ class LatentDiffusionV1(DDPMV1): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] @@ -1157,8 +1147,10 @@ class LatentDiffusionV1(DDPMV1): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) return img, intermediates @torch.no_grad() @@ -1205,8 +1197,10 @@ class LatentDiffusionV1(DDPMV1): if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) + if callback: + callback(i) + if img_callback: + img_callback(img, i) if return_intermediates: return img, intermediates @@ -1221,7 +1215,7 @@ class LatentDiffusionV1(DDPMV1): if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + [x[:batch_size] for x in cond[key]] for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, @@ -1253,7 +1247,7 @@ class LatentDiffusionV1(DDPMV1): use_ddim = ddim_steps is not None - log = dict() + log = {} z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, @@ -1280,7 +1274,7 @@ class LatentDiffusionV1(DDPMV1): if plot_diffusion_rows: # get diffusion row - diffusion_row = list() + diffusion_row = [] z_start = z[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: @@ -1322,7 +1316,7 @@ class LatentDiffusionV1(DDPMV1): if inpaint: # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] + h, w = z.shape[2], z.shape[3] mask = torch.ones(N, h, w).to(self.device) # zeros will be filled in mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. 
@@ -1424,10 +1418,10 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1): # TODO: move all layout-specific hacks to this class def __init__(self, cond_stage_key, *args, **kwargs): assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + super().__init__(*args, cond_stage_key=cond_stage_key, **kwargs) def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) + logs = super().log_images(*args, batch=batch, N=N, **kwargs) key = 'train' if self.training else 'validation' dset = self.trainer.datamodule.datasets[key] @@ -1443,7 +1437,7 @@ class Layout2ImgDiffusionV1(LatentDiffusionV1): logs['bbox_image'] = cond_img return logs -setattr(ldm.models.diffusion.ddpm, "DDPMV1", DDPMV1) -setattr(ldm.models.diffusion.ddpm, "LatentDiffusionV1", LatentDiffusionV1) -setattr(ldm.models.diffusion.ddpm, "DiffusionWrapperV1", DiffusionWrapperV1) -setattr(ldm.models.diffusion.ddpm, "Layout2ImgDiffusionV1", Layout2ImgDiffusionV1) +ldm.models.diffusion.ddpm.DDPMV1 = DDPMV1 +ldm.models.diffusion.ddpm.LatentDiffusionV1 = LatentDiffusionV1 +ldm.models.diffusion.ddpm.DiffusionWrapperV1 = DiffusionWrapperV1 +ldm.models.diffusion.ddpm.Layout2ImgDiffusionV1 = Layout2ImgDiffusionV1 diff --git a/extensions-builtin/Lora/lora.py b/extensions-builtin/Lora/lora.py index b5d0c98f..1308c48b 100644 --- a/extensions-builtin/Lora/lora.py +++ b/extensions-builtin/Lora/lora.py @@ -1,4 +1,3 @@ -import glob import os import re import torch @@ -177,7 +176,7 @@ def load_lora(name, filename): else: print(f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}') continue - assert False, f'Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}' + raise AssertionError(f"Lora layer {key_diffusers} matched a layer with unsupported type: {type(sd_module).__name__}") with torch.no_grad(): module.weight.copy_(weight) @@ -189,7 +188,7 @@ def load_lora(name, filename): elif lora_key == "lora_down.weight": lora_module.down = module else: - assert False, f'Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha' + raise AssertionError(f"Bad Lora layer name: {key_diffusers} - must end in lora_up.weight, lora_down.weight or alpha") if len(keys_failed_to_match) > 0: print(f"Failed to match keys when loading Lora {filename}: {keys_failed_to_match}") @@ -207,7 +206,7 @@ def load_loras(names, multipliers=None): loaded_loras.clear() loras_on_disk = [available_lora_aliases.get(name, None) for name in names] - if any([x is None for x in loras_on_disk]): + if any(x is None for x in loras_on_disk): list_available_loras() loras_on_disk = [available_lora_aliases.get(name, None) for name in names] @@ -314,7 +313,7 @@ def lora_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn.Mu print(f'failed to calculate lora weights for layer {lora_layer_name}') - setattr(self, "lora_current_names", wanted_names) + self.lora_current_names = wanted_names def lora_forward(module, input, original_forward): @@ -348,8 +347,8 @@ def lora_forward(module, input, original_forward): def lora_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): - setattr(self, "lora_current_names", ()) - setattr(self, "lora_weights_backup", None) + self.lora_current_names = () + self.lora_weights_backup = None def lora_Linear_forward(self, input): @@ -428,7 +427,7 @@ def 
infotext_pasted(infotext, params): added = [] - for k, v in params.items(): + for k in params: if not k.startswith("AddNet Model "): continue diff --git a/extensions-builtin/Lora/scripts/lora_script.py b/extensions-builtin/Lora/scripts/lora_script.py index 060bda05..728e0b86 100644 --- a/extensions-builtin/Lora/scripts/lora_script.py +++ b/extensions-builtin/Lora/scripts/lora_script.py @@ -53,7 +53,7 @@ script_callbacks.on_infotext_pasted(lora.infotext_pasted) shared.options_templates.update(shared.options_section(('extra_networks', "Extra Networks"), { - "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in lora.available_loras]}, refresh=lora.list_available_loras), + "sd_lora": shared.OptionInfo("None", "Add Lora to prompt", gr.Dropdown, lambda: {"choices": ["None", *lora.available_loras]}, refresh=lora.list_available_loras), "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}), })) diff --git a/extensions-builtin/ScuNET/scripts/scunet_model.py b/extensions-builtin/ScuNET/scripts/scunet_model.py index c7fd5739..cc2cbc6a 100644 --- a/extensions-builtin/ScuNET/scripts/scunet_model.py +++ b/extensions-builtin/ScuNET/scripts/scunet_model.py @@ -10,10 +10,9 @@ from tqdm import tqdm from basicsr.utils.download_util import load_file_from_url import modules.upscaler -from modules import devices, modelloader +from modules import devices, modelloader, script_callbacks from scunet_model_arch import SCUNet as net from modules.shared import opts -from modules import images class UpscalerScuNET(modules.upscaler.Upscaler): @@ -133,8 +132,19 @@ class UpscalerScuNET(modules.upscaler.Upscaler): model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) model.load_state_dict(torch.load(filename), strict=True) model.eval() - for k, v in model.named_parameters(): + for _, v in model.named_parameters(): v.requires_grad = False model = model.to(device) return model + + +def on_ui_settings(): + import gradio as gr + from modules import shared + + shared.opts.add_option("SCUNET_tile", shared.OptionInfo(256, "Tile size for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, section=('upscaling', "Upscaling")).info("0 = no tiling")) + shared.opts.add_option("SCUNET_tile_overlap", shared.OptionInfo(8, "Tile overlap for SCUNET upscalers.", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, section=('upscaling', "Upscaling")).info("Low values = visible seam")) + + +script_callbacks.on_ui_settings(on_ui_settings) diff --git a/extensions-builtin/ScuNET/scunet_model_arch.py b/extensions-builtin/ScuNET/scunet_model_arch.py index 43ca8d36..b51a8806 100644 --- a/extensions-builtin/ScuNET/scunet_model_arch.py +++ b/extensions-builtin/ScuNET/scunet_model_arch.py @@ -61,7 +61,9 @@ class WMSA(nn.Module): Returns: output: tensor shape [b h w c] """ - if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + if self.type != 'W': + x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size) h_windows = x.size(1) w_windows = x.size(2) @@ -85,8 +87,9 @@ class WMSA(nn.Module): output = self.linear(output) output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size) - if self.type != 'W': output = 
torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), - dims=(1, 2)) + if self.type != 'W': + output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), dims=(1, 2)) + return output def relative_embedding(self): @@ -262,4 +265,4 @@ class SCUNet(nn.Module): nn.init.constant_(m.bias, 0) elif isinstance(m, nn.LayerNorm): nn.init.constant_(m.bias, 0) - nn.init.constant_(m.weight, 1.0) \ No newline at end of file + nn.init.constant_(m.weight, 1.0) diff --git a/extensions-builtin/SwinIR/scripts/swinir_model.py b/extensions-builtin/SwinIR/scripts/swinir_model.py index e8783bca..0ba50487 100644 --- a/extensions-builtin/SwinIR/scripts/swinir_model.py +++ b/extensions-builtin/SwinIR/scripts/swinir_model.py @@ -1,4 +1,3 @@ -import contextlib import os import numpy as np @@ -8,7 +7,7 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader, devices, script_callbacks, shared -from modules.shared import cmd_opts, opts, state +from modules.shared import opts, state from swinir_model_arch import SwinIR as net from swinir_model_arch_v2 import Swin2SR as net2 from modules.upscaler import Upscaler, UpscalerData @@ -45,7 +44,7 @@ class UpscalerSwinIR(Upscaler): img = upscale(img, model) try: torch.cuda.empty_cache() - except: + except Exception: pass return img @@ -151,7 +150,7 @@ def inference(img, model, tile, tile_overlap, window_size, scale): for w_idx in w_idx_list: if state.interrupted or state.skipped: break - + in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] out_patch = model(in_patch) out_patch_mask = torch.ones_like(out_patch) diff --git a/extensions-builtin/SwinIR/swinir_model_arch.py b/extensions-builtin/SwinIR/swinir_model_arch.py index 863f42db..93b93274 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch.py +++ b/extensions-builtin/SwinIR/swinir_model_arch.py @@ -644,7 +644,7 @@ class SwinIR(nn.Module): """ def __init__(self, img_size=64, patch_size=1, in_chans=3, - embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6], + embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), window_size=7, mlp_ratio=4., qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, @@ -805,7 +805,7 @@ class SwinIR(nn.Module): def forward(self, x): H, W = x.shape[2:] x = self.check_image_size(x) - + self.mean = self.mean.type_as(x) x = (x - self.mean) * self.img_range @@ -844,7 +844,7 @@ class SwinIR(nn.Module): H, W = self.patches_resolution flops += H * W * 3 * self.embed_dim * 9 flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): + for layer in self.layers: flops += layer.flops() flops += H * W * 3 * self.embed_dim * self.embed_dim flops += self.upsample.flops() diff --git a/extensions-builtin/SwinIR/swinir_model_arch_v2.py b/extensions-builtin/SwinIR/swinir_model_arch_v2.py index 0e28ae6e..dad22cca 100644 --- a/extensions-builtin/SwinIR/swinir_model_arch_v2.py +++ b/extensions-builtin/SwinIR/swinir_model_arch_v2.py @@ -74,7 +74,7 @@ class WindowAttention(nn.Module): """ def __init__(self, dim, window_size, num_heads, qkv_bias=True, attn_drop=0., proj_drop=0., - pretrained_window_size=[0, 0]): + pretrained_window_size=(0, 0)): super().__init__() self.dim = dim @@ -241,7 +241,7 @@ class SwinTransformerBlock(nn.Module): attn_mask = None self.register_buffer("attn_mask", attn_mask) - + def calculate_mask(self, x_size): # calculate attention mask for SW-MSA H, W = x_size @@ -263,7 +263,7 
@@ class SwinTransformerBlock(nn.Module): attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(attn_mask == 0, float(0.0)) - return attn_mask + return attn_mask def forward(self, x, x_size): H, W = x_size @@ -288,7 +288,7 @@ class SwinTransformerBlock(nn.Module): attn_windows = self.attn(x_windows, mask=self.attn_mask) # nW*B, window_size*window_size, C else: attn_windows = self.attn(x_windows, mask=self.calculate_mask(x_size).to(x.device)) - + # merge windows attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) shifted_x = window_reverse(attn_windows, self.window_size, H, W) # B H' W' C @@ -369,7 +369,7 @@ class PatchMerging(nn.Module): H, W = self.input_resolution flops = (H // 2) * (W // 2) * 4 * self.dim * 2 * self.dim flops += H * W * self.dim // 2 - return flops + return flops class BasicLayer(nn.Module): """ A basic Swin Transformer layer for one stage. @@ -447,7 +447,7 @@ class BasicLayer(nn.Module): nn.init.constant_(blk.norm1.weight, 0) nn.init.constant_(blk.norm2.bias, 0) nn.init.constant_(blk.norm2.weight, 0) - + class PatchEmbed(nn.Module): r""" Image to Patch Embedding Args: @@ -492,7 +492,7 @@ class PatchEmbed(nn.Module): flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1]) if self.norm is not None: flops += Ho * Wo * self.embed_dim - return flops + return flops class RSTB(nn.Module): """Residual Swin Transformer Block (RSTB). @@ -531,7 +531,7 @@ class RSTB(nn.Module): num_heads=num_heads, window_size=window_size, mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop, attn_drop=attn_drop, drop_path=drop_path, norm_layer=norm_layer, @@ -622,7 +622,7 @@ class Upsample(nn.Sequential): else: raise ValueError(f'scale {scale} is not supported. ' 'Supported scales: 2^n and 3.') super(Upsample, self).__init__(*m) - + class Upsample_hf(nn.Sequential): """Upsample module. @@ -642,7 +642,7 @@ class Upsample_hf(nn.Sequential): m.append(nn.PixelShuffle(3)) else: raise ValueError(f'scale {scale} is not supported. 
' 'Supported scales: 2^n and 3.') - super(Upsample_hf, self).__init__(*m) + super(Upsample_hf, self).__init__(*m) class UpsampleOneStep(nn.Sequential): @@ -667,8 +667,8 @@ class UpsampleOneStep(nn.Sequential): H, W = self.input_resolution flops = H * W * self.num_feat * 3 * 9 return flops - - + + class Swin2SR(nn.Module): r""" Swin2SR @@ -698,8 +698,8 @@ class Swin2SR(nn.Module): """ def __init__(self, img_size=64, patch_size=1, in_chans=3, - embed_dim=96, depths=[6, 6, 6, 6], num_heads=[6, 6, 6, 6], - window_size=7, mlp_ratio=4., qkv_bias=True, + embed_dim=96, depths=(6, 6, 6, 6), num_heads=(6, 6, 6, 6), + window_size=7, mlp_ratio=4., qkv_bias=True, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.1, norm_layer=nn.LayerNorm, ape=False, patch_norm=True, use_checkpoint=False, upscale=2, img_range=1., upsampler='', resi_connection='1conv', @@ -764,7 +764,7 @@ class Swin2SR(nn.Module): num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results norm_layer=norm_layer, @@ -776,7 +776,7 @@ class Swin2SR(nn.Module): ) self.layers.append(layer) - + if self.upsampler == 'pixelshuffle_hf': self.layers_hf = nn.ModuleList() for i_layer in range(self.num_layers): @@ -787,7 +787,7 @@ class Swin2SR(nn.Module): num_heads=num_heads[i_layer], window_size=window_size, mlp_ratio=self.mlp_ratio, - qkv_bias=qkv_bias, + qkv_bias=qkv_bias, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])], # no impact on SR results norm_layer=norm_layer, @@ -799,7 +799,7 @@ class Swin2SR(nn.Module): ) self.layers_hf.append(layer) - + self.norm = norm_layer(self.num_features) # build the last conv layer in deep feature extraction @@ -829,10 +829,10 @@ class Swin2SR(nn.Module): self.conv_aux = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) self.conv_after_aux = nn.Sequential( nn.Conv2d(3, num_feat, 3, 1, 1), - nn.LeakyReLU(inplace=True)) + nn.LeakyReLU(inplace=True)) self.upsample = Upsample(upscale, num_feat) self.conv_last = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - + elif self.upsampler == 'pixelshuffle_hf': self.conv_before_upsample = nn.Sequential(nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) @@ -846,7 +846,7 @@ class Swin2SR(nn.Module): nn.Conv2d(embed_dim, num_feat, 3, 1, 1), nn.LeakyReLU(inplace=True)) self.conv_last_hf = nn.Conv2d(num_feat, num_out_ch, 3, 1, 1) - + elif self.upsampler == 'pixelshuffledirect': # for lightweight SR (to save parameters) self.upsample = UpsampleOneStep(upscale, embed_dim, num_out_ch, @@ -905,7 +905,7 @@ class Swin2SR(nn.Module): x = self.patch_unembed(x, x_size) return x - + def forward_features_hf(self, x): x_size = (x.shape[2], x.shape[3]) x = self.patch_embed(x) @@ -919,7 +919,7 @@ class Swin2SR(nn.Module): x = self.norm(x) # B L C x = self.patch_unembed(x, x_size) - return x + return x def forward(self, x): H, W = x.shape[2:] @@ -951,7 +951,7 @@ class Swin2SR(nn.Module): x = self.conv_after_body(self.forward_features(x)) + x x_before = self.conv_before_upsample(x) x_out = self.conv_last(self.upsample(x_before)) - + x_hf = self.conv_first_hf(x_before) x_hf = self.conv_after_body_hf(self.forward_features_hf(x_hf)) + x_hf x_hf = self.conv_before_upsample_hf(x_hf) @@ -977,15 +977,15 @@ class Swin2SR(nn.Module): x_first = self.conv_first(x) res = self.conv_after_body(self.forward_features(x_first)) + x_first x = x + 
self.conv_last(res) - + x = x / self.img_range + self.mean if self.upsampler == "pixelshuffle_aux": return x[:, :, :H*self.upscale, :W*self.upscale], aux - + elif self.upsampler == "pixelshuffle_hf": x_out = x_out / self.img_range + self.mean return x_out[:, :, :H*self.upscale, :W*self.upscale], x[:, :, :H*self.upscale, :W*self.upscale], x_hf[:, :, :H*self.upscale, :W*self.upscale] - + else: return x[:, :, :H*self.upscale, :W*self.upscale] @@ -994,7 +994,7 @@ class Swin2SR(nn.Module): H, W = self.patches_resolution flops += H * W * 3 * self.embed_dim * 9 flops += self.patch_embed.flops() - for i, layer in enumerate(self.layers): + for layer in self.layers: flops += layer.flops() flops += H * W * 3 * self.embed_dim * self.embed_dim flops += self.upsample.flops() @@ -1014,4 +1014,4 @@ if __name__ == '__main__': x = torch.randn((1, 3, height, width)) x = model(x) - print(x.shape) \ No newline at end of file + print(x.shape) diff --git a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js index 5c7a836a..114cf94c 100644 --- a/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js +++ b/extensions-builtin/prompt-bracket-checker/javascript/prompt-bracket-checker.js @@ -4,39 +4,39 @@ // If there's a mismatch, the keyword counter turns red and if you hover on it, a tooltip tells you what's wrong. function checkBrackets(textArea, counterElt) { - var counts = {}; - (textArea.value.match(/[(){}\[\]]/g) || []).forEach(bracket => { - counts[bracket] = (counts[bracket] || 0) + 1; - }); - var errors = []; + var counts = {}; + (textArea.value.match(/[(){}[\]]/g) || []).forEach(bracket => { + counts[bracket] = (counts[bracket] || 0) + 1; + }); + var errors = []; - function checkPair(open, close, kind) { - if (counts[open] !== counts[close]) { - errors.push( - `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` - ); + function checkPair(open, close, kind) { + if (counts[open] !== counts[close]) { + errors.push( + `${open}...${close} - Detected ${counts[open] || 0} opening and ${counts[close] || 0} closing ${kind}.` + ); + } } - } - checkPair('(', ')', 'round brackets'); - checkPair('[', ']', 'square brackets'); - checkPair('{', '}', 'curly brackets'); - counterElt.title = errors.join('\n'); - counterElt.classList.toggle('error', errors.length !== 0); + checkPair('(', ')', 'round brackets'); + checkPair('[', ']', 'square brackets'); + checkPair('{', '}', 'curly brackets'); + counterElt.title = errors.join('\n'); + counterElt.classList.toggle('error', errors.length !== 0); } function setupBracketChecking(id_prompt, id_counter) { - var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); - var counter = gradioApp().getElementById(id_counter) + var textarea = gradioApp().querySelector("#" + id_prompt + " > label > textarea"); + var counter = gradioApp().getElementById(id_counter); - if (textarea && counter) { - textarea.addEventListener("input", () => checkBrackets(textarea, counter)); - } + if (textarea && counter) { + textarea.addEventListener("input", () => checkBrackets(textarea, counter)); + } } -onUiLoaded(function () { - setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); - setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); - setupBracketChecking('img2img_prompt', 'img2img_token_counter'); - setupBracketChecking('img2img_neg_prompt', 
'img2img_negative_token_counter'); +onUiLoaded(function() { + setupBracketChecking('txt2img_prompt', 'txt2img_token_counter'); + setupBracketChecking('txt2img_neg_prompt', 'txt2img_negative_token_counter'); + setupBracketChecking('img2img_prompt', 'img2img_token_counter'); + setupBracketChecking('img2img_neg_prompt', 'img2img_negative_token_counter'); }); diff --git a/html/extra-networks-card.html b/html/extra-networks-card.html index 1d546217..6853b14f 100644 --- a/html/extra-networks-card.html +++ b/html/extra-networks-card.html @@ -6,7 +6,7 @@ - + {name} {description} diff --git a/html/licenses.html b/html/licenses.html index bc995aa0..ef6f2c0a 100644 --- a/html/licenses.html +++ b/html/licenses.html @@ -661,4 +661,30 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +

+<h2><a href="https://github.com/madebyollin/taesd">TAESD</a></h2>
+<small>Tiny AutoEncoder for Stable Diffusion option for live previews</small>
+<pre>
+MIT License
+
+Copyright (c) 2023 Ollin Boer Bohan
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+</pre>
\ No newline at end of file diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js index 5160081d..1c08a1a9 100644 --- a/javascript/aspectRatioOverlay.js +++ b/javascript/aspectRatioOverlay.js @@ -1,111 +1,113 @@ - -let currentWidth = null; -let currentHeight = null; -let arFrameTimeout = setTimeout(function(){},0); - -function dimensionChange(e, is_width, is_height){ - - if(is_width){ - currentWidth = e.target.value*1.0 - } - if(is_height){ - currentHeight = e.target.value*1.0 - } - - var inImg2img = gradioApp().querySelector("#tab_img2img").style.display == "block"; - - if(!inImg2img){ - return; - } - - var targetElement = null; - - var tabIndex = get_tab_index('mode_img2img') - if(tabIndex == 0){ // img2img - targetElement = gradioApp().querySelector('#img2img_image div[data-testid=image] img'); - } else if(tabIndex == 1){ //Sketch - targetElement = gradioApp().querySelector('#img2img_sketch div[data-testid=image] img'); - } else if(tabIndex == 2){ // Inpaint - targetElement = gradioApp().querySelector('#img2maskimg div[data-testid=image] img'); - } else if(tabIndex == 3){ // Inpaint sketch - targetElement = gradioApp().querySelector('#inpaint_sketch div[data-testid=image] img'); - } - - - if(targetElement){ - - var arPreviewRect = gradioApp().querySelector('#imageARPreview'); - if(!arPreviewRect){ - arPreviewRect = document.createElement('div') - arPreviewRect.id = "imageARPreview"; - gradioApp().appendChild(arPreviewRect) - } - - - - var viewportOffset = targetElement.getBoundingClientRect(); - - var viewportscale = Math.min( targetElement.clientWidth/targetElement.naturalWidth, targetElement.clientHeight/targetElement.naturalHeight ) - - var scaledx = targetElement.naturalWidth*viewportscale - var scaledy = targetElement.naturalHeight*viewportscale - - var cleintRectTop = (viewportOffset.top+window.scrollY) - var cleintRectLeft = (viewportOffset.left+window.scrollX) - var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight/2) - var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth/2) - - var arscale = Math.min( scaledx/currentWidth, scaledy/currentHeight ) - var arscaledx = currentWidth*arscale - var arscaledy = currentHeight*arscale - - var arRectTop = cleintRectCentreY-(arscaledy/2) - var arRectLeft = cleintRectCentreX-(arscaledx/2) - var arRectWidth = arscaledx - var arRectHeight = arscaledy - - arPreviewRect.style.top = arRectTop+'px'; - arPreviewRect.style.left = arRectLeft+'px'; - arPreviewRect.style.width = arRectWidth+'px'; - arPreviewRect.style.height = arRectHeight+'px'; - - clearTimeout(arFrameTimeout); - arFrameTimeout = setTimeout(function(){ - arPreviewRect.style.display = 'none'; - },2000); - - arPreviewRect.style.display = 'block'; - - } - -} - - -onUiUpdate(function(){ - var arPreviewRect = gradioApp().querySelector('#imageARPreview'); - if(arPreviewRect){ - arPreviewRect.style.display = 'none'; - } - var tabImg2img = gradioApp().querySelector("#tab_img2img"); - if (tabImg2img) { - var inImg2img = tabImg2img.style.display == "block"; - if(inImg2img){ - let inputs = gradioApp().querySelectorAll('input'); - inputs.forEach(function(e){ - var is_width = e.parentElement.id == "img2img_width" - var is_height = e.parentElement.id == "img2img_height" - - if((is_width || is_height) && !e.classList.contains('scrollwatch')){ - e.addEventListener('input', function(e){dimensionChange(e, is_width, is_height)} ) - e.classList.add('scrollwatch') - } - if(is_width){ - currentWidth = e.value*1.0 - } - if(is_height){ - currentHeight = 
e.value*1.0 - } - }) - } - } -}); + +let currentWidth = null; +let currentHeight = null; +let arFrameTimeout = setTimeout(function() {}, 0); + +function dimensionChange(e, is_width, is_height) { + + if (is_width) { + currentWidth = e.target.value * 1.0; + } + if (is_height) { + currentHeight = e.target.value * 1.0; + } + + var inImg2img = gradioApp().querySelector("#tab_img2img").style.display == "block"; + + if (!inImg2img) { + return; + } + + var targetElement = null; + + var tabIndex = get_tab_index('mode_img2img'); + if (tabIndex == 0) { // img2img + targetElement = gradioApp().querySelector('#img2img_image div[data-testid=image] img'); + } else if (tabIndex == 1) { //Sketch + targetElement = gradioApp().querySelector('#img2img_sketch div[data-testid=image] img'); + } else if (tabIndex == 2) { // Inpaint + targetElement = gradioApp().querySelector('#img2maskimg div[data-testid=image] img'); + } else if (tabIndex == 3) { // Inpaint sketch + targetElement = gradioApp().querySelector('#inpaint_sketch div[data-testid=image] img'); + } + + + if (targetElement) { + + var arPreviewRect = gradioApp().querySelector('#imageARPreview'); + if (!arPreviewRect) { + arPreviewRect = document.createElement('div'); + arPreviewRect.id = "imageARPreview"; + gradioApp().appendChild(arPreviewRect); + } + + + + var viewportOffset = targetElement.getBoundingClientRect(); + + var viewportscale = Math.min(targetElement.clientWidth / targetElement.naturalWidth, targetElement.clientHeight / targetElement.naturalHeight); + + var scaledx = targetElement.naturalWidth * viewportscale; + var scaledy = targetElement.naturalHeight * viewportscale; + + var cleintRectTop = (viewportOffset.top + window.scrollY); + var cleintRectLeft = (viewportOffset.left + window.scrollX); + var cleintRectCentreY = cleintRectTop + (targetElement.clientHeight / 2); + var cleintRectCentreX = cleintRectLeft + (targetElement.clientWidth / 2); + + var arscale = Math.min(scaledx / currentWidth, scaledy / currentHeight); + var arscaledx = currentWidth * arscale; + var arscaledy = currentHeight * arscale; + + var arRectTop = cleintRectCentreY - (arscaledy / 2); + var arRectLeft = cleintRectCentreX - (arscaledx / 2); + var arRectWidth = arscaledx; + var arRectHeight = arscaledy; + + arPreviewRect.style.top = arRectTop + 'px'; + arPreviewRect.style.left = arRectLeft + 'px'; + arPreviewRect.style.width = arRectWidth + 'px'; + arPreviewRect.style.height = arRectHeight + 'px'; + + clearTimeout(arFrameTimeout); + arFrameTimeout = setTimeout(function() { + arPreviewRect.style.display = 'none'; + }, 2000); + + arPreviewRect.style.display = 'block'; + + } + +} + + +onUiUpdate(function() { + var arPreviewRect = gradioApp().querySelector('#imageARPreview'); + if (arPreviewRect) { + arPreviewRect.style.display = 'none'; + } + var tabImg2img = gradioApp().querySelector("#tab_img2img"); + if (tabImg2img) { + var inImg2img = tabImg2img.style.display == "block"; + if (inImg2img) { + let inputs = gradioApp().querySelectorAll('input'); + inputs.forEach(function(e) { + var is_width = e.parentElement.id == "img2img_width"; + var is_height = e.parentElement.id == "img2img_height"; + + if ((is_width || is_height) && !e.classList.contains('scrollwatch')) { + e.addEventListener('input', function(e) { + dimensionChange(e, is_width, is_height); + }); + e.classList.add('scrollwatch'); + } + if (is_width) { + currentWidth = e.value * 1.0; + } + if (is_height) { + currentHeight = e.value * 1.0; + } + }); + } + } +}); diff --git a/javascript/contextMenus.js 
b/javascript/contextMenus.js index b2bdf053..f14af1d4 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -1,166 +1,172 @@ - -contextMenuInit = function(){ - let eventListenerApplied=false; - let menuSpecs = new Map(); - - const uid = function(){ - return Date.now().toString(36) + Math.random().toString(36).substring(2); - } - - function showContextMenu(event,element,menuEntries){ - let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft; - let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; - - let oldMenu = gradioApp().querySelector('#context-menu') - if(oldMenu){ - oldMenu.remove() - } - - let baseStyle = window.getComputedStyle(uiCurrentTab) - - const contextMenu = document.createElement('nav') - contextMenu.id = "context-menu" - contextMenu.style.background = baseStyle.background - contextMenu.style.color = baseStyle.color - contextMenu.style.fontFamily = baseStyle.fontFamily - contextMenu.style.top = posy+'px' - contextMenu.style.left = posx+'px' - - - - const contextMenuList = document.createElement('ul') - contextMenuList.className = 'context-menu-items'; - contextMenu.append(contextMenuList); - - menuEntries.forEach(function(entry){ - let contextMenuEntry = document.createElement('a') - contextMenuEntry.innerHTML = entry['name'] - contextMenuEntry.addEventListener("click", function() { - entry['func'](); - }) - contextMenuList.append(contextMenuEntry); - - }) - - gradioApp().appendChild(contextMenu) - - let menuWidth = contextMenu.offsetWidth + 4; - let menuHeight = contextMenu.offsetHeight + 4; - - let windowWidth = window.innerWidth; - let windowHeight = window.innerHeight; - - if ( (windowWidth - posx) < menuWidth ) { - contextMenu.style.left = windowWidth - menuWidth + "px"; - } - - if ( (windowHeight - posy) < menuHeight ) { - contextMenu.style.top = windowHeight - menuHeight + "px"; - } - - } - - function appendContextMenuOption(targetElementSelector,entryName,entryFunction){ - - var currentItems = menuSpecs.get(targetElementSelector) - - if(!currentItems){ - currentItems = [] - menuSpecs.set(targetElementSelector,currentItems); - } - let newItem = {'id':targetElementSelector+'_'+uid(), - 'name':entryName, - 'func':entryFunction, - 'isNew':true} - - currentItems.push(newItem) - return newItem['id'] - } - - function removeContextMenuOption(uid){ - menuSpecs.forEach(function(v) { - let index = -1 - v.forEach(function(e,ei){if(e['id']==uid){index=ei}}) - if(index>=0){ - v.splice(index, 1); - } - }) - } - - function addContextMenuEventListener(){ - if(eventListenerApplied){ - return; - } - gradioApp().addEventListener("click", function(e) { - if(! 
e.isTrusted){ - return - } - - let oldMenu = gradioApp().querySelector('#context-menu') - if(oldMenu){ - oldMenu.remove() - } - }); - gradioApp().addEventListener("contextmenu", function(e) { - let oldMenu = gradioApp().querySelector('#context-menu') - if(oldMenu){ - oldMenu.remove() - } - menuSpecs.forEach(function(v,k) { - if(e.composedPath()[0].matches(k)){ - showContextMenu(e,e.composedPath()[0],v) - e.preventDefault() - } - }) - }); - eventListenerApplied=true - - } - - return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener] -} - -initResponse = contextMenuInit(); -appendContextMenuOption = initResponse[0]; -removeContextMenuOption = initResponse[1]; -addContextMenuEventListener = initResponse[2]; - -(function(){ - //Start example Context Menu Items - let generateOnRepeat = function(genbuttonid,interruptbuttonid){ - let genbutton = gradioApp().querySelector(genbuttonid); - let interruptbutton = gradioApp().querySelector(interruptbuttonid); - if(!interruptbutton.offsetParent){ - genbutton.click(); - } - clearInterval(window.generateOnRepeatInterval) - window.generateOnRepeatInterval = setInterval(function(){ - if(!interruptbutton.offsetParent){ - genbutton.click(); - } - }, - 500) - } - - appendContextMenuOption('#txt2img_generate','Generate forever',function(){ - generateOnRepeat('#txt2img_generate','#txt2img_interrupt'); - }) - appendContextMenuOption('#img2img_generate','Generate forever',function(){ - generateOnRepeat('#img2img_generate','#img2img_interrupt'); - }) - - let cancelGenerateForever = function(){ - clearInterval(window.generateOnRepeatInterval) - } - - appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever) - appendContextMenuOption('#txt2img_generate', 'Cancel generate forever',cancelGenerateForever) - appendContextMenuOption('#img2img_interrupt','Cancel generate forever',cancelGenerateForever) - appendContextMenuOption('#img2img_generate', 'Cancel generate forever',cancelGenerateForever) - -})(); -//End example Context Menu Items - -onUiUpdate(function(){ - addContextMenuEventListener() -}); + +var contextMenuInit = function() { + let eventListenerApplied = false; + let menuSpecs = new Map(); + + const uid = function() { + return Date.now().toString(36) + Math.random().toString(36).substring(2); + }; + + function showContextMenu(event, element, menuEntries) { + let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft; + let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; + + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + + let baseStyle = window.getComputedStyle(uiCurrentTab); + + const contextMenu = document.createElement('nav'); + contextMenu.id = "context-menu"; + contextMenu.style.background = baseStyle.background; + contextMenu.style.color = baseStyle.color; + contextMenu.style.fontFamily = baseStyle.fontFamily; + contextMenu.style.top = posy + 'px'; + contextMenu.style.left = posx + 'px'; + + + + const contextMenuList = document.createElement('ul'); + contextMenuList.className = 'context-menu-items'; + contextMenu.append(contextMenuList); + + menuEntries.forEach(function(entry) { + let contextMenuEntry = document.createElement('a'); + contextMenuEntry.innerHTML = entry['name']; + contextMenuEntry.addEventListener("click", function() { + entry['func'](); + }); + contextMenuList.append(contextMenuEntry); + + }); + + gradioApp().appendChild(contextMenu); + + let menuWidth = 
contextMenu.offsetWidth + 4; + let menuHeight = contextMenu.offsetHeight + 4; + + let windowWidth = window.innerWidth; + let windowHeight = window.innerHeight; + + if ((windowWidth - posx) < menuWidth) { + contextMenu.style.left = windowWidth - menuWidth + "px"; + } + + if ((windowHeight - posy) < menuHeight) { + contextMenu.style.top = windowHeight - menuHeight + "px"; + } + + } + + function appendContextMenuOption(targetElementSelector, entryName, entryFunction) { + + var currentItems = menuSpecs.get(targetElementSelector); + + if (!currentItems) { + currentItems = []; + menuSpecs.set(targetElementSelector, currentItems); + } + let newItem = { + id: targetElementSelector + '_' + uid(), + name: entryName, + func: entryFunction, + isNew: true + }; + + currentItems.push(newItem); + return newItem['id']; + } + + function removeContextMenuOption(uid) { + menuSpecs.forEach(function(v) { + let index = -1; + v.forEach(function(e, ei) { + if (e['id'] == uid) { + index = ei; + } + }); + if (index >= 0) { + v.splice(index, 1); + } + }); + } + + function addContextMenuEventListener() { + if (eventListenerApplied) { + return; + } + gradioApp().addEventListener("click", function(e) { + if (!e.isTrusted) { + return; + } + + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + }); + gradioApp().addEventListener("contextmenu", function(e) { + let oldMenu = gradioApp().querySelector('#context-menu'); + if (oldMenu) { + oldMenu.remove(); + } + menuSpecs.forEach(function(v, k) { + if (e.composedPath()[0].matches(k)) { + showContextMenu(e, e.composedPath()[0], v); + e.preventDefault(); + } + }); + }); + eventListenerApplied = true; + + } + + return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]; +}; + +var initResponse = contextMenuInit(); +var appendContextMenuOption = initResponse[0]; +var removeContextMenuOption = initResponse[1]; +var addContextMenuEventListener = initResponse[2]; + +(function() { + //Start example Context Menu Items + let generateOnRepeat = function(genbuttonid, interruptbuttonid) { + let genbutton = gradioApp().querySelector(genbuttonid); + let interruptbutton = gradioApp().querySelector(interruptbuttonid); + if (!interruptbutton.offsetParent) { + genbutton.click(); + } + clearInterval(window.generateOnRepeatInterval); + window.generateOnRepeatInterval = setInterval(function() { + if (!interruptbutton.offsetParent) { + genbutton.click(); + } + }, + 500); + }; + + appendContextMenuOption('#txt2img_generate', 'Generate forever', function() { + generateOnRepeat('#txt2img_generate', '#txt2img_interrupt'); + }); + appendContextMenuOption('#img2img_generate', 'Generate forever', function() { + generateOnRepeat('#img2img_generate', '#img2img_interrupt'); + }); + + let cancelGenerateForever = function() { + clearInterval(window.generateOnRepeatInterval); + }; + + appendContextMenuOption('#txt2img_interrupt', 'Cancel generate forever', cancelGenerateForever); + appendContextMenuOption('#txt2img_generate', 'Cancel generate forever', cancelGenerateForever); + appendContextMenuOption('#img2img_interrupt', 'Cancel generate forever', cancelGenerateForever); + appendContextMenuOption('#img2img_generate', 'Cancel generate forever', cancelGenerateForever); + +})(); +//End example Context Menu Items + +onUiUpdate(function() { + addContextMenuEventListener(); +}); diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index fe008924..e316a365 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -1,11 
+1,11 @@ // allows drag-dropping files into gradio image elements, and also pasting images from clipboard -function isValidImageList( files ) { +function isValidImageList(files) { return files && files?.length === 1 && ['image/png', 'image/gif', 'image/jpeg'].includes(files[0].type); } -function dropReplaceImage( imgWrap, files ) { - if ( ! isValidImageList( files ) ) { +function dropReplaceImage(imgWrap, files) { + if (!isValidImageList(files)) { return; } @@ -14,44 +14,44 @@ function dropReplaceImage( imgWrap, files ) { imgWrap.querySelector('.modify-upload button + button, .touch-none + div button + button')?.click(); const callback = () => { const fileInput = imgWrap.querySelector('input[type="file"]'); - if ( fileInput ) { - if ( files.length === 0 ) { + if (fileInput) { + if (files.length === 0) { files = new DataTransfer(); files.items.add(tmpFile); fileInput.files = files.files; } else { fileInput.files = files; } - fileInput.dispatchEvent(new Event('change')); + fileInput.dispatchEvent(new Event('change')); } }; - - if ( imgWrap.closest('#pnginfo_image') ) { + + if (imgWrap.closest('#pnginfo_image')) { // special treatment for PNG Info tab, wait for fetch request to finish const oldFetch = window.fetch; - window.fetch = async (input, options) => { + window.fetch = async(input, options) => { const response = await oldFetch(input, options); - if ( 'api/predict/' === input ) { + if ('api/predict/' === input) { const content = await response.text(); window.fetch = oldFetch; - window.requestAnimationFrame( () => callback() ); + window.requestAnimationFrame(() => callback()); return new Response(content, { status: response.status, statusText: response.statusText, headers: response.headers - }) + }); } return response; - }; + }; } else { - window.requestAnimationFrame( () => callback() ); + window.requestAnimationFrame(() => callback()); } } window.document.addEventListener('dragover', e => { const target = e.composedPath()[0]; const imgWrap = target.closest('[data-testid="image"]'); - if ( !imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) { + if (!imgWrap && target.placeholder && target.placeholder.indexOf("Prompt") == -1) { return; } e.stopPropagation(); @@ -65,33 +65,34 @@ window.document.addEventListener('drop', e => { return; } const imgWrap = target.closest('[data-testid="image"]'); - if ( !imgWrap ) { + if (!imgWrap) { return; } e.stopPropagation(); e.preventDefault(); const files = e.dataTransfer.files; - dropReplaceImage( imgWrap, files ); + dropReplaceImage(imgWrap, files); }); window.addEventListener('paste', e => { const files = e.clipboardData.files; - if ( ! isValidImageList( files ) ) { + if (!isValidImageList(files)) { return; } const visibleImageFields = [...gradioApp().querySelectorAll('[data-testid="image"]')] .filter(el => uiElementIsVisible(el)); - if ( ! visibleImageFields.length ) { + if (!visibleImageFields.length) { return; } - + const firstFreeImageField = visibleImageFields .filter(el => el.querySelector('input[type=file]'))?.[0]; dropReplaceImage( firstFreeImageField ? 
- firstFreeImageField : - visibleImageFields[visibleImageFields.length - 1] - , files ); + firstFreeImageField : + visibleImageFields[visibleImageFields.length - 1] + , files + ); }); diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index d2c2f190..fdf00b4d 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -1,120 +1,120 @@ -function keyupEditAttention(event){ - let target = event.originalTarget || event.composedPath()[0]; - if (! target.matches("[id*='_toprow'] [id*='_prompt'] textarea")) return; - if (! (event.metaKey || event.ctrlKey)) return; - - let isPlus = event.key == "ArrowUp" - let isMinus = event.key == "ArrowDown" - if (!isPlus && !isMinus) return; - - let selectionStart = target.selectionStart; - let selectionEnd = target.selectionEnd; - let text = target.value; - - function selectCurrentParenthesisBlock(OPEN, CLOSE){ - if (selectionStart !== selectionEnd) return false; - - // Find opening parenthesis around current cursor - const before = text.substring(0, selectionStart); - let beforeParen = before.lastIndexOf(OPEN); - if (beforeParen == -1) return false; - let beforeParenClose = before.lastIndexOf(CLOSE); - while (beforeParenClose !== -1 && beforeParenClose > beforeParen) { - beforeParen = before.lastIndexOf(OPEN, beforeParen - 1); - beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1); - } - - // Find closing parenthesis around current cursor - const after = text.substring(selectionStart); - let afterParen = after.indexOf(CLOSE); - if (afterParen == -1) return false; - let afterParenOpen = after.indexOf(OPEN); - while (afterParenOpen !== -1 && afterParen > afterParenOpen) { - afterParen = after.indexOf(CLOSE, afterParen + 1); - afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1); - } - if (beforeParen === -1 || afterParen === -1) return false; - - // Set the selection to the text between the parenthesis - const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen); - const lastColon = parenContent.lastIndexOf(":"); - selectionStart = beforeParen + 1; - selectionEnd = selectionStart + lastColon; - target.setSelectionRange(selectionStart, selectionEnd); - return true; - } - - function selectCurrentWord(){ - if (selectionStart !== selectionEnd) return false; - const delimiters = opts.keyedit_delimiters + " \r\n\t"; - - // seek backward until to find beggining - while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) { - selectionStart--; - } - - // seek forward to find end - while (!delimiters.includes(text[selectionEnd]) && selectionEnd < text.length) { - selectionEnd++; - } - - target.setSelectionRange(selectionStart, selectionEnd); - return true; - } - - // If the user hasn't selected anything, let's select their current parenthesis block or word - if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) { - selectCurrentWord(); - } - - event.preventDefault(); - - var closeCharacter = ')' - var delta = opts.keyedit_precision_attention - - if (selectionStart > 0 && text[selectionStart - 1] == '<'){ - closeCharacter = '>' - delta = opts.keyedit_precision_extra - } else if (selectionStart == 0 || text[selectionStart - 1] != "(") { - - // do not include spaces at the end - while(selectionEnd > selectionStart && text[selectionEnd-1] == ' '){ - selectionEnd -= 1; - } - if(selectionStart == selectionEnd){ - return - } - - text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + 
text.slice(selectionEnd);
-
-        selectionStart += 1;
-        selectionEnd += 1;
-    }
-
-    var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
-    var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
-    if (isNaN(weight)) return;
-
-    weight += isPlus ? delta : -delta;
-    weight = parseFloat(weight.toPrecision(12));
-    if(String(weight).length == 1) weight += ".0"
-
-    if (closeCharacter == ')' && weight == 1) {
-        text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 5);
-        selectionStart--;
-        selectionEnd--;
-    } else {
-        text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
-    }
-
-    target.focus();
-    target.value = text;
-    target.selectionStart = selectionStart;
-    target.selectionEnd = selectionEnd;
-
-    updateInput(target)
-}
-
-addEventListener('keydown', (event) => {
-    keyupEditAttention(event);
-});
+function keyupEditAttention(event) {
+    let target = event.originalTarget || event.composedPath()[0];
+    if (!target.matches("[id*='_toprow'] [id*='_prompt'] textarea")) return;
+    if (!(event.metaKey || event.ctrlKey)) return;
+
+    let isPlus = event.key == "ArrowUp";
+    let isMinus = event.key == "ArrowDown";
+    if (!isPlus && !isMinus) return;
+
+    let selectionStart = target.selectionStart;
+    let selectionEnd = target.selectionEnd;
+    let text = target.value;
+
+    function selectCurrentParenthesisBlock(OPEN, CLOSE) {
+        if (selectionStart !== selectionEnd) return false;
+
+        // Find opening parenthesis around current cursor
+        const before = text.substring(0, selectionStart);
+        let beforeParen = before.lastIndexOf(OPEN);
+        if (beforeParen == -1) return false;
+        let beforeParenClose = before.lastIndexOf(CLOSE);
+        while (beforeParenClose !== -1 && beforeParenClose > beforeParen) {
+            beforeParen = before.lastIndexOf(OPEN, beforeParen - 1);
+            beforeParenClose = before.lastIndexOf(CLOSE, beforeParenClose - 1);
+        }
+
+        // Find closing parenthesis around current cursor
+        const after = text.substring(selectionStart);
+        let afterParen = after.indexOf(CLOSE);
+        if (afterParen == -1) return false;
+        let afterParenOpen = after.indexOf(OPEN);
+        while (afterParenOpen !== -1 && afterParen > afterParenOpen) {
+            afterParen = after.indexOf(CLOSE, afterParen + 1);
+            afterParenOpen = after.indexOf(OPEN, afterParenOpen + 1);
+        }
+        if (beforeParen === -1 || afterParen === -1) return false;
+
+        // Set the selection to the text between the parentheses
+        const parenContent = text.substring(beforeParen + 1, selectionStart + afterParen);
+        const lastColon = parenContent.lastIndexOf(":");
+        selectionStart = beforeParen + 1;
+        selectionEnd = selectionStart + lastColon;
+        target.setSelectionRange(selectionStart, selectionEnd);
+        return true;
+    }
+
+    function selectCurrentWord() {
+        if (selectionStart !== selectionEnd) return false;
+        const delimiters = opts.keyedit_delimiters + " \r\n\t";
+
+        // seek backward to find the beginning of the word
+        while (!delimiters.includes(text[selectionStart - 1]) && selectionStart > 0) {
+            selectionStart--;
+        }
+
+        // seek forward to find the end of the word
+        while (!delimiters.includes(text[selectionEnd]) && selectionEnd < text.length) {
+            selectionEnd++;
+        }
+
+        target.setSelectionRange(selectionStart, selectionEnd);
+        return true;
+    }
+
+    // If the user hasn't selected anything, let's select their current parenthesis block or word
+    if (!selectCurrentParenthesisBlock('<', '>') && !selectCurrentParenthesisBlock('(', ')')) {
+        selectCurrentWord();
+    }
+
+    event.preventDefault();
+
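+    // Illustration only (assuming the default opts.keyedit_precision_attention of 0.1):
+    // with the cursor on "cat" in "a cat on a mat", Ctrl+ArrowUp first wraps the word
+    // as "(cat:1.0)" and then adds the delta, giving "a (cat:1.1) on a mat";
+    // Ctrl+ArrowDown from 1.1 lands back on exactly 1.0, and since "(token:1.0)" is a
+    // no-op, the surrounding parentheses are stripped again below.
+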
+    var closeCharacter = ')';
+    var delta = opts.keyedit_precision_attention;
+
+    if (selectionStart > 0 && text[selectionStart - 1] == '<') {
+        closeCharacter = '>';
+        delta = opts.keyedit_precision_extra;
+    } else if (selectionStart == 0 || text[selectionStart - 1] != "(") {
+
+        // do not include spaces at the end
+        while (selectionEnd > selectionStart && text[selectionEnd - 1] == ' ') {
+            selectionEnd -= 1;
+        }
+        if (selectionStart == selectionEnd) {
+            return;
+        }
+
+        text = text.slice(0, selectionStart) + "(" + text.slice(selectionStart, selectionEnd) + ":1.0)" + text.slice(selectionEnd);
+
+        selectionStart += 1;
+        selectionEnd += 1;
+    }
+
+    var end = text.slice(selectionEnd + 1).indexOf(closeCharacter) + 1;
+    var weight = parseFloat(text.slice(selectionEnd + 1, selectionEnd + 1 + end));
+    if (isNaN(weight)) return;
+
+    weight += isPlus ? delta : -delta;
+    weight = parseFloat(weight.toPrecision(12));
+    if (String(weight).length == 1) weight += ".0";
+
+    if (closeCharacter == ')' && weight == 1) {
+        text = text.slice(0, selectionStart - 1) + text.slice(selectionStart, selectionEnd) + text.slice(selectionEnd + 5);
+        selectionStart--;
+        selectionEnd--;
+    } else {
+        text = text.slice(0, selectionEnd + 1) + weight + text.slice(selectionEnd + 1 + end - 1);
+    }
+
+    target.focus();
+    target.value = text;
+    target.selectionStart = selectionStart;
+    target.selectionEnd = selectionEnd;
+
+    updateInput(target);
+}
+
+addEventListener('keydown', (event) => {
+    keyupEditAttention(event);
+});
diff --git a/javascript/extensions.js b/javascript/extensions.js
index 2a2d2f8e..efeaf3a5 100644
--- a/javascript/extensions.js
+++ b/javascript/extensions.js
@@ -1,71 +1,74 @@
-
-function extensions_apply(_disabled_list, _update_list, disable_all){
-    var disable = []
-    var update = []
-
-    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
-        if(x.name.startsWith("enable_") && ! x.checked)
-            disable.push(x.name.substring(7))
-
-        if(x.name.startsWith("update_") && x.checked)
-            update.push(x.name.substring(7))
-    })
-
-    restart_reload()
-
-    return [JSON.stringify(disable), JSON.stringify(update), disable_all]
-}
-
-function extensions_check(){
-    var disable = []
-
-    gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x){
-        if(x.name.startsWith("enable_") && ! x.checked)
-            disable.push(x.name.substring(7))
-    })
-
-    gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){
-        x.innerHTML = "Loading..."
-    })
-
-
-    var id = randomId()
-    requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function(){
-
-    })
-
-    return [id, JSON.stringify(disable)]
-}
-
-function install_extension_from_index(button, url){
-    button.disabled = "disabled"
-    button.value = "Installing..."
- - var textarea = gradioApp().querySelector('#extension_to_install textarea') - textarea.value = url - updateInput(textarea) - - gradioApp().querySelector('#install_extension_button').click() -} - -function config_state_confirm_restore(_, config_state_name, config_restore_type) { - if (config_state_name == "Current") { - return [false, config_state_name, config_restore_type]; - } - let restored = ""; - if (config_restore_type == "extensions") { - restored = "all saved extension versions"; - } else if (config_restore_type == "webui") { - restored = "the webui version"; - } else { - restored = "the webui version and all saved extension versions"; - } - let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); - if (confirmed) { - restart_reload(); - gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x){ - x.innerHTML = "Loading..." - }) - } - return [confirmed, config_state_name, config_restore_type]; -} + +function extensions_apply(_disabled_list, _update_list, disable_all) { + var disable = []; + var update = []; + + gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { + if (x.name.startsWith("enable_") && !x.checked) { + disable.push(x.name.substring(7)); + } + + if (x.name.startsWith("update_") && x.checked) { + update.push(x.name.substring(7)); + } + }); + + restart_reload(); + + return [JSON.stringify(disable), JSON.stringify(update), disable_all]; +} + +function extensions_check() { + var disable = []; + + gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { + if (x.name.startsWith("enable_") && !x.checked) { + disable.push(x.name.substring(7)); + } + }); + + gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { + x.innerHTML = "Loading..."; + }); + + + var id = randomId(); + requestProgress(id, gradioApp().getElementById('extensions_installed_top'), null, function() { + + }); + + return [id, JSON.stringify(disable)]; +} + +function install_extension_from_index(button, url) { + button.disabled = "disabled"; + button.value = "Installing..."; + + var textarea = gradioApp().querySelector('#extension_to_install textarea'); + textarea.value = url; + updateInput(textarea); + + gradioApp().querySelector('#install_extension_button').click(); +} + +function config_state_confirm_restore(_, config_state_name, config_restore_type) { + if (config_state_name == "Current") { + return [false, config_state_name, config_restore_type]; + } + let restored = ""; + if (config_restore_type == "extensions") { + restored = "all saved extension versions"; + } else if (config_restore_type == "webui") { + restored = "the webui version"; + } else { + restored = "the webui version and all saved extension versions"; + } + let confirmed = confirm("Are you sure you want to restore from this state?\nThis will reset " + restored + "."); + if (confirmed) { + restart_reload(); + gradioApp().querySelectorAll('#extensions .extension_status').forEach(function(x) { + x.innerHTML = "Loading..."; + }); + } + return [confirmed, config_state_name, config_restore_type]; +} diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index c85bc79a..aafe0a00 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -1,196 +1,215 @@ -function setupExtraNetworksForTab(tabname){ - gradioApp().querySelector('#'+tabname+'_extra_tabs').classList.add('extra-networks') - - var tabs = 
gradioApp().querySelector('#'+tabname+'_extra_tabs > div') - var search = gradioApp().querySelector('#'+tabname+'_extra_search textarea') - var refresh = gradioApp().getElementById(tabname+'_extra_refresh') - - search.classList.add('search') - tabs.appendChild(search) - tabs.appendChild(refresh) - - var applyFilter = function(){ - var searchTerm = search.value.toLowerCase() - - gradioApp().querySelectorAll('#'+tabname+'_extra_tabs div.card').forEach(function(elem){ - var searchOnly = elem.querySelector('.search_only') - var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase() - - var visible = text.indexOf(searchTerm) != -1 - - if(searchOnly && searchTerm.length < 4){ - visible = false - } - - elem.style.display = visible ? "" : "none" - }) - } - - search.addEventListener("input", applyFilter); - applyFilter(); - - extraNetworksApplyFilter[tabname] = applyFilter; -} - -function applyExtraNetworkFilter(tabname){ - setTimeout(extraNetworksApplyFilter[tabname], 1); -} - -var extraNetworksApplyFilter = {} -var activePromptTextarea = {}; - -function setupExtraNetworks(){ - setupExtraNetworksForTab('txt2img') - setupExtraNetworksForTab('img2img') - - function registerPrompt(tabname, id){ - var textarea = gradioApp().querySelector("#" + id + " > label > textarea"); - - if (! activePromptTextarea[tabname]){ - activePromptTextarea[tabname] = textarea - } - - textarea.addEventListener("focus", function(){ - activePromptTextarea[tabname] = textarea; - }); - } - - registerPrompt('txt2img', 'txt2img_prompt') - registerPrompt('txt2img', 'txt2img_neg_prompt') - registerPrompt('img2img', 'img2img_prompt') - registerPrompt('img2img', 'img2img_neg_prompt') -} - -onUiLoaded(setupExtraNetworks) - -var re_extranet = /<([^:]+:[^:]+):[\d\.]+>/; -var re_extranet_g = /\s+<([^:]+:[^:]+):[\d\.]+>/g; - -function tryToRemoveExtraNetworkFromPrompt(textarea, text){ - var m = text.match(re_extranet) - if(! m) return false - - var partToSearch = m[1] - var replaced = false - var newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found){ - m = found.match(re_extranet); - if(m[1] == partToSearch){ - replaced = true; - return "" - } - return found; - }) - - if(replaced){ - textarea.value = newTextareaText - return true; - } - - return false -} - -function cardClicked(tabname, textToAdd, allowNegativePrompt){ - var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea") - - if(! tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)){ - textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd - } - - updateInput(textarea) -} - -function saveCardPreview(event, tabname, filename){ - var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea') - var button = gradioApp().getElementById(tabname + '_save_preview') - - textarea.value = filename - updateInput(textarea) - - button.click() - - event.stopPropagation() - event.preventDefault() -} - -function extraNetworksSearchButton(tabs_id, event){ - var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea') - var button = event.target - var text = button.classList.contains("search-all") ? "" : button.textContent.trim() - - searchTextarea.value = text - updateInput(searchTextarea) -} - -var globalPopup = null; -var globalPopupInner = null; -function popup(contents){ - if(! 
globalPopup){ - globalPopup = document.createElement('div') - globalPopup.onclick = function(){ globalPopup.style.display = "none"; }; - globalPopup.classList.add('global-popup'); - - var close = document.createElement('div') - close.classList.add('global-popup-close'); - close.onclick = function(){ globalPopup.style.display = "none"; }; - close.title = "Close"; - globalPopup.appendChild(close) - - globalPopupInner = document.createElement('div') - globalPopupInner.onclick = function(event){ event.stopPropagation(); return false; }; - globalPopupInner.classList.add('global-popup-inner'); - globalPopup.appendChild(globalPopupInner) - - gradioApp().appendChild(globalPopup); - } - - globalPopupInner.innerHTML = ''; - globalPopupInner.appendChild(contents); - - globalPopup.style.display = "flex"; -} - -function extraNetworksShowMetadata(text){ - var elem = document.createElement('pre') - elem.classList.add('popup-metadata'); - elem.textContent = text; - - popup(elem); -} - -function requestGet(url, data, handler, errorHandler){ - var xhr = new XMLHttpRequest(); - var args = Object.keys(data).map(function(k){ return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]) }).join('&') - xhr.open("GET", url + "?" + args, true); - - xhr.onreadystatechange = function () { - if (xhr.readyState === 4) { - if (xhr.status === 200) { - try { - var js = JSON.parse(xhr.responseText); - handler(js) - } catch (error) { - console.error(error); - errorHandler() - } - } else{ - errorHandler() - } - } - }; - var js = JSON.stringify(data); - xhr.send(js); -} - -function extraNetworksRequestMetadata(event, extraPage, cardName){ - var showError = function(){ extraNetworksShowMetadata("there was an error getting metadata"); } - - requestGet("./sd_extra_networks/metadata", {"page": extraPage, "item": cardName}, function(data){ - if(data && data.metadata){ - extraNetworksShowMetadata(data.metadata) - } else{ - showError() - } - }, showError) - - event.stopPropagation() -} +function setupExtraNetworksForTab(tabname) { + gradioApp().querySelector('#' + tabname + '_extra_tabs').classList.add('extra-networks'); + + var tabs = gradioApp().querySelector('#' + tabname + '_extra_tabs > div'); + var search = gradioApp().querySelector('#' + tabname + '_extra_search textarea'); + var refresh = gradioApp().getElementById(tabname + '_extra_refresh'); + + search.classList.add('search'); + tabs.appendChild(search); + tabs.appendChild(refresh); + + var applyFilter = function() { + var searchTerm = search.value.toLowerCase(); + + gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) { + var searchOnly = elem.querySelector('.search_only'); + var text = elem.querySelector('.name').textContent.toLowerCase() + " " + elem.querySelector('.search_term').textContent.toLowerCase(); + + var visible = text.indexOf(searchTerm) != -1; + + if (searchOnly && searchTerm.length < 4) { + visible = false; + } + + elem.style.display = visible ? 
"" : "none"; + }); + }; + + search.addEventListener("input", applyFilter); + applyFilter(); + + extraNetworksApplyFilter[tabname] = applyFilter; +} + +function applyExtraNetworkFilter(tabname) { + setTimeout(extraNetworksApplyFilter[tabname], 1); +} + +var extraNetworksApplyFilter = {}; +var activePromptTextarea = {}; + +function setupExtraNetworks() { + setupExtraNetworksForTab('txt2img'); + setupExtraNetworksForTab('img2img'); + + function registerPrompt(tabname, id) { + var textarea = gradioApp().querySelector("#" + id + " > label > textarea"); + + if (!activePromptTextarea[tabname]) { + activePromptTextarea[tabname] = textarea; + } + + textarea.addEventListener("focus", function() { + activePromptTextarea[tabname] = textarea; + }); + } + + registerPrompt('txt2img', 'txt2img_prompt'); + registerPrompt('txt2img', 'txt2img_neg_prompt'); + registerPrompt('img2img', 'img2img_prompt'); + registerPrompt('img2img', 'img2img_neg_prompt'); +} + +onUiLoaded(setupExtraNetworks); + +var re_extranet = /<([^:]+:[^:]+):[\d.]+>/; +var re_extranet_g = /\s+<([^:]+:[^:]+):[\d.]+>/g; + +function tryToRemoveExtraNetworkFromPrompt(textarea, text) { + var m = text.match(re_extranet); + var replaced = false; + var newTextareaText; + if (m) { + var partToSearch = m[1]; + newTextareaText = textarea.value.replaceAll(re_extranet_g, function(found) { + m = found.match(re_extranet); + if (m[1] == partToSearch) { + replaced = true; + return ""; + } + return found; + }); + } else { + newTextareaText = textarea.value.replaceAll(new RegExp(text, "g"), function(found) { + if (found == text) { + replaced = true; + return ""; + } + return found; + }); + } + + if (replaced) { + textarea.value = newTextareaText; + return true; + } + + return false; +} + +function cardClicked(tabname, textToAdd, allowNegativePrompt) { + var textarea = allowNegativePrompt ? activePromptTextarea[tabname] : gradioApp().querySelector("#" + tabname + "_prompt > label > textarea"); + + if (!tryToRemoveExtraNetworkFromPrompt(textarea, textToAdd)) { + textarea.value = textarea.value + opts.extra_networks_add_text_separator + textToAdd; + } + + updateInput(textarea); +} + +function saveCardPreview(event, tabname, filename) { + var textarea = gradioApp().querySelector("#" + tabname + '_preview_filename > label > textarea'); + var button = gradioApp().getElementById(tabname + '_save_preview'); + + textarea.value = filename; + updateInput(textarea); + + button.click(); + + event.stopPropagation(); + event.preventDefault(); +} + +function extraNetworksSearchButton(tabs_id, event) { + var searchTextarea = gradioApp().querySelector("#" + tabs_id + ' > div > textarea'); + var button = event.target; + var text = button.classList.contains("search-all") ? 
"" : button.textContent.trim(); + + searchTextarea.value = text; + updateInput(searchTextarea); +} + +var globalPopup = null; +var globalPopupInner = null; +function popup(contents) { + if (!globalPopup) { + globalPopup = document.createElement('div'); + globalPopup.onclick = function() { + globalPopup.style.display = "none"; + }; + globalPopup.classList.add('global-popup'); + + var close = document.createElement('div'); + close.classList.add('global-popup-close'); + close.onclick = function() { + globalPopup.style.display = "none"; + }; + close.title = "Close"; + globalPopup.appendChild(close); + + globalPopupInner = document.createElement('div'); + globalPopupInner.onclick = function(event) { + event.stopPropagation(); return false; + }; + globalPopupInner.classList.add('global-popup-inner'); + globalPopup.appendChild(globalPopupInner); + + gradioApp().appendChild(globalPopup); + } + + globalPopupInner.innerHTML = ''; + globalPopupInner.appendChild(contents); + + globalPopup.style.display = "flex"; +} + +function extraNetworksShowMetadata(text) { + var elem = document.createElement('pre'); + elem.classList.add('popup-metadata'); + elem.textContent = text; + + popup(elem); +} + +function requestGet(url, data, handler, errorHandler) { + var xhr = new XMLHttpRequest(); + var args = Object.keys(data).map(function(k) { + return encodeURIComponent(k) + '=' + encodeURIComponent(data[k]); + }).join('&'); + xhr.open("GET", url + "?" + args, true); + + xhr.onreadystatechange = function() { + if (xhr.readyState === 4) { + if (xhr.status === 200) { + try { + var js = JSON.parse(xhr.responseText); + handler(js); + } catch (error) { + console.error(error); + errorHandler(); + } + } else { + errorHandler(); + } + } + }; + var js = JSON.stringify(data); + xhr.send(js); +} + +function extraNetworksRequestMetadata(event, extraPage, cardName) { + var showError = function() { + extraNetworksShowMetadata("there was an error getting metadata"); + }; + + requestGet("./sd_extra_networks/metadata", {page: extraPage, item: cardName}, function(data) { + if (data && data.metadata) { + extraNetworksShowMetadata(data.metadata); + } else { + showError(); + } + }, showError); + + event.stopPropagation(); +} diff --git a/javascript/generationParams.js b/javascript/generationParams.js index ef64ee2e..a877f8a5 100644 --- a/javascript/generationParams.js +++ b/javascript/generationParams.js @@ -1,33 +1,35 @@ // attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes let txt2img_gallery, img2img_gallery, modal = undefined; -onUiUpdate(function(){ - if (!txt2img_gallery) { - txt2img_gallery = attachGalleryListeners("txt2img") - } - if (!img2img_gallery) { - img2img_gallery = attachGalleryListeners("img2img") - } - if (!modal) { - modal = gradioApp().getElementById('lightboxModal') - modalObserver.observe(modal, { attributes : true, attributeFilter : ['style'] }); - } +onUiUpdate(function() { + if (!txt2img_gallery) { + txt2img_gallery = attachGalleryListeners("txt2img"); + } + if (!img2img_gallery) { + img2img_gallery = attachGalleryListeners("img2img"); + } + if (!modal) { + modal = gradioApp().getElementById('lightboxModal'); + modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']}); + } }); let modalObserver = new MutationObserver(function(mutations) { - mutations.forEach(function(mutationRecord) { - let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText - if (mutationRecord.target.style.display === 'none' && 
diff --git a/javascript/generationParams.js b/javascript/generationParams.js
index ef64ee2e..a877f8a5 100644
--- a/javascript/generationParams.js
+++ b/javascript/generationParams.js
@@ -1,33 +1,35 @@
 // attaches listeners to the txt2img and img2img galleries to update displayed generation param text when the image changes
 let txt2img_gallery, img2img_gallery, modal = undefined;
-onUiUpdate(function(){
-    if (!txt2img_gallery) {
-        txt2img_gallery = attachGalleryListeners("txt2img")
-    }
-    if (!img2img_gallery) {
-        img2img_gallery = attachGalleryListeners("img2img")
-    }
-    if (!modal) {
-        modal = gradioApp().getElementById('lightboxModal')
-        modalObserver.observe(modal, { attributes : true, attributeFilter : ['style'] });
-    }
+onUiUpdate(function() {
+    if (!txt2img_gallery) {
+        txt2img_gallery = attachGalleryListeners("txt2img");
+    }
+    if (!img2img_gallery) {
+        img2img_gallery = attachGalleryListeners("img2img");
+    }
+    if (!modal) {
+        modal = gradioApp().getElementById('lightboxModal');
+        modalObserver.observe(modal, {attributes: true, attributeFilter: ['style']});
+    }
 });

 let modalObserver = new MutationObserver(function(mutations) {
-    mutations.forEach(function(mutationRecord) {
-        let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText
-        if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img'))
-            gradioApp().getElementById(selectedTab+"_generation_info_button")?.click()
-    });
+    mutations.forEach(function(mutationRecord) {
+        let selectedTab = gradioApp().querySelector('#tabs div button.selected')?.innerText;
+        if (mutationRecord.target.style.display === 'none' && (selectedTab === 'txt2img' || selectedTab === 'img2img')) {
+            gradioApp().getElementById(selectedTab + "_generation_info_button")?.click();
+        }
+    });
 });

 function attachGalleryListeners(tab_name) {
-    var gallery = gradioApp().querySelector('#'+tab_name+'_gallery')
-    gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name+"_generation_info_button").click());
-    gallery?.addEventListener('keydown', (e) => {
-        if (e.keyCode == 37 || e.keyCode == 39) // left or right arrow
-            gradioApp().getElementById(tab_name+"_generation_info_button").click()
-    });
-    return gallery;
+    var gallery = gradioApp().querySelector('#' + tab_name + '_gallery');
+    gallery?.addEventListener('click', () => gradioApp().getElementById(tab_name + "_generation_info_button").click());
+    gallery?.addEventListener('keydown', (e) => {
+        if (e.keyCode == 37 || e.keyCode == 39) { // left or right arrow
+            gradioApp().getElementById(tab_name + "_generation_info_button").click();
+        }
+    });
+    return gallery;
 }
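A minimal usage sketch for the gallery wiring above (illustration only; "mytab" is a
hypothetical tab id, assuming it follows the same <tabname>_gallery /
<tabname>_generation_info_button element-id convention used by txt2img and img2img):

    onUiLoaded(function() {
        attachGalleryListeners("mytab"); // refresh generation info when the gallery changes
    });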
diff --git a/javascript/hints.js b/javascript/hints.js
index 3746df99..46f342cb 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -1,16 +1,17 @@
 // mouseover tooltips for various UI elements
-titles = {
+var titles = {
     "Sampling steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results",
     "Sampling method": "Which algorithm to use to produce the image",
-    "GFPGAN": "Restore low quality faces using GFPGAN neural network",
-    "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help",
-    "DDIM": "Denoising Diffusion Implicit Models - best at inpainting",
-    "UniPC": "Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models",
-    "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",
+    "GFPGAN": "Restore low quality faces using GFPGAN neural network",
+    "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help",
+    "DDIM": "Denoising Diffusion Implicit Models - best at inpainting",
+    "UniPC": "Unified Predictor-Corrector Framework for Fast Sampling of Diffusion Models",
+    "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution",

-    "Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)",
-    "Batch size": "How many image to create in a single batch (increases generation performance at cost of higher VRAM usage)",
+    "\u{1F4D0}": "Auto detect size from img2img",
+    "Batch count": "How many batches of images to create (has no impact on generation performance or VRAM usage)",
+    "Batch size": "How many images to create in a single batch (increases generation performance at cost of higher VRAM usage)",
     "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results",
     "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result",

     "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time",
@@ -40,7 +41,7 @@ titles = {
     "Inpaint at full resolution": "Upscale masked region to target resolution, do inpainting, downscale back and paste into original image",

     "Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.",
-
+
     "Skip": "Stop processing current image and continue processing.",
     "Interrupt": "Stop processing images and return any results accumulated so far.",
     "Save": "Write image to a directory (default - log/images) and generation parameters into csv file.",
@@ -66,8 +67,8 @@ titles = {

     "Interrogate": "Reconstruct prompt from existing image and put it into the prompt field.",

-    "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [denoising], [clip_skip], [batch_number], [generation_number], [prompt_hash], [prompt], [prompt_no_styles], [prompt_spaces], [width], [height], [styles], [sampler], [seed], [model_hash], [model_name], [prompt_words], [date], [datetime], [datetime<Format>], [datetime