diff --git a/README.md b/README.md
index c1538100..3cb4efb9 100644
--- a/README.md
+++ b/README.md
@@ -76,6 +76,10 @@ sudo pacman -S wget git python3
 bash <(wget -qO- https://raw.githubusercontent.com/AUTOMATIC1111/stable-diffusion-webui/master/webui.sh)
 ```
 
+### Installation on Apple Silicon
+
+Find the instructions [here](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Installation-on-Apple-Silicon).
+
 ## Documentation
 
 The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki).
diff --git a/javascript/aspectRatioOverlay.js b/javascript/aspectRatioOverlay.js
index 52e9f381..96f1c00d 100644
--- a/javascript/aspectRatioOverlay.js
+++ b/javascript/aspectRatioOverlay.js
@@ -18,9 +18,9 @@ function dimensionChange(e,dimname){
 		return;
 	}
 
-	var img2imgMode = gradioApp().querySelector("input[name=radio-img2img_mode]:checked")
+	var img2imgMode = gradioApp().querySelector('#mode_img2img.tabs > div > button.rounded-t-lg.border-gray-200')
 	if(img2imgMode){
-		img2imgMode=img2imgMode.value
+		img2imgMode=img2imgMode.innerText
 	}else{
 		return;
 	}
@@ -30,12 +30,10 @@ function dimensionChange(e,dimname){
 
 	var targetElement = null;
 
-	if(img2imgMode=='Redraw whole image' && redrawImage){
+	if(img2imgMode=='img2img' && redrawImage){
 		targetElement = redrawImage;
-	}else if(img2imgMode=='Inpaint a part of image' && inpaintImage){
+	}else if(img2imgMode=='Inpaint' && inpaintImage){
 		targetElement = inpaintImage;
-	}else if(img2imgMode=='SD upscale' && redrawImage){
-		targetElement = redrawImage;
 	}
 
 	if(targetElement){
@@ -119,6 +117,3 @@ onUiUpdate(function(){
         })
     }
 });
-
-
-
diff --git a/javascript/hints.js b/javascript/hints.js
index 65a67d50..9d3eecf3 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -59,6 +59,7 @@ titles = {
 
     "Images filename pattern": "Use following tags to define how filenames for images are chosen: [steps], [cfg], [prompt], [prompt_spaces], [width], [height], [sampler], [seed], [model_hash], [prompt_words], [date]; leave empty for default.",
     "Directory name pattern": "Use following tags to define how subdirectories for images and grids are chosen: [steps], [cfg], [prompt], [prompt_spaces], [width], [height], [sampler], [seed], [model_hash], [prompt_words], [date]; leave empty for default.",
+    "Max prompt words": "Set the maximum number of words to be used in the [prompt_words] option; ATTENTION: If the words are too long, they may exceed the maximum length of the file path that the system can handle",
 
     "Loopback": "Process an image, use it as an input, repeat.",
     "Loops": "How many times to repeat processing an image and using it as input for the next iteration",
diff --git a/javascript/imageMaskFix.js b/javascript/imageMaskFix.js
new file mode 100644
index 00000000..3d77bfe9
--- /dev/null
+++ b/javascript/imageMaskFix.js
@@ -0,0 +1,45 @@
+/**
+ * temporary fix for https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/668
+ * @see https://github.com/gradio-app/gradio/issues/1721
+ */
+window.addEventListener( 'resize', () => imageMaskResize());
+function imageMaskResize() {
+  const canvases = gradioApp().querySelectorAll('#img2maskimg .touch-none canvas');
+  if ( ! canvases.length ) {
+    canvases_fixed = false;
+    window.removeEventListener( 'resize', imageMaskResize );
+    return;
+  }
+
+  const wrapper = canvases[0].closest('.touch-none');
+  const previewImage = wrapper.previousElementSibling;
+
+  if ( ! previewImage.complete ) {
+    previewImage.addEventListener( 'load', () => imageMaskResize());
+    return;
+  }
+
+  const w = previewImage.width;
+  const h = previewImage.height;
+  const nw = previewImage.naturalWidth;
+  const nh = previewImage.naturalHeight;
+  const portrait = nh > nw;
+  const factor = portrait;
+
+  const wW = Math.min(w, portrait ? h/nh*nw : w/nw*nw);
+  const wH = Math.min(h, portrait ? h/nh*nh : w/nw*nh);
+
+  wrapper.style.width = `${wW}px`;
+  wrapper.style.height = `${wH}px`;
+  wrapper.style.left = `${(w-wW)/2}px`;
+  wrapper.style.top = `${(h-wH)/2}px`;
+
+  canvases.forEach( c => {
+    c.style.width = c.style.height = '';
+    c.style.maxWidth = '100%';
+    c.style.maxHeight = '100%';
+    c.style.objectFit = 'contain';
+  });
+}
+
+onUiUpdate(() => imageMaskResize());
\ No newline at end of file
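For reference, the wrapper sizing in imageMaskFix.js above is a plain aspect-fit ("letterbox") calculation: the mask wrapper is shrunk to the region the preview image actually occupies inside its element and centred with the left/top offsets. Below is a small Python trace of that arithmetic with invented numbers (an illustration only, not part of the patch; the element and image sizes are hypothetical):

```python
# Trace of the imageMaskFix.js sizing math with made-up values:
# a 512x512 preview element showing a 600x900 (portrait) source image.
w, h = 512, 512      # displayed size of the preview element
nw, nh = 600, 900    # natural size of the underlying image
portrait = nh > nw

wW = min(w, h / nh * nw if portrait else w / nw * nw)  # wrapper width  -> ~341.3
wH = min(h, h / nh * nh if portrait else w / nw * nh)  # wrapper height -> 512
left, top = (w - wW) / 2, (h - wH) / 2                 # centring offsets -> ~85.3, 0

print(round(wW, 1), wH, round(left, 1), top)
```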
diff --git a/modules/images.py b/modules/images.py
index f65f4553..6cf56ddb 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -249,7 +249,6 @@ invalid_filename_prefix = ' '
 invalid_filename_postfix = ' .'
 re_nonletters = re.compile(r'[\s'+string.punctuation+']+')
 max_filename_part_length = 128
-max_prompt_words = 8
 
 
 def sanitize_filename_part(text, replace_spaces=True):
@@ -263,6 +262,8 @@ def sanitize_filename_part(text, replace_spaces=True):
 
 
 def apply_filename_pattern(x, p, seed, prompt):
+    max_prompt_words = opts.directories_max_prompt_words
+
     if seed is not None:
         x = x.replace("[seed]", str(seed))
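The images.py change above only moves the word limit into the options (opts.directories_max_prompt_words); the [prompt_words] substitution itself lives further down in apply_filename_pattern and is not shown in this hunk. As a rough, hypothetical sketch of how such a cap keeps filenames inside the filesystem's path-length limit (the function name, regex, and joining below are assumptions, not the module's actual code):

```python
import re

def prompt_words_part(prompt: str, max_prompt_words: int) -> str:
    # Hypothetical sketch: keep only the first N words of the prompt for the
    # [prompt_words] filename tag, so very long prompts cannot overflow the
    # maximum path length the operating system allows.
    words = [w for w in re.split(r"[\s,]+", prompt or "") if w] or ["empty"]
    return "_".join(words[:max_prompt_words])

# With the new default of 8 words:
print(prompt_words_part("a highly detailed portrait of an astronaut riding a horse on mars", 8))
# -> a_highly_detailed_portrait_of_an_astronaut_riding
```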
diff --git a/modules/img2img.py b/modules/img2img.py
index 91689232..bce8b712 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -118,4 +118,8 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
 
     shared.total_tqdm.clear()
 
-    return processed.images, processed.js(), plaintext_to_html(processed.info)
+    generation_info_js = processed.js()
+    if opts.samples_log_stdout:
+        print(generation_info_js)
+
+    return processed.images, generation_info_js, plaintext_to_html(processed.info)
\ No newline at end of file
diff --git a/modules/shared.py b/modules/shared.py
index 0b57685e..b712f20e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -100,81 +100,126 @@ def realesrgan_models_names():
     return [x.name for x in modules.realesrgan_model.get_realesrgan_models()]
 
 
-class Options:
-    class OptionInfo:
-        def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
-            self.default = default
-            self.label = label
-            self.component = component
-            self.component_args = component_args
-            self.onchange = onchange
+class OptionInfo:
+    def __init__(self, default=None, label="", component=None, component_args=None, onchange=None):
+        self.default = default
+        self.label = label
+        self.component = component
+        self.component_args = component_args
+        self.onchange = onchange
+        self.section = None
+
+
+def options_section(section_identifer, options_dict):
+    for k, v in options_dict.items():
+        v.section = section_identifer
+
+    return options_dict
+
+
+hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None
+
+options_templates = {}
+
+options_templates.update(options_section(('saving-images', "Saving images/grids"), {
+    "samples_save": OptionInfo(True, "Always save all generated images"),
+    "samples_format": OptionInfo('png', 'File format for images'),
+    "samples_filename_pattern": OptionInfo("", "Images filename pattern"),
+
+    "grid_save": OptionInfo(True, "Always save all generated image grids"),
+    "grid_format": OptionInfo('png', 'File format for grids'),
+    "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
+    "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
+    "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
+
+    "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
+    "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
+    "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
+    "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
+    "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
+
+    "use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process in extras tab"),
+}))
+
+options_templates.update(options_section(('saving-paths', "Paths for saving"), {
+    "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to three directories below", component_args=hide_dirs),
+    "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
+    "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
+    "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
+    "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
+    "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
+    "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
+    "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
+}))
+
+options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
+    "save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
+    "grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
+    "directories_filename_pattern": OptionInfo("", "Directory name pattern"),
+    "directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
+}))
+
+options_templates.update(options_section(('upscaling', "Upscaling"), {
+    "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
+    "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
+    "realesrgan_enabled_models": OptionInfo(["Real-ESRGAN 4x plus", "Real-ESRGAN 4x plus anime 6B"], "Select which RealESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
+    "SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
+    "SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
+    "ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
+    "ldsr_pre_down": OptionInfo(1, "LDSR Pre-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
+    "ldsr_post_down": OptionInfo(1, "LDSR Post-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
+
+    "upscaler_for_hires_fix": OptionInfo(None, "Upscaler for highres. fix", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
+}))
+
+options_templates.update(options_section(('face-restoration', "Face restoration"), {
+    "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
+    "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
+    "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
+    "save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
+}))
+
+options_templates.update(options_section(('system', "System"), {
+    "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
+    "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
+    "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
+}))
+
+options_templates.update(options_section(('sd', "Stable Diffusion"), {
+    "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
+    "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
+    "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
+    "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
+    "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
+    "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+    "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
+    "filter_nsfw": OptionInfo(False, "Filter NSFW content"),
+    "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
+}))
+
+options_templates.update(options_section(('interrogate', "Interrogate Options"), {
+    "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
+    "interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
+    "interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
+    "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
+    "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
+    "interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
+}))
+
+options_templates.update(options_section(('ui', "User interface"), {
+    "show_progressbar": OptionInfo(True, "Show progressbar"),
+    "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
+    "return_grid": OptionInfo(True, "Show grid in results for web"),
+    "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
+    "font": OptionInfo("", "Font for image grids that have text"),
+    "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
+    "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
+}))
+
+
+class Options:
     data = None
-    hide_dirs = {"visible": False} if cmd_opts.hide_ui_dir_config else None
-    data_labels = {
-        "samples_filename_pattern": OptionInfo("", "Images filename pattern"),
-        "save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
-        "grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
-        "directories_filename_pattern": OptionInfo("", "Directory name pattern"),
-        "outdir_samples": OptionInfo("", "Output directory for images; if empty, defaults to two directories below", component_args=hide_dirs),
-        "outdir_txt2img_samples": OptionInfo("outputs/txt2img-images", 'Output directory for txt2img images', component_args=hide_dirs),
-        "outdir_img2img_samples": OptionInfo("outputs/img2img-images", 'Output directory for img2img images', component_args=hide_dirs),
-        "outdir_extras_samples": OptionInfo("outputs/extras-images", 'Output directory for images from extras tab', component_args=hide_dirs),
-        "outdir_grids": OptionInfo("", "Output directory for grids; if empty, defaults to two directories below", component_args=hide_dirs),
-        "outdir_txt2img_grids": OptionInfo("outputs/txt2img-grids", 'Output directory for txt2img grids', component_args=hide_dirs),
-        "outdir_img2img_grids": OptionInfo("outputs/img2img-grids", 'Output directory for img2img grids', component_args=hide_dirs),
-        "outdir_save": OptionInfo("log/images", "Directory for saving images using the Save button", component_args=hide_dirs),
-        "samples_save": OptionInfo(True, "Always save all generated images"),
-        "save_selected_only": OptionInfo(False, "When using 'Save' button, only save a single selected image"),
-        "samples_format": OptionInfo('png', 'File format for individual samples'),
-        "filter_nsfw": OptionInfo(False, "Filter NSFW content"),
-        "grid_save": OptionInfo(True, "Always save all generated image grids"),
-        "return_grid": OptionInfo(True, "Show grid in results for web"),
-        "grid_format": OptionInfo('png', 'File format for grids'),
-        "grid_extended_filename": OptionInfo(False, "Add extended info (seed, prompt) to filename when saving grid"),
-        "grid_only_if_multiple": OptionInfo(True, "Do not save grids consisting of one picture"),
-        "n_rows": OptionInfo(-1, "Grid row count; use -1 for autodetect and 0 for it to be same as batch size", gr.Slider, {"minimum": -1, "maximum": 16, "step": 1}),
-        "jpeg_quality": OptionInfo(80, "Quality for saved jpeg images", gr.Slider, {"minimum": 1, "maximum": 100, "step": 1}),
-        "export_for_4chan": OptionInfo(True, "If PNG image is larger than 4MB or any dimension is larger than 4000, downscale and save copy as JPG"),
-        "enable_pnginfo": OptionInfo(True, "Save text information about generation parameters as chunks to png files"),
-        "add_model_hash_to_info": OptionInfo(False, "Add model hash to generation information"),
-        "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
-        "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
-        "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
-        "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
-        "font": OptionInfo("", "Font for image grids that have text"),
-        "enable_emphasis": OptionInfo(True, "Use (text) to make model pay more attention to text and [text] to make it pay less attention"),
-        "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
-        "save_txt": OptionInfo(False, "Create a text file next to every image with generation parameters."),
-        "ESRGAN_tile": OptionInfo(192, "Tile size for ESRGAN upscalers. 0 = no tiling.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}),
-        "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
-        "realesrgan_enabled_models": OptionInfo(["Real-ESRGAN 4x plus", "Real-ESRGAN 4x plus anime 6B"], "Select which RealESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}),
-        "SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
-        "SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
-        "ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
-        "ldsr_pre_down":OptionInfo(1, "LDSR Pre-process downssample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
-        "ldsr_post_down":OptionInfo(1, "LDSR Post-process down-sample scale. 1 = no down-sampling, 4 = 1/4 scale.", gr.Slider, {"minimum": 1, "maximum": 4, "step": 1}),
-        "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
-        "upscaler_for_hires_fix": OptionInfo(None, "Upscaler for highres. fix", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
-        "show_progressbar": OptionInfo(True, "Show progressbar"),
-        "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
-        "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
-        "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step":1}),
-        "face_restoration_model": OptionInfo(None, "Face restoration model", gr.Radio, lambda: {"choices": [x.name() for x in face_restorers]}),
-        "code_former_weight": OptionInfo(0.5, "CodeFormer weight parameter; 0 = maximum effect; 1 = minimum effect", gr.Slider, {"minimum": 0, "maximum": 1, "step": 0.01}),
-        "save_images_before_face_restoration": OptionInfo(False, "Save a copy of image before doing face restoration."),
-        "face_restoration_unload": OptionInfo(False, "Move face restoration model from VRAM into RAM after processing"),
-        "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"),
-        "interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"),
-        "interrogate_clip_num_beams": OptionInfo(1, "Interrogate: num_beams for BLIP", gr.Slider, {"minimum": 1, "maximum": 16, "step": 1}),
-        "interrogate_clip_min_length": OptionInfo(24, "Interrogate: minimum description length (excluding artists, etc..)", gr.Slider, {"minimum": 1, "maximum": 128, "step": 1}),
-        "interrogate_clip_max_length": OptionInfo(48, "Interrogate: maximum description length", gr.Slider, {"minimum": 1, "maximum": 256, "step": 1}),
-        "interrogate_clip_dict_limit": OptionInfo(1500, "Interrogate: maximum number of lines in text file (0 = No limit)"),
-        "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Radio, lambda: {"choices": [x.title for x in modules.sd_models.checkpoints_list.values()]}),
-        "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
-        "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
-        "use_original_name_batch": OptionInfo(False, "Use original name for output filename during batch process"),
-    }
+    data_labels = options_templates
 
     def __init__(self):
         self.data = {k: v.default for k, v in self.data_labels.items()}
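In the modules/shared.py rewrite above, the single flat data_labels dict becomes a set of options_section() groups merged into options_templates; each OptionInfo is stamped with a (section_id, section_title) tuple that the UI can later group on. A minimal stand-alone sketch of that pattern (stripped-down stand-ins, not the real module):

```python
class OptionInfo:
    def __init__(self, default=None, label=""):
        self.default = default
        self.label = label
        self.section = None  # filled in by options_section()

def options_section(section_identifier, options_dict):
    # Tag every option with its (id, title) section tuple and hand the same
    # dict back, so it can be fed straight into options_templates.update().
    for v in options_dict.values():
        v.section = section_identifier
    return options_dict

options_templates = {}
options_templates.update(options_section(('system', "System"), {
    "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
}))

# Consumers (such as the settings page) can now group by section instead of
# slicing a flat key list:
from itertools import groupby
for section, items in groupby(options_templates.items(), key=lambda kv: kv[1].section):
    print(section[1], [k for k, _ in items])
```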
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 9123fca1..d2cf39ef 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -44,5 +44,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
 
     shared.total_tqdm.clear()
 
-    return processed.images, processed.js(), plaintext_to_html(processed.info)
+    generation_info_js = processed.js()
+    if opts.samples_log_stdout:
+        print(generation_info_js)
+
+    return processed.images, generation_info_js, plaintext_to_html(processed.info)
 
diff --git a/modules/ui.py b/modules/ui.py
index 8925fbcb..036f2ed3 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -22,7 +22,6 @@ from modules.paths import script_path
 from modules.shared import opts, cmd_opts
 import modules.shared as shared
 from modules.sd_samplers import samplers, samplers_for_img2img
-import modules.realesrgan_model as realesrgan
 import modules.ldsr_model
 import modules.scripts
 import modules.gfpgan_model
@@ -814,9 +813,6 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         return comp(label=info.label, value=fun, **(args or {}))
 
     components = []
-    keys = list(opts.data_labels.keys())
-    settings_cols = 3
-    items_per_col = math.ceil(len(keys) / settings_cols)
 
     def run_settings(*args):
         up = []
@@ -842,20 +838,33 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
         settings_submit = gr.Button(value="Apply settings", variant='primary')
         result = gr.HTML()
 
+        settings_cols = 3
+        items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols)
+
+        cols_displayed = 0
+        items_displayed = 0
+        previous_section = None
+        column = None
         with gr.Row(elem_id="settings").style(equal_height=False):
-            for colno in range(settings_cols):
-                with gr.Column(variant='panel'):
-                    for rowno in range(items_per_col):
-                        index = rowno + colno * items_per_col
+            for i, (k, item) in enumerate(opts.data_labels.items()):
 
-                        if index < len(keys):
-                            components.append(create_setting_component(keys[index]))
+                if previous_section != item.section:
+                    if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None):
+                        if column is not None:
+                            column.__exit__()
 
-        settings_submit.click(
-            fn=run_settings,
-            inputs=components,
-            outputs=[result]
-        )
+                        column = gr.Column(variant='panel')
+                        column.__enter__()
+
+                        items_displayed = 0
+                        cols_displayed += 1
+
+                    previous_section = item.section
+
+                    gr.HTML(elem_id="settings_header_text_{}".format(item.section[0]), value='<h1 class="gr-button-lg">{}</h1>'.format(item.section[1]))
+
+                components.append(create_setting_component(k))
+                items_displayed += 1
 
         request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications")
         request_notifications.click(
@@ -865,6 +874,15 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo):
             _js='function(){}'
         )
 
+        if column is not None:
+            column.__exit__()
+
+        settings_submit.click(
+            fn=run_settings,
+            inputs=components,
+            outputs=[result]
+        )
+
         interfaces = [
             (txt2img_interface, "txt2img", "txt2img"),
             (img2img_interface, "img2img", "img2img"),
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 08851f42..9719bb8f 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -11,7 +11,7 @@ from modules import images, processing, devices
 from modules.processing import Processed, process_images
 from modules.shared import opts, cmd_opts, state
 
-
+# https://github.com/parlance-zz/g-diffuser-bot
 def expand(x, dir, amount, power=0.75):
     is_left = dir == 3
     is_right = dir == 1
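The new settings layout in modules/ui.py above packs the options into up to settings_cols panel columns, breaking to a fresh column only when a new section starts and the current column already holds roughly its share of items. A stand-alone sketch of that packing rule with plain lists instead of Gradio columns (the option/section data below is invented):

```python
def pack_into_columns(items, settings_cols=3, fill_ratio=0.9):
    # Mirror of the column-breaking rule on the settings page: a new column
    # may open only at a section boundary, and only if the current column is
    # already "full enough" (or nothing has been placed yet).
    items_per_col = int(len(items) * fill_ratio / settings_cols)
    columns, current = [], None
    cols_opened = items_in_col = 0
    previous_section = None
    for key, section in items:
        if section != previous_section:
            if cols_opened < settings_cols and (items_in_col >= items_per_col or previous_section is None):
                if current is not None:
                    columns.append(current)
                current, items_in_col = [], 0
                cols_opened += 1
            previous_section = section
        current.append(key)
        items_in_col += 1
    if current is not None:
        columns.append(current)
    return columns

demo = [("samples_save", "saving"), ("grid_save", "saving"),
        ("show_progressbar", "ui"), ("font", "ui"),
        ("memmon_poll_rate", "system"), ("multiple_tqdm", "system")]
print(pack_into_columns(demo))
# -> [['samples_save', 'grid_save'], ['show_progressbar', 'font'],
#     ['memmon_poll_rate', 'multiple_tqdm']]
```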