Merge branch 'master' of github.com:AUTOMATIC1111/stable-diffusion-webui

Commit: 4005cd66e0
Author: unknown
Date: 2022-12-10 04:57:12 -06:00
5 changed files with 57 additions and 82 deletions


@@ -131,44 +131,13 @@ function ask_for_style_name(_, prompt_text, negative_prompt_text) {
     return [name_, prompt_text, negative_prompt_text]
 }
 
-// returns css id for currently selected tab in ui
-function selected_tab_id() {
-    tabs = gradioApp().querySelectorAll('#tabs div.tabitem')
-
-    for(var tab = 0; tab < tabs.length; tab++) {
-        if (tabs[tab].style.display != "none") return tabs[tab].id
-    }
-}
-
-function clear_prompt() {
-    if(confirm("Delete prompt?")) {
-        let pos_prompt = gradioApp().querySelector("#txt2img_prompt > label > textarea");
-        let neg_prompt = gradioApp().querySelector("#txt2img_neg_prompt > label > textarea");
-
-        if (selected_tab_id() == "tab_txt2img") {
-        } else {
-            pos_prompt = gradioApp().querySelector("#img2img_prompt > label > textarea");
-            neg_prompt = gradioApp().querySelector("#img2img_neg_prompt > label > textarea");
-        }
-
-        pos_prompt.value = ""
-        neg_prompt.value = ""
-
-        //update prompt values on server-side
-        pos_prompt.dispatchEvent(
-            new Event("input", {bubbles: true})
-        )
-        neg_prompt.dispatchEvent(
-            new Event("input", {bubbles: true})
-        )
-
-        return true
-    } else return false
+function confirm_clear_prompt(prompt, negative_prompt) {
+    if(confirm("Delete prompt?")) {
+        prompt = ""
+        negative_prompt = ""
+    }
+
+    return [prompt, negative_prompt]
 }
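For context on how the new confirm_clear_prompt function is consumed: a later hunk in this commit wires it to the clear-prompt button through gradio's _js hook, which runs the JavaScript in the browser first and feeds whatever array it returns back as the Python callback's inputs. Below is a minimal standalone sketch of that pattern, assuming gradio 3.x behavior (the version the webui used at the time); the inline arrow function stands in for confirm_clear_prompt, and the component names are illustrative, not taken from the repository.

import gradio as gr

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    clear = gr.Button("Clear prompt")

    clear.click(
        # the _js function runs client-side; the array it returns replaces
        # the `inputs` values before they reach `fn`
        _js="(p, n) => confirm('Delete prompt?') ? ['', ''] : [p, n]",
        # identity pass-through on the server, like fn=lambda *x: x in the diff below
        fn=lambda *x: x,
        inputs=[prompt, negative_prompt],
        outputs=[prompt, negative_prompt],
    )

demo.launch()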


@@ -501,30 +501,39 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
     image = params.image
     fullfn = params.filename
     info = params.pnginfo.get(pnginfo_section_name, None)
 
+    def _atomically_save_image(image_to_save, filename_without_extension, extension):
+        # save image with .tmp extension to avoid race condition when another process detects new image in the directory
+        temp_file_path = filename_without_extension + ".tmp"
+        image_format = Image.registered_extensions()[extension]
+
+        if extension.lower() == '.png':
+            pnginfo_data = PngImagePlugin.PngInfo()
+            if opts.enable_pnginfo:
+                for k, v in params.pnginfo.items():
+                    pnginfo_data.add_text(k, str(v))
+
+            image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
+
+        elif extension.lower() in (".jpg", ".jpeg", ".webp"):
+            image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality)
+
+            if opts.enable_pnginfo and info is not None:
+                exif_bytes = piexif.dump({
+                    "Exif": {
+                        piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
+                    },
+                })
+
+                piexif.insert(exif_bytes, temp_file_path)
+        else:
+            image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality)
+
+        # atomically rename the file with correct extension
+        os.replace(temp_file_path, filename_without_extension + extension)
+
     fullfn_without_extension, extension = os.path.splitext(params.filename)
 
-    def exif_bytes():
-        return piexif.dump({
-            "Exif": {
-                piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(info or "", encoding="unicode")
-            },
-        })
-
-    if extension.lower() == '.png':
-        pnginfo_data = PngImagePlugin.PngInfo()
-        if opts.enable_pnginfo:
-            for k, v in params.pnginfo.items():
-                pnginfo_data.add_text(k, str(v))
-
-        image.save(fullfn, quality=opts.jpeg_quality, pnginfo=pnginfo_data)
-
-    elif extension.lower() in (".jpg", ".jpeg", ".webp"):
-        image.save(fullfn, quality=opts.jpeg_quality)
-
-        if opts.enable_pnginfo and info is not None:
-            piexif.insert(exif_bytes(), fullfn)
-    else:
-        image.save(fullfn, quality=opts.jpeg_quality)
+    _atomically_save_image(image, fullfn_without_extension, extension)
 
     image.already_saved_as = fullfn
@@ -538,9 +547,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
     elif oversize:
         image = image.resize((image.width * target_side_length // image.height, target_side_length), LANCZOS)
 
-        image.save(fullfn_without_extension + ".jpg", quality=opts.jpeg_quality)
-
-        if opts.enable_pnginfo and info is not None:
-            piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
+        _atomically_save_image(image, fullfn_without_extension, ".jpg")
 
     if opts.save_txt and info is not None:
         txt_fullfn = f"{fullfn_without_extension}.txt"
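The comment in the new helper (in what is presumably modules/images.py) explains the intent: write to a .tmp name first and rename at the end, so another process watching the output directory never sees a half-written image. A stripped-down sketch of the same save-then-rename pattern, with illustrative names and without the webui's pnginfo/EXIF handling:

import os
from PIL import Image

def save_atomically(image: Image.Image, filename_without_extension: str, extension: str = ".png"):
    # write to a temporary name first so the final path never exists half-written
    temp_file_path = filename_without_extension + ".tmp"
    image_format = Image.registered_extensions()[extension]  # e.g. ".png" -> "PNG"
    image.save(temp_file_path, format=image_format)
    # os.replace is atomic on the same filesystem, so watchers only ever see the finished file
    os.replace(temp_file_path, filename_without_extension + extension)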


@@ -401,7 +401,6 @@ options_templates.update(options_section(('ui', "User interface"), {
     "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
     "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
     "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
-    "clear_prompt_visible": OptionInfo(True, "Show clear prompt button"),
     'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"),
     'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)),
 }))


@@ -403,16 +403,17 @@ def create_toprow(is_img2img):
            paste = gr.Button(value=paste_symbol, elem_id="paste")
            save_style = gr.Button(value=save_style_symbol, elem_id="style_create")
            prompt_style_apply = gr.Button(value=apply_style_symbol, elem_id="style_apply")
-            clear_prompt_button = gr.Button(
-                value=clear_prompt_symbol,
-                elem_id="clear_prompt",
-                visible=opts.clear_prompt_visible
-            )
+            clear_prompt_button = gr.Button(value=clear_prompt_symbol, elem_id=f"{id_part}_clear_prompt")
 
            token_counter = gr.HTML(value="<span></span>", elem_id=f"{id_part}_token_counter")
            token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
 
+            clear_prompt_button.click(
+                fn=lambda *x: x,
+                _js="confirm_clear_prompt",
+                inputs=[prompt, negative_prompt],
+                outputs=[prompt, negative_prompt],
+            )
+
        button_interrogate = None
        button_deepbooru = None
        if is_img2img:
@@ -447,7 +448,7 @@ def create_toprow(is_img2img):
            prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())))
            prompt_style2.save_to_config = True
 
-    return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button, clear_prompt_button
+    return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
 
 def setup_progressbar(progressbar, preview, id_part, textinfo=None):
@@ -634,7 +635,7 @@ def create_ui():
     modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False)
 
     with gr.Blocks(analytics_enabled=False) as txt2img_interface:
-        txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button, clear_prompt_button = create_toprow(is_img2img=False)
+        txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False)
 
         dummy_component = gr.Label(visible=False)
         txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False)
@@ -686,7 +687,6 @@ def create_ui():
         connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
         connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
-        connect_clear_prompt(clear_prompt_button)
 
         txt2img_args = dict(
             fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
@@ -793,7 +793,7 @@ def create_ui():
     modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True)
 
     with gr.Blocks(analytics_enabled=False) as img2img_interface:
-        img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button, clear_prompt_button = create_toprow(is_img2img=True)
+        img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = create_toprow(is_img2img=True)
 
         with gr.Row(elem_id='img2img_progress_row'):
@@ -884,7 +884,6 @@ def create_ui():
         connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False)
         connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
-        connect_clear_prompt(clear_prompt_button)
 
         img2img_prompt_img.change(
             fn=modules.images.image_data,


@@ -17,13 +17,14 @@ class Script(scripts.Script):
         return is_img2img
 
     def ui(self, is_img2img):
-        info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image to twice the dimensions; use width and height sliders to set tile size</p>")
+        info = gr.HTML("<p style=\"margin-bottom:0.75em\">Will upscale the image by the selected scale factor; use width and height sliders to set tile size</p>")
         overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
+        scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2)
         upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index")
 
-        return [info, overlap, upscaler_index]
+        return [info, overlap, upscaler_index, scale_factor]
 
-    def run(self, p, _, overlap, upscaler_index):
+    def run(self, p, _, overlap, upscaler_index, scale_factor):
         processing.fix_seed(p)
         upscaler = shared.sd_upscalers[upscaler_index]
@@ -34,9 +35,9 @@ class Script(scripts.Script):
         seed = p.seed
         init_img = p.init_images[0]
 
-        if(upscaler.name != "None"):
-            img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
+        if (upscaler.name != "None"):
+            img = upscaler.scaler.upscale(init_img, scale_factor, upscaler.data_path)
         else:
             img = init_img
@@ -69,7 +70,7 @@ class Script(scripts.Script):
         work_results = []
         for i in range(batch_count):
             p.batch_size = batch_size
-            p.init_images = work[i*batch_size:(i+1)*batch_size]
+            p.init_images = work[i * batch_size:(i + 1) * batch_size]
 
             state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}"
             processed = processing.process_images(p)
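The new scale_factor slider works because of how the webui script API passes values around: whatever list ui() returns is handed back to run() as positional arguments in the same order, which is why the component only needs to be appended to both the return list and the run() signature. A minimal sketch of that contract, assuming the same scripts interface this hunk uses; the class and its title are illustrative, only the ui()/run() pairing is the point.

import gradio as gr
import modules.scripts as scripts
from modules import processing

class ExampleScript(scripts.Script):
    def title(self):
        return "Example: extra slider"

    def ui(self, is_img2img):
        overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64)
        scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2)
        return [overlap, scale_factor]            # order of this list ...

    def run(self, p, overlap, scale_factor):      # ... must match the parameters here
        # p is the processing object; the slider values arrive positionally
        return processing.process_images(p)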