From 70931652a4289e28d83869b6d10cf11e80a70345 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Fri, 30 Sep 2022 18:02:46 -0700 Subject: [PATCH 001/461] [xy_grid] made -1 seed fixing apply to Var. seed too --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 146663b0..9c078888 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -218,7 +218,7 @@ class Script(scripts.Script): ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): - if axis_opt.label == 'Seed': + if axis_opt.label == 'Seed' or 'Var. seed': return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] else: return axis_list From cf141157e7b49b0b3a6e57dc7aa0d1345158b4c8 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Fri, 30 Sep 2022 22:02:29 -0700 Subject: [PATCH 002/461] Added X/Y plot parameters to extra_generation_params --- scripts/xy_grid.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 9c078888..d9f8d55b 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -244,6 +244,14 @@ class Script(scripts.Script): return process_images(pc) + if not x_opt.label == 'Nothing': + p.extra_generation_params["X/Y Plot X Type"] = x_opt.label + p.extra_generation_params["X Values"] = '{' + ", ".join([f'{x}' for x in xs]) + '}' + + if not y_opt.label == 'Nothing': + p.extra_generation_params["X/Y Plot Y Type"] = y_opt.label + p.extra_generation_params["Y Values"] = '{' + ", ".join([f'{y}' for y in ys]) + '}' + processed = draw_xy_grid( p, xs=xs, From eba0c29dbc3bad8c4e32f1fa3a03dc6f9caf1f5a Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 13:56:29 -0700 Subject: [PATCH 003/461] Updated xy_grid infotext formatting, parser regex --- modules/generation_parameters_copypaste.py | 2 +- scripts/xy_grid.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index ac1ba7f4..39d67d94 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,7 +1,7 @@ import re import gradio as gr -re_param_code = r"\s*([\w ]+):\s*([^,]+)(?:,|$)" +re_param_code = r"\s*([\w ]+):\s*((?:{[^}]+})|(?:[^,]+))(?:,|$)" re_param = re.compile(re_param_code) re_params = re.compile(r"^(?:" + re_param_code + "){3,}$") re_imagesize = re.compile(r"^(\d+)x(\d+)$") diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index d9f8d55b..f87c6c1f 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -245,12 +245,16 @@ class Script(scripts.Script): return process_images(pc) if not x_opt.label == 'Nothing': - p.extra_generation_params["X/Y Plot X Type"] = x_opt.label - p.extra_generation_params["X Values"] = '{' + ", ".join([f'{x}' for x in xs]) + '}' + p.extra_generation_params["XY Plot X Type"] = x_opt.label + p.extra_generation_params["X Values"] = '{' + x_values + '}' + if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds: + p.extra_generation_params["Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' if not y_opt.label == 'Nothing': - p.extra_generation_params["X/Y Plot Y Type"] = y_opt.label - p.extra_generation_params["Y Values"] = '{' + ", ".join([f'{y}' for y in ys]) + '}' + p.extra_generation_params["XY Plot Y Type"] = y_opt.label + p.extra_generation_params["Y Values"] = '{' + y_values + '}' + if y_opt.label in ["Seed","Var. 
seed"] and not no_fixed_seeds: + p.extra_generation_params["Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' processed = draw_xy_grid( p, From b99a4f769f11ed74df0344a23069d3858613fbef Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 14:26:12 -0700 Subject: [PATCH 004/461] fixed expression error in condition --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index f87c6c1f..f1f54d9c 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -218,7 +218,7 @@ class Script(scripts.Script): ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): - if axis_opt.label == 'Seed' or 'Var. seed': + if axis_opt.label in ["Seed","Var. seed"]: return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] else: return axis_list From f6a97868e57e44fba6c4283769fedd30ee11cacf Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 14:36:09 -0700 Subject: [PATCH 005/461] fix to allow empty {} values --- modules/generation_parameters_copypaste.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 39d67d94..27d58dfd 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,7 +1,7 @@ import re import gradio as gr -re_param_code = r"\s*([\w ]+):\s*((?:{[^}]+})|(?:[^,]+))(?:,|$)" +re_param_code = r"\s*([\w ]+):\s*((?:{[^}]*})|(?:[^,]+))(?:,|$)" re_param = re.compile(re_param_code) re_params = re.compile(r"^(?:" + re_param_code + "){3,}$") re_imagesize = re.compile(r"^(\d+)x(\d+)$") From fe6e2362e8fa5d739de6997ab155a26686d20a49 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sun, 2 Oct 2022 22:04:28 -0700 Subject: [PATCH 006/461] Update xy_grid.py Changed XY Plot infotext value keys to not be so generic. --- scripts/xy_grid.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index f1f54d9c..ae011a17 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -246,15 +246,15 @@ class Script(scripts.Script): if not x_opt.label == 'Nothing': p.extra_generation_params["XY Plot X Type"] = x_opt.label - p.extra_generation_params["X Values"] = '{' + x_values + '}' + p.extra_generation_params["XY Plot X Values"] = '{' + x_values + '}' if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds: - p.extra_generation_params["Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' + p.extra_generation_params["XY Plot Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' if not y_opt.label == 'Nothing': p.extra_generation_params["XY Plot Y Type"] = y_opt.label - p.extra_generation_params["Y Values"] = '{' + y_values + '}' + p.extra_generation_params["XY Plot Y Values"] = '{' + y_values + '}' if y_opt.label in ["Seed","Var. 
seed"] and not no_fixed_seeds: - p.extra_generation_params["Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' + p.extra_generation_params["XY Plot Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' processed = draw_xy_grid( p, From 14c1c2b9351f16d43ba4e6b6c9062edad44a6bec Mon Sep 17 00:00:00 2001 From: Alexandre Simard Date: Wed, 19 Oct 2022 13:53:52 -0400 Subject: [PATCH 007/461] Show PB texts at same time and earlier For big tasks (1000+ steps), waiting 1 minute to see ETA is long and this changes it so the number of steps done plays a role in showing the text as well. --- modules/ui.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index a2dbd41e..0abd177a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -261,14 +261,14 @@ def wrap_gradio_call(func, extra_outputs=None): return f -def calc_time_left(progress, threshold, label, force_display): +def calc_time_left(progress, threshold, label, force_display, showTime): if progress == 0: return "" else: time_since_start = time.time() - shared.state.time_start eta = (time_since_start/progress) eta_relative = eta-time_since_start - if (eta_relative > threshold and progress > 0.02) or force_display: + if (eta_relative > threshold and showTime) or force_display: if eta_relative > 3600: return label + time.strftime('%H:%M:%S', time.gmtime(eta_relative)) elif eta_relative > 60: @@ -290,7 +290,10 @@ def check_progress_call(id_part): if shared.state.sampling_steps > 0: progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps - time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display ) + # Show progress percentage and time left at the same moment, and base it also on steps done + showPBText = progress >= 0.01 or shared.state.sampling_step >= 10 + + time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display, showPBText ) if time_left != "": shared.state.time_left_force_display = True @@ -298,7 +301,7 @@ def check_progress_call(id_part): progressbar = "" if opts.show_progressbar: - progressbar = f"""
{" " * 2 + str(int(progress*100))+"%" + time_left if progress > 0.01 else ""}"""
+            progressbar = f"""{" " * 2 + str(int(progress*100))+"%" + time_left if showPBText else ""}
""" image = gr_show(False) preview_visibility = gr_show(False) From 4fbdbddc18b21f712acae58bf41740d27023285f Mon Sep 17 00:00:00 2001 From: Alexandre Simard Date: Wed, 19 Oct 2022 15:21:36 -0400 Subject: [PATCH 008/461] Remove pad spaces from progress bar text --- javascript/progressbar.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 7a05726e..24ab4795 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -10,7 +10,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){ if(progressbar.innerText){ - let newtitle = 'Stable Diffusion - ' + progressbar.innerText + let newtitle = 'Stable Diffusion - ' + progressbar.innerText.slice(2) if(document.title != newtitle){ document.title = newtitle; } From 29e74d6e71826da9a3fe3c5790fed1329fc4d1e8 Mon Sep 17 00:00:00 2001 From: Melan Date: Thu, 20 Oct 2022 16:26:16 +0200 Subject: [PATCH 009/461] Add support for Tensorboard for training embeddings --- modules/shared.py | 4 +++ .../textual_inversion/textual_inversion.py | 31 ++++++++++++++++++- 2 files changed, 34 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index faede821..2c6341f7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -254,6 +254,10 @@ options_templates.update(options_section(('training', "Training"), { "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), "training_write_csv_every": OptionInfo(500, "Save an csv containing the loss to log directory every N steps, 0 to disable"), + "training_enable_tensorboard": OptionInfo(False, "Enable tensorboard logging."), + "training_tensorboard_save_images": OptionInfo(False, "Save generated images within tensorboard."), + "training_tensorboard_flush_every": OptionInfo(120, "How often, in seconds, to flush the pending tensorboard events and summaries to disk."), + })) options_templates.update(options_section(('sd', "Stable Diffusion"), { diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 3be69562..c57d3ace 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -7,9 +7,11 @@ import tqdm import html import datetime import csv +import numpy as np +import torchvision.transforms from PIL import Image, PngImagePlugin - +from torch.utils.tensorboard import SummaryWriter from modules import shared, devices, sd_hijack, processing, sd_models import modules.textual_inversion.dataset from modules.textual_inversion.learn_schedule import LearnRateScheduler @@ -199,6 +201,19 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) +def tensorboard_add_scaler(tensorboard_writer, tag, value, step): + if shared.opts.training_enable_tensorboard: + tensorboard_writer.add_scalar(tag=tag, + scalar_value=value, global_step=step) + +def tensorboard_add_image(tensorboard_writer, tag, pil_image, step): + if shared.opts.training_enable_tensorboard: + # Convert a pil image to a torch tensor + img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) + img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands())) + img_tensor = img_tensor.permute((2, 0, 1)) + + 
tensorboard_writer.add_image(tag, img_tensor, global_step=step) def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert embedding_name, 'embedding not selected' @@ -252,6 +267,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) + if shared.opts.training_enable_tensorboard: + os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True) + tensorboard_writer = SummaryWriter( + log_dir=os.path.join(log_directory, "tensorboard"), + flush_secs=shared.opts.training_tensorboard_flush_every) + pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) for i, entries in pbar: embedding.step = i + ititial_step @@ -270,6 +291,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc del x losses[embedding.step % losses.shape[0]] = loss.item() + optimizer.zero_grad() loss.backward() @@ -285,6 +307,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding.save(last_saved_file) embedding_yet_to_be_embedded = True + if shared.opts.training_enable_tensorboard: + tensorboard_add_scaler(tensorboard_writer, "Loss/train", losses.mean(), embedding.step) + tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", losses.mean(), epoch_step) + tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", scheduler.learn_rate, embedding.step) + tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", scheduler.learn_rate, epoch_step) + write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { "loss": f"{losses.mean():.7f}", "learn_rate": scheduler.learn_rate @@ -349,6 +377,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = False image.save(last_saved_image) + tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step) last_saved_image += f", prompt: {preview_text}" From a6d593a6b51dc6a8443f2aa5c24caa391a04cd56 Mon Sep 17 00:00:00 2001 From: Melan Date: Thu, 20 Oct 2022 19:43:21 +0200 Subject: [PATCH 010/461] Fixed a typo in a variable --- modules/textual_inversion/textual_inversion.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c57d3ace..ec8176bf 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -260,11 +260,11 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc last_saved_image = "" embedding_yet_to_be_embedded = False - ititial_step = embedding.step or 0 - if ititial_step > steps: + initial_step = embedding.step or 0 + if initial_step > steps: return embedding, filename - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) if shared.opts.training_enable_tensorboard: @@ -273,9 +273,9 @@ def 
train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc log_dir=os.path.join(log_directory, "tensorboard"), flush_secs=shared.opts.training_tensorboard_flush_every) - pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) + pbar = tqdm.tqdm(enumerate(ds), total=steps-initial_step) for i, entries in pbar: - embedding.step = i + ititial_step + embedding.step = i + initial_step scheduler.apply(optimizer, embedding.step) if scheduler.finished: From 8f5912984794c4c69e429c4636e984854d911b6a Mon Sep 17 00:00:00 2001 From: Melan Date: Thu, 20 Oct 2022 22:37:16 +0200 Subject: [PATCH 011/461] Some changes to the tensorboard code and hypernetwork support --- modules/hypernetworks/hypernetwork.py | 18 +++++++- .../textual_inversion/textual_inversion.py | 45 +++++++++++-------- 2 files changed, 44 insertions(+), 19 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 74300122..5e919775 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -4,6 +4,7 @@ import html import os import sys import traceback +import tensorboard import tqdm import csv @@ -18,7 +19,6 @@ import modules.textual_inversion.dataset from modules.textual_inversion import textual_inversion from modules.textual_inversion.learn_schedule import LearnRateScheduler - class HypernetworkModule(torch.nn.Module): multiplier = 1.0 @@ -291,6 +291,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) + if shared.opts.training_enable_tensorboard: + tensorboard_writer = textual_inversion.tensorboard_setup(log_directory) + pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) for i, entries in pbar: hypernetwork.step = i + ititial_step @@ -315,6 +318,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log optimizer.zero_grad() loss.backward() optimizer.step() + mean_loss = losses.mean() if torch.isnan(mean_loss): raise RuntimeError("Loss diverged.") @@ -323,6 +327,14 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0: last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt') hypernetwork.save(last_saved_file) + + if shared.opts.training_enable_tensorboard: + epoch_num = hypernetwork.step // len(ds) + epoch_step = hypernetwork.step - (epoch_num * len(ds)) + 1 + + textual_inversion.tensorboard_add(tensorboard_writer, loss=mean_loss, + global_step=hypernetwork.step, step=epoch_step, + learn_rate=scheduler.learn_rate, epoch_num=epoch_num) textual_inversion.write_loss(log_directory, "hypernetwork_loss.csv", hypernetwork.step, len(ds), { "loss": f"{mean_loss:.7f}", @@ -360,6 +372,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log processed = processing.process_images(p) image = processed.images[0] if len(processed.images)>0 else None + if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: + textual_inversion.tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", + image, hypernetwork.step) + if unload: shared.sd_model.cond_stage_model.to(devices.cpu) shared.sd_model.first_stage_model.to(devices.cpu) diff --git a/modules/textual_inversion/textual_inversion.py 
b/modules/textual_inversion/textual_inversion.py index ec8176bf..b1dc2596 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -201,19 +201,30 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) +def tensorboard_setup(log_directory): + os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True) + return SummaryWriter( + log_dir=os.path.join(log_directory, "tensorboard"), + flush_secs=shared.opts.training_tensorboard_flush_every) + +def tensorboard_add(tensorboard_writer, loss, global_step, step, learn_rate, epoch_num): + tensorboard_add_scaler(tensorboard_writer, "Loss/train", loss, global_step) + tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", loss, step) + tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", learn_rate, global_step) + tensorboard_add_scaler(tensorboard_writer, f"Learn rate/train/epoch-{epoch_num}", learn_rate, step) + def tensorboard_add_scaler(tensorboard_writer, tag, value, step): - if shared.opts.training_enable_tensorboard: - tensorboard_writer.add_scalar(tag=tag, - scalar_value=value, global_step=step) + tensorboard_writer.add_scalar(tag=tag, + scalar_value=value, global_step=step) def tensorboard_add_image(tensorboard_writer, tag, pil_image, step): - if shared.opts.training_enable_tensorboard: - # Convert a pil image to a torch tensor - img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) - img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], len(pil_image.getbands())) - img_tensor = img_tensor.permute((2, 0, 1)) + # Convert a pil image to a torch tensor + img_tensor = torch.as_tensor(np.array(pil_image, copy=True)) + img_tensor = img_tensor.view(pil_image.size[1], pil_image.size[0], + len(pil_image.getbands())) + img_tensor = img_tensor.permute((2, 0, 1)) - tensorboard_writer.add_image(tag, img_tensor, global_step=step) + tensorboard_writer.add_image(tag, img_tensor, global_step=step) def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert embedding_name, 'embedding not selected' @@ -268,10 +279,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) if shared.opts.training_enable_tensorboard: - os.makedirs(os.path.join(log_directory, "tensorboard"), exist_ok=True) - tensorboard_writer = SummaryWriter( - log_dir=os.path.join(log_directory, "tensorboard"), - flush_secs=shared.opts.training_tensorboard_flush_every) + tensorboard_writer = tensorboard_setup(log_directory) pbar = tqdm.tqdm(enumerate(ds), total=steps-initial_step) for i, entries in pbar: @@ -308,10 +316,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = True if shared.opts.training_enable_tensorboard: - tensorboard_add_scaler(tensorboard_writer, "Loss/train", losses.mean(), embedding.step) - tensorboard_add_scaler(tensorboard_writer, f"Loss/train/epoch-{epoch_num}", losses.mean(), epoch_step) - tensorboard_add_scaler(tensorboard_writer, "Learn rate/train", scheduler.learn_rate, embedding.step) - tensorboard_add_scaler(tensorboard_writer, f"Learn 
rate/train/epoch-{epoch_num}", scheduler.learn_rate, epoch_step) + tensorboard_add(tensorboard_writer, loss=losses.mean(), global_step=embedding.step, + step=epoch_step, learn_rate=scheduler.learn_rate, epoch_num=epoch_num) write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, len(ds), { "loss": f"{losses.mean():.7f}", @@ -377,7 +383,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc embedding_yet_to_be_embedded = False image.save(last_saved_image) - tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", image, embedding.step) + + if shared.opts.training_enable_tensorboard and shared.opts.training_tensorboard_save_images: + tensorboard_add_image(tensorboard_writer, f"Validation at epoch {epoch_num}", + image, embedding.step) last_saved_image += f", prompt: {preview_text}" From 7543cf5e3b5eaced00582da257801227d1ff2a6e Mon Sep 17 00:00:00 2001 From: Melan Date: Thu, 20 Oct 2022 22:43:08 +0200 Subject: [PATCH 012/461] Fixed some typos in the code --- modules/hypernetworks/hypernetwork.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 5e919775..0cd94f49 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -284,19 +284,19 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log last_saved_file = "" last_saved_image = "" - ititial_step = hypernetwork.step or 0 - if ititial_step > steps: + initial_step = hypernetwork.step or 0 + if initial_step > steps: return hypernetwork, filename - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) if shared.opts.training_enable_tensorboard: tensorboard_writer = textual_inversion.tensorboard_setup(log_directory) - pbar = tqdm.tqdm(enumerate(ds), total=steps - ititial_step) + pbar = tqdm.tqdm(enumerate(ds), total=steps - initial_step) for i, entries in pbar: - hypernetwork.step = i + ititial_step + hypernetwork.step = i + initial_step scheduler.apply(optimizer, hypernetwork.step) if scheduler.finished: From 18f86e41f6f289042c075bff1498e620ab997b8c Mon Sep 17 00:00:00 2001 From: Melan Date: Mon, 24 Oct 2022 17:21:18 +0200 Subject: [PATCH 013/461] Removed two unused imports --- modules/hypernetworks/hypernetwork.py | 1 - modules/textual_inversion/textual_inversion.py | 1 - 2 files changed, 2 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 0cd94f49..2263e95e 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -4,7 +4,6 @@ import html import os import sys import traceback -import tensorboard import tqdm import csv diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index b1dc2596..589314fe 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -9,7 +9,6 @@ import datetime import csv import numpy as np -import torchvision.transforms from PIL import Image, PngImagePlugin from torch.utils.tensorboard import SummaryWriter from modules import shared, devices, sd_hijack, processing, sd_models From c4b5ca5778340b21288d84dfb8fe1d5773c886a8 Mon Sep 17 00:00:00 2001 From: Yuta Hayashibe Date: Thu, 27 Oct 2022 22:00:28 +0900 Subject: [PATCH 014/461] 
Truncate too long filename --- modules/images.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/modules/images.py b/modules/images.py index 7870b5b7..42363ed3 100644 --- a/modules/images.py +++ b/modules/images.py @@ -416,6 +416,14 @@ def get_next_sequence_number(path, basename): return result + 1 +def truncate_fullpath(full_path, encoding='utf-8'): + dir_name, full_name = os.path.split(full_path) + file_name, file_ext = os.path.splitext(full_name) + max_length = os.statvfs(dir_name).f_namemax + file_name_truncated = file_name.encode(encoding)[:max_length - len(file_ext)].decode(encoding, 'ignore') + return os.path.join(dir_name , file_name_truncated + file_ext) + + def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None): """Save an image. @@ -456,7 +464,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if save_to_dirs: dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /') - path = os.path.join(path, dirname) + path = truncate_fullpath(os.path.join(path, dirname)) os.makedirs(path, exist_ok=True) @@ -480,13 +488,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i fullfn = None for i in range(500): fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}" - fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}") + fullfn = truncate_fullpath(os.path.join(path, f"{fn}{file_decoration}.{extension}")) if not os.path.exists(fullfn): break else: - fullfn = os.path.join(path, f"{file_decoration}.{extension}") + fullfn = truncate_fullpath(os.path.join(path, f"{file_decoration}.{extension}")) else: - fullfn = os.path.join(path, f"{forced_filename}.{extension}") + fullfn = truncate_fullpath(os.path.join(path, f"{forced_filename}.{extension}")) pnginfo = existing_info or {} if info is not None: From 2a25729623717cc499e873752d9f4ebebd1e1078 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Fri, 28 Oct 2022 09:44:56 +0700 Subject: [PATCH 015/461] Gradient clipping in train tab --- modules/hypernetworks/hypernetwork.py | 10 +++++++++- modules/ui.py | 7 +++++++ 2 files changed, 16 insertions(+), 1 deletion(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 8113b35b..c5d60654 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -327,7 +327,7 @@ def report_statistics(loss_info:dict): -def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. 
Importing it at the top causes a circular import problem. from modules import images @@ -384,6 +384,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if ititial_step > steps: return hypernetwork, filename + clip_grad_mode_value = clip_grad_mode == "value" + clip_grad_mode_norm = clip_grad_mode == "norm" + scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) @@ -426,6 +429,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log steps_without_grad = 0 assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' + if clip_grad_mode_value: + torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_value) + elif clip_grad_mode_norm: + torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_value) + optimizer.step() if torch.isnan(losses[hypernetwork.step % losses.shape[0]]): diff --git a/modules/ui.py b/modules/ui.py index 0a63e357..97de7da2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1313,6 +1313,9 @@ def create_ui(wrap_gradio_gpu_call): training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512) training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512) steps = gr.Number(label='Max steps', value=100000, precision=0) + with gr.Row(): + clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) + clip_grad_value = gr.Number(value=1.0, show_label=False) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) @@ -1406,6 +1409,8 @@ def create_ui(wrap_gradio_gpu_call): training_width, training_height, steps, + clip_grad_mode, + clip_grad_value, create_image_every, save_embedding_every, template_file, @@ -1431,6 +1436,8 @@ def create_ui(wrap_gradio_gpu_call): training_width, training_height, steps, + clip_grad_mode, + clip_grad_value, create_image_every, save_embedding_every, template_file, From a133042c669f666763f5da0f4440abdc839db653 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Fri, 28 Oct 2022 10:01:46 +0700 Subject: [PATCH 016/461] Forgot to remove this from train_embedding --- modules/ui.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 97de7da2..ba5e92a7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1409,8 +1409,6 @@ def create_ui(wrap_gradio_gpu_call): training_width, training_height, steps, - clip_grad_mode, - clip_grad_value, create_image_every, save_embedding_every, template_file, From 1618df41bad092e068c61bf510b1e20856821ad5 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Fri, 28 Oct 2022 10:31:27 +0700 Subject: [PATCH 017/461] Gradient clipping for textual embedding --- modules/textual_inversion/textual_inversion.py | 11 ++++++++++- modules/ui.py | 2 ++ 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index ff002d3e..7bad73a6 100644 --- a/modules/textual_inversion/textual_inversion.py +++ 
b/modules/textual_inversion/textual_inversion.py @@ -206,7 +206,7 @@ def write_loss(log_directory, filename, step, epoch_len, values): }) -def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): +def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): assert embedding_name, 'embedding not selected' shared.state.textinfo = "Initializing textual inversion training..." @@ -256,6 +256,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc if ititial_step > steps: return embedding, filename + clip_grad_mode_value = clip_grad_mode == "value" + clip_grad_mode_norm = clip_grad_mode == "norm" + scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) @@ -280,6 +283,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc optimizer.zero_grad() loss.backward() + + if clip_grad_mode_value: + torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value) + elif clip_grad_mode_norm: + torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value) + optimizer.step() diff --git a/modules/ui.py b/modules/ui.py index ba5e92a7..97de7da2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1409,6 +1409,8 @@ def create_ui(wrap_gradio_gpu_call): training_width, training_height, steps, + clip_grad_mode, + clip_grad_value, create_image_every, save_embedding_every, template_file, From 16451ca573220e49f2eaaab97580b6b91287c8c4 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Fri, 28 Oct 2022 17:16:23 +0700 Subject: [PATCH 018/461] Learning rate sched syntax support for grad clipping --- modules/hypernetworks/hypernetwork.py | 13 ++++++++++--- modules/textual_inversion/learn_schedule.py | 11 ++++++++--- modules/textual_inversion/textual_inversion.py | 12 +++++++++--- modules/ui.py | 7 +++---- 4 files changed, 30 insertions(+), 13 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index c5d60654..86532063 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -383,11 +383,15 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log ititial_step = hypernetwork.step or 0 if ititial_step > steps: return hypernetwork, filename - + clip_grad_mode_value = clip_grad_mode == "value" clip_grad_mode_norm = clip_grad_mode == "norm" + clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm + if clip_grad_enabled: + clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + # if optimizer == "AdamW": or else Adam / AdamW / SGD, etc... 
optimizer = torch.optim.AdamW(weights, lr=scheduler.learn_rate) @@ -407,6 +411,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if shared.state.interrupted: break + if clip_grad_enabled: + clip_grad_sched.step(hypernetwork.step) + with torch.autocast("cuda"): c = stack_conds([entry.cond for entry in entries]).to(devices.device) # c = torch.vstack([entry.cond for entry in entries]).to(devices.device) @@ -430,9 +437,9 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' if clip_grad_mode_value: - torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_value) + torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_sched.learn_rate) elif clip_grad_mode_norm: - torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_value) + torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_sched.learn_rate) optimizer.step() diff --git a/modules/textual_inversion/learn_schedule.py b/modules/textual_inversion/learn_schedule.py index 2062726a..ffec3e1b 100644 --- a/modules/textual_inversion/learn_schedule.py +++ b/modules/textual_inversion/learn_schedule.py @@ -51,14 +51,19 @@ class LearnRateScheduler: self.finished = False - def apply(self, optimizer, step_number): + def step(self, step_number): if step_number <= self.end_step: - return + return False try: (self.learn_rate, self.end_step) = next(self.schedules) - except Exception: + except StopIteration: self.finished = True + return False + return True + + def apply(self, optimizer, step_number): + if not self.step(step_number): return if self.verbose: diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 7bad73a6..6b00c6a1 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -255,9 +255,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc ititial_step = embedding.step or 0 if ititial_step > steps: return embedding, filename - + clip_grad_mode_value = clip_grad_mode == "value" clip_grad_mode_norm = clip_grad_mode == "norm" + clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm + if clip_grad_enabled: + clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate) @@ -273,6 +276,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc if shared.state.interrupted: break + if clip_grad_enabled: + clip_grad_sched.step(embedding.step) + with torch.autocast("cuda"): c = cond_model([entry.cond_text for entry in entries]) x = torch.stack([entry.latent for entry in entries]).to(devices.device) @@ -285,9 +291,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc loss.backward() if clip_grad_mode_value: - torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_value) + torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_sched.learn_rate) elif clip_grad_mode_norm: - torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_value) + torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_sched.learn_rate) optimizer.step() diff --git a/modules/ui.py b/modules/ui.py index 97de7da2..47d16429 100644 --- 
a/modules/ui.py +++ b/modules/ui.py @@ -1305,7 +1305,9 @@ def create_ui(wrap_gradio_gpu_call): with gr.Row(): embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005") hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001") - + with gr.Row(): + clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) + clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="1.0", show_label=False) batch_size = gr.Number(label='Batch size', value=1, precision=0) dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") @@ -1313,9 +1315,6 @@ def create_ui(wrap_gradio_gpu_call): training_width = gr.Slider(minimum=64, maximum=2048, step=64, label="Width", value=512) training_height = gr.Slider(minimum=64, maximum=2048, step=64, label="Height", value=512) steps = gr.Number(label='Max steps', value=100000, precision=0) - with gr.Row(): - clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Number(value=1.0, show_label=False) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) From 840307f23738c38f7ac3ad636e53ccec66e71f8b Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Mon, 31 Oct 2022 13:49:24 +0700 Subject: [PATCH 019/461] Change default clip grad value to 0.1 It still defaults to disabled. 
Ref for value: https://github.com/danielalcalde/stable-diffusion-webui/commit/732b15820a9bde9f47e075a6209c3d47d47acb08 --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 98f9565f..364953aa 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1256,7 +1256,7 @@ def create_ui(wrap_gradio_gpu_call): hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001") with gr.Row(): clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) - clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="1.0", show_label=False) + clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) batch_size = gr.Number(label='Batch size', value=1, precision=0) dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") From 4123be632a98f70cda06e14c2f556f7ad38cd436 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Mon, 31 Oct 2022 13:53:22 +0700 Subject: [PATCH 020/461] Fix merge conflicts --- modules/hypernetworks/hypernetwork.py | 17 ++++++----------- 1 file changed, 6 insertions(+), 11 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 65a584bb..207808ee 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -373,6 +373,12 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) + clip_grad_mode_value = clip_grad_mode == "value" + clip_grad_mode_norm = clip_grad_mode == "norm" + clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm + if clip_grad_enabled: + clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) + # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
with torch.autocast("cuda"): @@ -389,21 +395,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log previous_mean_loss = 0 print("Mean loss of {} elements".format(size)) - last_saved_file = "" - last_saved_image = "" - forced_filename = "" - ititial_step = hypernetwork.step or 0 if ititial_step > steps: return hypernetwork, filename - clip_grad_mode_value = clip_grad_mode == "value" - clip_grad_mode_norm = clip_grad_mode == "norm" - clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm - if clip_grad_enabled: - clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) - - scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) weights = hypernetwork.weights() for weight in weights: From d5ea878b2aa117588d85287cbd8983aa52177df5 Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Mon, 31 Oct 2022 13:54:40 +0700 Subject: [PATCH 021/461] Fix merge conflicts --- modules/hypernetworks/hypernetwork.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 207808ee..2df38c70 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -395,11 +395,6 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log previous_mean_loss = 0 print("Mean loss of {} elements".format(size)) - ititial_step = hypernetwork.step or 0 - if ititial_step > steps: - return hypernetwork, filename - - weights = hypernetwork.weights() for weight in weights: weight.requires_grad = True From cffc240a7327ae60671ff533469fc4ed4bf605de Mon Sep 17 00:00:00 2001 From: Nerogar Date: Sun, 23 Oct 2022 14:05:25 +0200 Subject: [PATCH 022/461] fixed textual inversion training with inpainting models --- .../textual_inversion/textual_inversion.py | 27 ++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 0aeb0459..2630c7c9 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -224,6 +224,26 @@ def validate_train_inputs(model_name, learn_rate, batch_size, data_root, templat if save_model_every or create_image_every: assert log_directory, "Log directory is empty" +def create_dummy_mask(x, width=None, height=None): + if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}: + + # The "masked-image" in this case will just be all zeros since the entire image is masked. + image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) + image_conditioning = shared.sd_model.get_first_stage_encoding(shared.sd_model.encode_first_stage(image_conditioning)) + + # Add the fake full 1s mask to the first dimension. + image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) + image_conditioning = image_conditioning.to(x.dtype) + + else: + # Dummy zero conditioning if we're not using inpainting model. + # Still takes up a bit of memory, but no encoder call. + # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. 
+ image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) + + return image_conditioning + + def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): save_embedding_every = save_embedding_every or 0 create_image_every = create_image_every or 0 @@ -286,6 +306,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc forced_filename = "" embedding_yet_to_be_embedded = False + img_c = None pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) for i, entries in pbar: embedding.step = i + ititial_step @@ -299,8 +320,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc with torch.autocast("cuda"): c = cond_model([entry.cond_text for entry in entries]) + if img_c is None: + img_c = create_dummy_mask(c, training_width, training_height) + x = torch.stack([entry.latent for entry in entries]).to(devices.device) - loss = shared.sd_model(x, c)[0] + cond = {"c_concat": [img_c], "c_crossattn": [c]} + loss = shared.sd_model(x, cond)[0] del x losses[embedding.step % losses.shape[0]] = loss.item() From d624cb82a7c65a1ea04e4b6e23f0164a3ba25e25 Mon Sep 17 00:00:00 2001 From: Ikko Ashimine Date: Thu, 3 Nov 2022 01:05:00 +0900 Subject: [PATCH 023/461] Fix typo in ui.js interation -> interaction --- javascript/ui.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/ui.js b/javascript/ui.js index 7e116465..0308dce3 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -1,4 +1,4 @@ -// various functions for interation with ui.py not large enough to warrant putting them in separate files +// various functions for interaction with ui.py not large enough to warrant putting them in separate files function set_theme(theme){ gradioURL = window.location.href From bb832d7725187f8a8ab44faa6ee1b38cb5f600aa Mon Sep 17 00:00:00 2001 From: Muhammad Rizqi Nur Date: Sat, 5 Nov 2022 11:48:38 +0700 Subject: [PATCH 024/461] Simplify grad clip --- modules/hypernetworks/hypernetwork.py | 16 +++++++--------- modules/textual_inversion/textual_inversion.py | 16 +++++++--------- 2 files changed, 14 insertions(+), 18 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index f4c2668f..02b624e1 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -385,10 +385,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - clip_grad_mode_value = clip_grad_mode == "value" - clip_grad_mode_norm = clip_grad_mode == "norm" - clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm - if clip_grad_enabled: + clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \ + torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ + None + if clip_grad: clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) # dataset loading may take a while, so input validations and early returns should be done before this @@ -433,7 +433,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log if shared.state.interrupted: break - if 
clip_grad_enabled: + if clip_grad: clip_grad_sched.step(hypernetwork.step) with torch.autocast("cuda"): @@ -458,10 +458,8 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, data_root, log steps_without_grad = 0 assert steps_without_grad < 10, 'no gradient found for the trained weight after backward() for 10 steps in a row; this is a bug; training cannot continue' - if clip_grad_mode_value: - torch.nn.utils.clip_grad_value_(weights, clip_value=clip_grad_sched.learn_rate) - elif clip_grad_mode_norm: - torch.nn.utils.clip_grad_norm_(weights, max_norm=clip_grad_sched.learn_rate) + if clip_grad: + clip_grad(weights, clip_grad_sched.learn_rate) optimizer.step() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c567ec3f..687d97bb 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -269,10 +269,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc scheduler = LearnRateScheduler(learn_rate, steps, ititial_step) - clip_grad_mode_value = clip_grad_mode == "value" - clip_grad_mode_norm = clip_grad_mode == "norm" - clip_grad_enabled = clip_grad_mode_value or clip_grad_mode_norm - if clip_grad_enabled: + clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \ + torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ + None + if clip_grad: clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." @@ -302,7 +302,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc if shared.state.interrupted: break - if clip_grad_enabled: + if clip_grad: clip_grad_sched.step(embedding.step) with torch.autocast("cuda"): @@ -316,10 +316,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, data_root, log_direc optimizer.zero_grad() loss.backward() - if clip_grad_mode_value: - torch.nn.utils.clip_grad_value_(embedding.vec, clip_value=clip_grad_sched.learn_rate) - elif clip_grad_mode_norm: - torch.nn.utils.clip_grad_norm_(embedding.vec, max_norm=clip_grad_sched.learn_rate) + if clip_grad: + clip_grad(embedding.vec, clip_grad_sched.learn_rate) optimizer.step() From 75c4511e6b81ae8fb0dbd932043e8eb35cd09f72 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 29 Nov 2022 10:28:41 +0800 Subject: [PATCH 025/461] add AltDiffusion to webui Signed-off-by: zhaohu xing <920232796@qq.com> --- configs/altdiffusion/ad-inference.yaml | 72 + configs/stable-diffusion/v1-inference.yaml | 71 + ldm/data/__init__.py | 0 ldm/data/base.py | 23 + ldm/data/imagenet.py | 394 +++++ ldm/data/lsun.py | 92 ++ ldm/lr_scheduler.py | 98 ++ ldm/models/autoencoder.py | 443 +++++ ldm/models/diffusion/__init__.py | 0 ldm/models/diffusion/classifier.py | 267 +++ ldm/models/diffusion/ddim.py | 241 +++ ldm/models/diffusion/ddpm.py | 1445 +++++++++++++++++ ldm/models/diffusion/dpm_solver/__init__.py | 1 + ldm/models/diffusion/dpm_solver/dpm_solver.py | 1184 ++++++++++++++ ldm/models/diffusion/dpm_solver/sampler.py | 82 + ldm/models/diffusion/plms.py | 236 +++ ldm/modules/attention.py | 261 +++ ldm/modules/diffusionmodules/__init__.py | 0 ldm/modules/diffusionmodules/model.py | 835 ++++++++++ ldm/modules/diffusionmodules/openaimodel.py | 961 +++++++++++ 
ldm/modules/diffusionmodules/util.py | 267 +++ ldm/modules/distributions/__init__.py | 0 ldm/modules/distributions/distributions.py | 92 ++ ldm/modules/ema.py | 76 + ldm/modules/encoders/__init__.py | 0 ldm/modules/encoders/modules.py | 234 +++ ldm/modules/encoders/xlmr.py | 137 ++ ldm/modules/image_degradation/__init__.py | 2 + ldm/modules/image_degradation/bsrgan.py | 730 +++++++++ ldm/modules/image_degradation/bsrgan_light.py | 650 ++++++++ ldm/modules/image_degradation/utils/test.png | Bin 0 -> 441072 bytes ldm/modules/image_degradation/utils_image.py | 916 +++++++++++ ldm/modules/losses/__init__.py | 1 + ldm/modules/losses/contperceptual.py | 111 ++ ldm/modules/losses/vqperceptual.py | 167 ++ ldm/modules/x_transformer.py | 641 ++++++++ ldm/util.py | 203 +++ modules/devices.py | 4 +- modules/sd_hijack.py | 23 +- modules/shared.py | 6 +- 40 files changed, 10957 insertions(+), 9 deletions(-) create mode 100644 configs/altdiffusion/ad-inference.yaml create mode 100644 configs/stable-diffusion/v1-inference.yaml create mode 100644 ldm/data/__init__.py create mode 100644 ldm/data/base.py create mode 100644 ldm/data/imagenet.py create mode 100644 ldm/data/lsun.py create mode 100644 ldm/lr_scheduler.py create mode 100644 ldm/models/autoencoder.py create mode 100644 ldm/models/diffusion/__init__.py create mode 100644 ldm/models/diffusion/classifier.py create mode 100644 ldm/models/diffusion/ddim.py create mode 100644 ldm/models/diffusion/ddpm.py create mode 100644 ldm/models/diffusion/dpm_solver/__init__.py create mode 100644 ldm/models/diffusion/dpm_solver/dpm_solver.py create mode 100644 ldm/models/diffusion/dpm_solver/sampler.py create mode 100644 ldm/models/diffusion/plms.py create mode 100644 ldm/modules/attention.py create mode 100644 ldm/modules/diffusionmodules/__init__.py create mode 100644 ldm/modules/diffusionmodules/model.py create mode 100644 ldm/modules/diffusionmodules/openaimodel.py create mode 100644 ldm/modules/diffusionmodules/util.py create mode 100644 ldm/modules/distributions/__init__.py create mode 100644 ldm/modules/distributions/distributions.py create mode 100644 ldm/modules/ema.py create mode 100644 ldm/modules/encoders/__init__.py create mode 100644 ldm/modules/encoders/modules.py create mode 100644 ldm/modules/encoders/xlmr.py create mode 100644 ldm/modules/image_degradation/__init__.py create mode 100644 ldm/modules/image_degradation/bsrgan.py create mode 100644 ldm/modules/image_degradation/bsrgan_light.py create mode 100644 ldm/modules/image_degradation/utils/test.png create mode 100644 ldm/modules/image_degradation/utils_image.py create mode 100644 ldm/modules/losses/__init__.py create mode 100644 ldm/modules/losses/contperceptual.py create mode 100644 ldm/modules/losses/vqperceptual.py create mode 100644 ldm/modules/x_transformer.py create mode 100644 ldm/util.py diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/altdiffusion/ad-inference.yaml new file mode 100644 index 00000000..1b11b63e --- /dev/null +++ b/configs/altdiffusion/ad-inference.yaml @@ -0,0 +1,72 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 
warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. ] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.xlmr.BertSeriesModelWithTransformation + params: + name: "XLMR-Large" \ No newline at end of file diff --git a/configs/stable-diffusion/v1-inference.yaml b/configs/stable-diffusion/v1-inference.yaml new file mode 100644 index 00000000..2e6ef0f2 --- /dev/null +++ b/configs/stable-diffusion/v1-inference.yaml @@ -0,0 +1,71 @@ +model: + base_learning_rate: 1.0e-04 + target: ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false # Note: different from the one we trained before + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False + + scheduler_config: # 10000 warmup steps + target: ldm.lr_scheduler.LambdaLinearScheduler + params: + warm_up_steps: [ 10000 ] + cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases + f_start: [ 1.e-6 ] + f_max: [ 1. ] + f_min: [ 1. 
] + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_heads: 8 + use_spatial_transformer: True + transformer_depth: 1 + context_dim: 768 + use_checkpoint: True + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + # target: ldm.modules.encoders.modules.FrozenCLIPEmbedder + target: altclip.model.AltCLIPEmbedder \ No newline at end of file diff --git a/ldm/data/__init__.py b/ldm/data/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/data/base.py b/ldm/data/base.py new file mode 100644 index 00000000..b196c2f7 --- /dev/null +++ b/ldm/data/base.py @@ -0,0 +1,23 @@ +from abc import abstractmethod +from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset + + +class Txt2ImgIterableBaseDataset(IterableDataset): + ''' + Define an interface to make the IterableDatasets for text2img data chainable + ''' + def __init__(self, num_records=0, valid_ids=None, size=256): + super().__init__() + self.num_records = num_records + self.valid_ids = valid_ids + self.sample_ids = valid_ids + self.size = size + + print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') + + def __len__(self): + return self.num_records + + @abstractmethod + def __iter__(self): + pass \ No newline at end of file diff --git a/ldm/data/imagenet.py b/ldm/data/imagenet.py new file mode 100644 index 00000000..1c473f9c --- /dev/null +++ b/ldm/data/imagenet.py @@ -0,0 +1,394 @@ +import os, yaml, pickle, shutil, tarfile, glob +import cv2 +import albumentations +import PIL +import numpy as np +import torchvision.transforms.functional as TF +from omegaconf import OmegaConf +from functools import partial +from PIL import Image +from tqdm import tqdm +from torch.utils.data import Dataset, Subset + +import taming.data.utils as tdu +from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve +from taming.data.imagenet import ImagePaths + +from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light + + +def synset2idx(path_to_yaml="data/index_synset.yaml"): + with open(path_to_yaml) as f: + di2s = yaml.load(f) + return dict((v,k) for k,v in di2s.items()) + + +class ImageNetBase(Dataset): + def __init__(self, config=None): + self.config = config or OmegaConf.create() + if not type(self.config)==dict: + self.config = OmegaConf.to_container(self.config) + self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) + self.process_images = True # if False we skip loading & processing images and self.data contains filepaths + self._prepare() + self._prepare_synset_to_human() + self._prepare_idx_to_synset() + self._prepare_human_to_integer_label() + self._load() + + def __len__(self): + return len(self.data) + + def __getitem__(self, i): + return self.data[i] + + def _prepare(self): + raise NotImplementedError() + + def _filter_relpaths(self, relpaths): + ignore = set([ + "n06596364_9591.JPEG", + ]) + relpaths = [rpath for rpath in relpaths if not 
rpath.split("/")[-1] in ignore] + if "sub_indices" in self.config: + indices = str_to_indices(self.config["sub_indices"]) + synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings + self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) + files = [] + for rpath in relpaths: + syn = rpath.split("/")[0] + if syn in synsets: + files.append(rpath) + return files + else: + return relpaths + + def _prepare_synset_to_human(self): + SIZE = 2655750 + URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" + self.human_dict = os.path.join(self.root, "synset_human.txt") + if (not os.path.exists(self.human_dict) or + not os.path.getsize(self.human_dict)==SIZE): + download(URL, self.human_dict) + + def _prepare_idx_to_synset(self): + URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" + self.idx2syn = os.path.join(self.root, "index_synset.yaml") + if (not os.path.exists(self.idx2syn)): + download(URL, self.idx2syn) + + def _prepare_human_to_integer_label(self): + URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" + self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") + if (not os.path.exists(self.human2integer)): + download(URL, self.human2integer) + with open(self.human2integer, "r") as f: + lines = f.read().splitlines() + assert len(lines) == 1000 + self.human2integer_dict = dict() + for line in lines: + value, key = line.split(":") + self.human2integer_dict[key] = int(value) + + def _load(self): + with open(self.txt_filelist, "r") as f: + self.relpaths = f.read().splitlines() + l1 = len(self.relpaths) + self.relpaths = self._filter_relpaths(self.relpaths) + print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) + + self.synsets = [p.split("/")[0] for p in self.relpaths] + self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] + + unique_synsets = np.unique(self.synsets) + class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) + if not self.keep_orig_class_label: + self.class_labels = [class_dict[s] for s in self.synsets] + else: + self.class_labels = [self.synset2idx[s] for s in self.synsets] + + with open(self.human_dict, "r") as f: + human_dict = f.read().splitlines() + human_dict = dict(line.split(maxsplit=1) for line in human_dict) + + self.human_labels = [human_dict[s] for s in self.synsets] + + labels = { + "relpath": np.array(self.relpaths), + "synsets": np.array(self.synsets), + "class_label": np.array(self.class_labels), + "human_label": np.array(self.human_labels), + } + + if self.process_images: + self.size = retrieve(self.config, "size", default=256) + self.data = ImagePaths(self.abspaths, + labels=labels, + size=self.size, + random_crop=self.random_crop, + ) + else: + self.data = self.abspaths + + +class ImageNetTrain(ImageNetBase): + NAME = "ILSVRC2012_train" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" + FILES = [ + "ILSVRC2012_img_train.tar", + ] + SIZES = [ + 147897477120, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.process_images = process_images + self.data_root = data_root + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + + self.datadir = 
os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 1281167 + self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", + default=True) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + print("Extracting sub-tars.") + subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) + for subpath in tqdm(subpaths): + subdir = subpath[:-len(".tar")] + os.makedirs(subdir, exist_ok=True) + with tarfile.open(subpath, "r:") as tar: + tar.extractall(path=subdir) + + filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + +class ImageNetValidation(ImageNetBase): + NAME = "ILSVRC2012_validation" + URL = "http://www.image-net.org/challenges/LSVRC/2012/" + AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" + VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" + FILES = [ + "ILSVRC2012_img_val.tar", + "validation_synset.txt", + ] + SIZES = [ + 6744924160, + 1950000, + ] + + def __init__(self, process_images=True, data_root=None, **kwargs): + self.data_root = data_root + self.process_images = process_images + super().__init__(**kwargs) + + def _prepare(self): + if self.data_root: + self.root = os.path.join(self.data_root, self.NAME) + else: + cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) + self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) + self.datadir = os.path.join(self.root, "data") + self.txt_filelist = os.path.join(self.root, "filelist.txt") + self.expected_length = 50000 + self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", + default=False) + if not tdu.is_prepared(self.root): + # prep + print("Preparing dataset {} in {}".format(self.NAME, self.root)) + + datadir = self.datadir + if not os.path.exists(datadir): + path = os.path.join(self.root, self.FILES[0]) + if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: + import academictorrents as at + atpath = at.get(self.AT_HASH, datastore=self.root) + assert atpath == path + + print("Extracting {} to {}".format(path, datadir)) + os.makedirs(datadir, exist_ok=True) + with tarfile.open(path, "r:") as tar: + tar.extractall(path=datadir) + + vspath = os.path.join(self.root, self.FILES[1]) + if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: + download(self.VS_URL, vspath) + + with open(vspath, "r") as f: + synset_dict = f.read().splitlines() + synset_dict = dict(line.split() for line in synset_dict) + + print("Reorganizing into synset folders") + synsets = np.unique(list(synset_dict.values())) + for s in synsets: + os.makedirs(os.path.join(datadir, s), exist_ok=True) + for k, v in synset_dict.items(): + src = os.path.join(datadir, k) + dst = os.path.join(datadir, v) + shutil.move(src, dst) + + 
filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) + filelist = [os.path.relpath(p, start=datadir) for p in filelist] + filelist = sorted(filelist) + filelist = "\n".join(filelist)+"\n" + with open(self.txt_filelist, "w") as f: + f.write(filelist) + + tdu.mark_prepared(self.root) + + + +class ImageNetSR(Dataset): + def __init__(self, size=None, + degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., + random_crop=True): + """ + Imagenet Superresolution Dataloader + Performs following ops in order: + 1. crops a crop of size s from image either as random or center crop + 2. resizes crop to size with cv2.area_interpolation + 3. degrades resized crop with degradation_fn + + :param size: resizing to size after cropping + :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light + :param downscale_f: Low Resolution Downsample factor + :param min_crop_f: determines crop size s, + where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f) + :param max_crop_f: "" + :param data_root: + :param random_crop: + """ + self.base = self.get_base() + assert size + assert (size / downscale_f).is_integer() + self.size = size + self.LR_size = int(size / downscale_f) + self.min_crop_f = min_crop_f + self.max_crop_f = max_crop_f + assert(max_crop_f <= 1.) + self.center_crop = not random_crop + + self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA) + + self.pil_interpolation = False # gets reset later if incase interp_op is from pillow + + if degradation == "bsrgan": + self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) + + elif degradation == "bsrgan_light": + self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) + + else: + interpolation_fn = { + "cv_nearest": cv2.INTER_NEAREST, + "cv_bilinear": cv2.INTER_LINEAR, + "cv_bicubic": cv2.INTER_CUBIC, + "cv_area": cv2.INTER_AREA, + "cv_lanczos": cv2.INTER_LANCZOS4, + "pil_nearest": PIL.Image.NEAREST, + "pil_bilinear": PIL.Image.BILINEAR, + "pil_bicubic": PIL.Image.BICUBIC, + "pil_box": PIL.Image.BOX, + "pil_hamming": PIL.Image.HAMMING, + "pil_lanczos": PIL.Image.LANCZOS, + }[degradation] + + self.pil_interpolation = degradation.startswith("pil_") + + if self.pil_interpolation: + self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn) + + else: + self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, + interpolation=interpolation_fn) + + def __len__(self): + return len(self.base) + + def __getitem__(self, i): + example = self.base[i] + image = Image.open(example["file_path_"]) + + if not image.mode == "RGB": + image = image.convert("RGB") + + image = np.array(image).astype(np.uint8) + + min_side_len = min(image.shape[:2]) + crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None) + crop_side_len = int(crop_side_len) + + if self.center_crop: + self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len) + + else: + self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len) + + image = self.cropper(image=image)["image"] + image = self.image_rescaler(image=image)["image"] + + if self.pil_interpolation: + image_pil = PIL.Image.fromarray(image) + LR_image = self.degradation_process(image_pil) + LR_image = np.array(LR_image).astype(np.uint8) + + else: + LR_image = self.degradation_process(image=image)["image"] + + example["image"] = (image/127.5 - 1.0).astype(np.float32) + 
example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32) + + return example + + +class ImageNetSRTrain(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_train_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetTrain(process_images=False,) + return Subset(dset, indices) + + +class ImageNetSRValidation(ImageNetSR): + def __init__(self, **kwargs): + super().__init__(**kwargs) + + def get_base(self): + with open("data/imagenet_val_hr_indices.p", "rb") as f: + indices = pickle.load(f) + dset = ImageNetValidation(process_images=False,) + return Subset(dset, indices) diff --git a/ldm/data/lsun.py b/ldm/data/lsun.py new file mode 100644 index 00000000..6256e457 --- /dev/null +++ b/ldm/data/lsun.py @@ -0,0 +1,92 @@ +import os +import numpy as np +import PIL +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + + +class LSUNBase(Dataset): + def __init__(self, + txt_file, + data_root, + size=None, + interpolation="bicubic", + flip_p=0.5 + ): + self.data_paths = txt_file + self.data_root = data_root + with open(self.data_paths, "r") as f: + self.image_paths = f.read().splitlines() + self._length = len(self.image_paths) + self.labels = { + "relative_file_path_": [l for l in self.image_paths], + "file_path_": [os.path.join(self.data_root, l) + for l in self.image_paths], + } + + self.size = size + self.interpolation = {"linear": PIL.Image.LINEAR, + "bilinear": PIL.Image.BILINEAR, + "bicubic": PIL.Image.BICUBIC, + "lanczos": PIL.Image.LANCZOS, + }[interpolation] + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + def __len__(self): + return self._length + + def __getitem__(self, i): + example = dict((k, self.labels[k][i]) for k in self.labels) + image = Image.open(example["file_path_"]) + if not image.mode == "RGB": + image = image.convert("RGB") + + # default to score-sde preprocessing + img = np.array(image).astype(np.uint8) + crop = min(img.shape[0], img.shape[1]) + h, w, = img.shape[0], img.shape[1] + img = img[(h - crop) // 2:(h + crop) // 2, + (w - crop) // 2:(w + crop) // 2] + + image = Image.fromarray(img) + if self.size is not None: + image = image.resize((self.size, self.size), resample=self.interpolation) + + image = self.flip(image) + image = np.array(image).astype(np.uint8) + example["image"] = (image / 127.5 - 1.0).astype(np.float32) + return example + + +class LSUNChurchesTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) + + +class LSUNChurchesValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", + flip_p=flip_p, **kwargs) + + +class LSUNBedroomsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) + + +class LSUNBedroomsValidation(LSUNBase): + def __init__(self, flip_p=0.0, **kwargs): + super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", + flip_p=flip_p, **kwargs) + + +class LSUNCatsTrain(LSUNBase): + def __init__(self, **kwargs): + super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) + + +class LSUNCatsValidation(LSUNBase): + def __init__(self, flip_p=0., **kwargs): + super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", + 
flip_p=flip_p, **kwargs) diff --git a/ldm/lr_scheduler.py b/ldm/lr_scheduler.py new file mode 100644 index 00000000..be39da9c --- /dev/null +++ b/ldm/lr_scheduler.py @@ -0,0 +1,98 @@ +import numpy as np + + +class LambdaWarmUpCosineScheduler: + """ + note: use with a base_lr of 1.0 + """ + def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): + self.lr_warm_up_steps = warm_up_steps + self.lr_start = lr_start + self.lr_min = lr_min + self.lr_max = lr_max + self.lr_max_decay_steps = max_decay_steps + self.last_lr = 0. + self.verbosity_interval = verbosity_interval + + def schedule(self, n, **kwargs): + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") + if n < self.lr_warm_up_steps: + lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start + self.last_lr = lr + return lr + else: + t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) + t = min(t, 1.0) + lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( + 1 + np.cos(t * np.pi)) + self.last_lr = lr + return lr + + def __call__(self, n, **kwargs): + return self.schedule(n,**kwargs) + + +class LambdaWarmUpCosineScheduler2: + """ + supports repeated iterations, configurable via lists + note: use with a base_lr of 1.0. + """ + def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): + assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) + self.lr_warm_up_steps = warm_up_steps + self.f_start = f_start + self.f_min = f_min + self.f_max = f_max + self.cycle_lengths = cycle_lengths + self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) + self.last_f = 0. 
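+        # last_f caches the most recent multiplier returned by schedule();
+        # verbosity_interval (set just below) controls how often that value is printed.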
+ self.verbosity_interval = verbosity_interval + + def find_in_interval(self, n): + interval = 0 + for cl in self.cum_cycles[1:]: + if n <= cl: + return interval + interval += 1 + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) + t = min(t, 1.0) + f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( + 1 + np.cos(t * np.pi)) + self.last_f = f + return f + + def __call__(self, n, **kwargs): + return self.schedule(n, **kwargs) + + +class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): + + def schedule(self, n, **kwargs): + cycle = self.find_in_interval(n) + n = n - self.cum_cycles[cycle] + if self.verbosity_interval > 0: + if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " + f"current cycle {cycle}") + + if n < self.lr_warm_up_steps[cycle]: + f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] + self.last_f = f + return f + else: + f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) + self.last_f = f + return f + diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py new file mode 100644 index 00000000..6a9c4f45 --- /dev/null +++ b/ldm/models/autoencoder.py @@ -0,0 +1,443 @@ +import torch +import pytorch_lightning as pl +import torch.nn.functional as F +from contextlib import contextmanager + +from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer + +from ldm.modules.diffusionmodules.model import Encoder, Decoder +from ldm.modules.distributions.distributions import DiagonalGaussianDistribution + +from ldm.util import instantiate_from_config + + +class VQModel(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + n_embed, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + batch_resize_range=None, + scheduler_config=None, + lr_g_factor=1.0, + remap=None, + sane_index_shape=False, # tell vector quantizer to return indices as bhw + use_ema=False + ): + super().__init__() + self.embed_dim = embed_dim + self.n_embed = n_embed + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, + remap=remap, + sane_index_shape=sane_index_shape) + self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + self.batch_resize_range = batch_resize_range + if self.batch_resize_range is not None: + print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") + + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self) 
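+            # LitEma (from ldm.modules.ema) tracks exponential moving averages of the
+            # parameters; note it is not imported at the top of this file as added here,
+            # so enabling use_ema would raise a NameError unless that import is added.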
+ print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + self.scheduler_config = scheduler_config + self.lr_g_factor = lr_g_factor + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.parameters()) + self.model_ema.copy_to(self) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + print(f"Unexpected Keys: {unexpected}") + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self) + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + quant, emb_loss, info = self.quantize(h) + return quant, emb_loss, info + + def encode_to_prequant(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, quant): + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + def decode_code(self, code_b): + quant_b = self.quantize.embed_code(code_b) + dec = self.decode(quant_b) + return dec + + def forward(self, input, return_pred_indices=False): + quant, diff, (_,_,ind) = self.encode(input) + dec = self.decode(quant) + if return_pred_indices: + return dec, diff, ind + return dec, diff + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + if self.batch_resize_range is not None: + lower_size = self.batch_resize_range[0] + upper_size = self.batch_resize_range[1] + if self.global_step <= 4: + # do the first few batches with max size to avoid later oom + new_resize = upper_size + else: + new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) + if new_resize != x.shape[2]: + x = F.interpolate(x, size=new_resize, mode="bicubic") + x = x.detach() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + # https://github.com/pytorch/pytorch/issues/37142 + # try not to fool the heuristics + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + + if optimizer_idx == 0: + # autoencode + aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train", + predicted_indices=ind) + + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return aeloss + + if optimizer_idx == 1: + # discriminator + discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) + return discloss + + def validation_step(self, batch, batch_idx): + log_dict = self._validation_step(batch, batch_idx) + with self.ema_scope(): + log_dict_ema = self._validation_step(batch, 
batch_idx, suffix="_ema") + return log_dict + + def _validation_step(self, batch, batch_idx, suffix=""): + x = self.get_input(batch, self.image_key) + xrec, qloss, ind = self(x, return_pred_indices=True) + aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + + discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, + self.global_step, + last_layer=self.get_last_layer(), + split="val"+suffix, + predicted_indices=ind + ) + rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] + self.log(f"val{suffix}/rec_loss", rec_loss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + self.log(f"val{suffix}/aeloss", aeloss, + prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) + if version.parse(pl.__version__) >= version.parse('1.4.0'): + del log_dict_ae[f"val{suffix}/rec_loss"] + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr_d = self.learning_rate + lr_g = self.lr_g_factor*self.learning_rate + print("lr_d", lr_d) + print("lr_g", lr_g) + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quantize.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr_g, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr_d, betas=(0.5, 0.9)) + + if self.scheduler_config is not None: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + { + 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }, + ] + return [opt_ae, opt_disc], scheduler + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if only_inputs: + log["inputs"] = x + return log + xrec, _ = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["inputs"] = x + log["reconstructions"] = xrec + if plot_ema: + with self.ema_scope(): + xrec_ema, _ = self(x) + if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) + log["reconstructions_ema"] = xrec_ema + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
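+        # x has been projected to 3 channels via the random 'colorize' buffer and
+        # min-max rescaled to [-1, 1] for image logging.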
+ return x + + +class VQModelInterface(VQModel): + def __init__(self, embed_dim, *args, **kwargs): + super().__init__(embed_dim=embed_dim, *args, **kwargs) + self.embed_dim = embed_dim + + def encode(self, x): + h = self.encoder(x) + h = self.quant_conv(h) + return h + + def decode(self, h, force_not_quantize=False): + # also go through quantization layer + if not force_not_quantize: + quant, emb_loss, info = self.quantize(h) + else: + quant = h + quant = self.post_quant_conv(quant) + dec = self.decoder(quant) + return dec + + +class AutoencoderKL(pl.LightningModule): + def __init__(self, + ddconfig, + lossconfig, + embed_dim, + ckpt_path=None, + ignore_keys=[], + image_key="image", + colorize_nlabels=None, + monitor=None, + ): + super().__init__() + self.image_key = image_key + self.encoder = Encoder(**ddconfig) + self.decoder = Decoder(**ddconfig) + self.loss = instantiate_from_config(lossconfig) + assert ddconfig["double_z"] + self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) + self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) + self.embed_dim = embed_dim + if colorize_nlabels is not None: + assert type(colorize_nlabels)==int + self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) + + def init_from_ckpt(self, path, ignore_keys=list()): + sd = torch.load(path, map_location="cpu")["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + self.load_state_dict(sd, strict=False) + print(f"Restored from {path}") + + def encode(self, x): + h = self.encoder(x) + moments = self.quant_conv(h) + posterior = DiagonalGaussianDistribution(moments) + return posterior + + def decode(self, z): + z = self.post_quant_conv(z) + dec = self.decoder(z) + return dec + + def forward(self, input, sample_posterior=True): + posterior = self.encode(input) + if sample_posterior: + z = posterior.sample() + else: + z = posterior.mode() + dec = self.decode(z) + return dec, posterior + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() + return x + + def training_step(self, batch, batch_idx, optimizer_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + + if optimizer_idx == 0: + # train encoder+decoder+logvar + aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return aeloss + + if optimizer_idx == 1: + # train the discriminator + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, + last_layer=self.get_last_layer(), split="train") + + self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) + self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) + return discloss + + def validation_step(self, batch, batch_idx): + inputs = self.get_input(batch, self.image_key) + reconstructions, posterior = self(inputs) + aeloss, log_dict_ae = self.loss(inputs, 
reconstructions, posterior, 0, self.global_step, + last_layer=self.get_last_layer(), split="val") + + discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, + last_layer=self.get_last_layer(), split="val") + + self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) + self.log_dict(log_dict_ae) + self.log_dict(log_dict_disc) + return self.log_dict + + def configure_optimizers(self): + lr = self.learning_rate + opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ + list(self.decoder.parameters())+ + list(self.quant_conv.parameters())+ + list(self.post_quant_conv.parameters()), + lr=lr, betas=(0.5, 0.9)) + opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), + lr=lr, betas=(0.5, 0.9)) + return [opt_ae, opt_disc], [] + + def get_last_layer(self): + return self.decoder.conv_out.weight + + @torch.no_grad() + def log_images(self, batch, only_inputs=False, **kwargs): + log = dict() + x = self.get_input(batch, self.image_key) + x = x.to(self.device) + if not only_inputs: + xrec, posterior = self(x) + if x.shape[1] > 3: + # colorize with random projection + assert xrec.shape[1] > 3 + x = self.to_rgb(x) + xrec = self.to_rgb(xrec) + log["samples"] = self.decode(torch.randn_like(posterior.sample())) + log["reconstructions"] = xrec + log["inputs"] = x + return log + + def to_rgb(self, x): + assert self.image_key == "segmentation" + if not hasattr(self, "colorize"): + self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) + x = F.conv2d(x, weight=self.colorize) + x = 2.*(x-x.min())/(x.max()-x.min()) - 1. + return x + + +class IdentityFirstStage(torch.nn.Module): + def __init__(self, *args, vq_interface=False, **kwargs): + self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff + super().__init__() + + def encode(self, x, *args, **kwargs): + return x + + def decode(self, x, *args, **kwargs): + return x + + def quantize(self, x, *args, **kwargs): + if self.vq_interface: + return x, None, [None, None, None] + return x + + def forward(self, x, *args, **kwargs): + return x diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py new file mode 100644 index 00000000..67e98b9d --- /dev/null +++ b/ldm/models/diffusion/classifier.py @@ -0,0 +1,267 @@ +import os +import torch +import pytorch_lightning as pl +from omegaconf import OmegaConf +from torch.nn import functional as F +from torch.optim import AdamW +from torch.optim.lr_scheduler import LambdaLR +from copy import deepcopy +from einops import rearrange +from glob import glob +from natsort import natsorted + +from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel +from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config + +__models__ = { + 'class_label': EncoderUNetModel, + 'segmentation': UNetModel +} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +class NoisyLatentImageClassifier(pl.LightningModule): + + def __init__(self, + diffusion_path, + num_classes, + ckpt_path=None, + pool='attention', + label_key=None, + diffusion_ckpt_path=None, + scheduler_config=None, + weight_decay=1.e-2, + log_steps=10, + monitor='val/loss', + *args, + **kwargs): + super().__init__(*args, **kwargs) + self.num_classes = num_classes + # get latest 
config of diffusion model + diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] + self.diffusion_config = OmegaConf.load(diffusion_config).model + self.diffusion_config.params.ckpt_path = diffusion_ckpt_path + self.load_diffusion() + + self.monitor = monitor + self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 + self.log_time_interval = self.diffusion_model.num_timesteps // log_steps + self.log_steps = log_steps + + self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ + else self.diffusion_model.cond_stage_key + + assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' + + if self.label_key not in __models__: + raise NotImplementedError() + + self.load_classifier(ckpt_path, pool) + + self.scheduler_config = scheduler_config + self.use_scheduler = self.scheduler_config is not None + self.weight_decay = weight_decay + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def load_diffusion(self): + model = instantiate_from_config(self.diffusion_config) + self.diffusion_model = model.eval() + self.diffusion_model.train = disabled_train + for param in self.diffusion_model.parameters(): + param.requires_grad = False + + def load_classifier(self, ckpt_path, pool): + model_config = deepcopy(self.diffusion_config.params.unet_config.params) + model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels + model_config.out_channels = self.num_classes + if self.label_key == 'class_label': + model_config.pool = pool + + self.model = __models__[self.label_key](**model_config) + if ckpt_path is not None: + print('#####################################################################') + print(f'load from ckpt "{ckpt_path}"') + print('#####################################################################') + self.init_from_ckpt(ckpt_path) + + @torch.no_grad() + def get_x_noisy(self, x, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x)) + continuous_sqrt_alpha_cumprod = None + if self.diffusion_model.use_continuous_noise: + continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) + # todo: make sure t+1 is correct here + + return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, + continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) + + def forward(self, x_noisy, t, *args, **kwargs): + return self.model(x_noisy, t) + + @torch.no_grad() + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = x.to(memory_format=torch.contiguous_format).float() + return x + + @torch.no_grad() + def get_conditioning(self, batch, k=None): + if k is None: + k = self.label_key + assert k is not None, 'Needs to provide label key' + + targets = batch[k].to(self.device) + + if 
self.label_key == 'segmentation': + targets = rearrange(targets, 'b h w c -> b c h w') + for down in range(self.numd): + h, w = targets.shape[-2:] + targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') + + # targets = rearrange(targets,'b c h w -> b h w c') + + return targets + + def compute_top_k(self, logits, labels, k, reduction="mean"): + _, top_ks = torch.topk(logits, k, dim=1) + if reduction == "mean": + return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() + elif reduction == "none": + return (top_ks == labels[:, None]).float().sum(dim=-1) + + def on_train_epoch_start(self): + # save some memory + self.diffusion_model.model.to('cpu') + + @torch.no_grad() + def write_logs(self, loss, logits, targets): + log_prefix = 'train' if self.training else 'val' + log = {} + log[f"{log_prefix}/loss"] = loss.mean() + log[f"{log_prefix}/acc@1"] = self.compute_top_k( + logits, targets, k=1, reduction="mean" + ) + log[f"{log_prefix}/acc@5"] = self.compute_top_k( + logits, targets, k=5, reduction="mean" + ) + + self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) + self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) + self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) + + def shared_step(self, batch, t=None): + x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) + targets = self.get_conditioning(batch) + if targets.dim() == 4: + targets = targets.argmax(dim=1) + if t is None: + t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() + else: + t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() + x_noisy = self.get_x_noisy(x, t) + logits = self(x_noisy, t) + + loss = F.cross_entropy(logits, targets, reduction='none') + + self.write_logs(loss.detach(), logits.detach(), targets.detach()) + + loss = loss.mean() + return loss, logits, x_noisy, targets + + def training_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + return loss + + def reset_noise_accs(self): + self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in + range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} + + def on_validation_start(self): + self.reset_noise_accs() + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + loss, *_ = self.shared_step(batch) + + for t in self.noisy_acc: + _, logits, _, targets = self.shared_step(batch, t) + self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) + self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) + + return loss + + def configure_optimizers(self): + optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) + + if self.use_scheduler: + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [optimizer], scheduler + + return optimizer + + @torch.no_grad() + def log_images(self, batch, N=8, *args, **kwargs): + log = dict() + x = self.get_input(batch, self.diffusion_model.first_stage_key) + log['inputs'] = x + + y = self.get_conditioning(batch) + + if self.label_key == 'class_label': 
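+            # render the human-readable class names as an image with the same spatial
+            # size as the inputs so they can be logged alongside them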
+ y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['labels'] = y + + if ismap(y): + log['labels'] = self.diffusion_model.to_rgb(y) + + for step in range(self.log_steps): + current_time = step * self.log_time_interval + + _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) + + log[f'inputs@t{current_time}'] = x_noisy + + pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) + pred = rearrange(pred, 'b h w c -> b c h w') + + log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) + + for key in log: + log[key] = log[key][:N] + + return log diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py new file mode 100644 index 00000000..fb31215d --- /dev/null +++ b/ldm/models/diffusion/ddim.py @@ -0,0 +1,241 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ + extract_into_tensor + + +class DDIMSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. 
- ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for DDIM sampling is {size}, eta {eta}') + + samples, intermediates = self.ddim_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def ddim_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + img, pred_x0 = outs + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None): + b, *_, device = *x.shape, x.device + + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + @torch.no_grad() + def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): + # fast, but does not allow for exact reconstruction + # t serves as an index to gather the correct alphas + if use_original_steps: + sqrt_alphas_cumprod = self.sqrt_alphas_cumprod + sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod + else: + sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) + sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas + + if noise is None: + noise = torch.randn_like(x0) + return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) + + @torch.no_grad() + def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, + use_original_steps=False): + + timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps + timesteps = timesteps[:t_start] + + time_range = np.flip(timesteps) + total_steps = timesteps.shape[0] + print(f"Running DDIM Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='Decoding image', total=total_steps) + x_dec = x_latent + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) + x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning) + return x_dec \ No newline at end of file diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py new file mode 100644 index 00000000..bbedd04c --- /dev/null +++ b/ldm/models/diffusion/ddpm.py @@ -0,0 +1,1445 @@ +""" +wild mixture of +https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py +https://github.com/CompVis/taming-transformers +-- merci +""" + +import torch +import torch.nn as nn +import numpy as np +import pytorch_lightning as pl +from torch.optim.lr_scheduler import LambdaLR +from einops import rearrange, repeat +from contextlib import contextmanager +from functools import partial +from tqdm import tqdm +from torchvision.utils import make_grid +from pytorch_lightning.utilities.distributed import rank_zero_only + +from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config +from ldm.modules.ema import LitEma +from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution +from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL +from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like +from ldm.models.diffusion.ddim import DDIMSampler + + +__conditioning_keys__ = {'concat': 'c_concat', + 'crossattn': 'c_crossattn', + 'adm': 'y'} + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change 
anymore.""" + return self + + +def uniform_on_device(r1, r2, shape, device): + return (r1 - r2) * torch.rand(*shape, device=device) + r2 + + +class DDPM(pl.LightningModule): + # classic DDPM with Gaussian diffusion, in image space + def __init__(self, + unet_config, + timesteps=1000, + beta_schedule="linear", + loss_type="l2", + ckpt_path=None, + ignore_keys=[], + load_only_unet=False, + monitor="val/loss", + use_ema=True, + first_stage_key="image", + image_size=256, + channels=3, + log_every_t=100, + clip_denoised=True, + linear_start=1e-4, + linear_end=2e-2, + cosine_s=8e-3, + given_betas=None, + original_elbo_weight=0., + v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta + l_simple_weight=1., + conditioning_key=None, + parameterization="eps", # all assuming fixed variance schedules + scheduler_config=None, + use_positional_encodings=False, + learn_logvar=False, + logvar_init=0., + ): + super().__init__() + assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' + self.parameterization = parameterization + print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") + self.cond_stage_model = None + self.clip_denoised = clip_denoised + self.log_every_t = log_every_t + self.first_stage_key = first_stage_key + self.image_size = image_size # try conv? + self.channels = channels + self.use_positional_encodings = use_positional_encodings + self.model = DiffusionWrapper(unet_config, conditioning_key) + count_params(self.model, verbose=True) + self.use_ema = use_ema + if self.use_ema: + self.model_ema = LitEma(self.model) + print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") + + self.use_scheduler = scheduler_config is not None + if self.use_scheduler: + self.scheduler_config = scheduler_config + + self.v_posterior = v_posterior + self.original_elbo_weight = original_elbo_weight + self.l_simple_weight = l_simple_weight + + if monitor is not None: + self.monitor = monitor + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) + + self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, + linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) + + self.loss_type = loss_type + + self.learn_logvar = learn_logvar + self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) + if self.learn_logvar: + self.logvar = nn.Parameter(self.logvar, requires_grad=True) + + + def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if exists(given_betas): + betas = given_betas + else: + betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, + cosine_s=cosine_s) + alphas = 1. 
- betas + alphas_cumprod = np.cumprod(alphas, axis=0) + alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) + + timesteps, = betas.shape + self.num_timesteps = int(timesteps) + self.linear_start = linear_start + self.linear_end = linear_end + assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' + + to_torch = partial(torch.tensor, dtype=torch.float32) + + self.register_buffer('betas', to_torch(betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) + + # calculations for posterior q(x_{t-1} | x_t, x_0) + posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( + 1. - alphas_cumprod) + self.v_posterior * betas + # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) + self.register_buffer('posterior_variance', to_torch(posterior_variance)) + # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain + self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) + self.register_buffer('posterior_mean_coef1', to_torch( + betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) + self.register_buffer('posterior_mean_coef2', to_torch( + (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) + + if self.parameterization == "eps": + lvlb_weights = self.betas ** 2 / ( + 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) + elif self.parameterization == "x0": + lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) + else: + raise NotImplementedError("mu not supported") + # TODO how to choose this term + lvlb_weights[0] = lvlb_weights[1] + self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) + assert not torch.isnan(self.lvlb_weights).all() + + @contextmanager + def ema_scope(self, context=None): + if self.use_ema: + self.model_ema.store(self.model.parameters()) + self.model_ema.copy_to(self.model) + if context is not None: + print(f"{context}: Switched to EMA weights") + try: + yield None + finally: + if self.use_ema: + self.model_ema.restore(self.model.parameters()) + if context is not None: + print(f"{context}: Restored training weights") + + def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): + sd = torch.load(path, map_location="cpu") + if "state_dict" in list(sd.keys()): + sd = sd["state_dict"] + keys = list(sd.keys()) + for k in keys: + for ik in ignore_keys: + if k.startswith(ik): + print("Deleting key {} from state_dict.".format(k)) + del sd[k] + missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( + sd, strict=False) + print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") + if len(missing) > 0: + print(f"Missing Keys: {missing}") + if len(unexpected) > 0: + print(f"Unexpected Keys: {unexpected}") + + def q_mean_variance(self, x_start, t): + """ + Get the distribution q(x_t | x_0). + :param x_start: the [N x C x ...] tensor of noiseless inputs. + :param t: the number of diffusion steps (minus 1). Here, 0 means one step. + :return: A tuple (mean, variance, log_variance), all of x_start's shape. + """ + mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) + variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) + log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) + return mean, variance, log_variance + + def predict_start_from_noise(self, x_t, t, noise): + return ( + extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise + ) + + def q_posterior(self, x_start, x_t, t): + posterior_mean = ( + extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t + ) + posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) + posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) + return posterior_mean, posterior_variance, posterior_log_variance_clipped + + def p_mean_variance(self, x, t, clip_denoised: bool): + model_out = self.model(x, t) + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + if clip_denoised: + x_recon.clamp_(-1., 1.) 
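+        # x_recon is the model's estimate of x_0 (clipped to [-1, 1] when requested);
+        # it is plugged into the closed-form Gaussian posterior q(x_{t-1} | x_t, x_0) below.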
+ + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): + b, *_, device = *x.shape, x.device + model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) + noise = noise_like(x.shape, device, repeat_noise) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def p_sample_loop(self, shape, return_intermediates=False): + device = self.betas.device + b = shape[0] + img = torch.randn(shape, device=device) + intermediates = [img] + for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): + img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), + clip_denoised=self.clip_denoised) + if i % self.log_every_t == 0 or i == self.num_timesteps - 1: + intermediates.append(img) + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, batch_size=16, return_intermediates=False): + image_size = self.image_size + channels = self.channels + return self.p_sample_loop((batch_size, channels, image_size, image_size), + return_intermediates=return_intermediates) + + def q_sample(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) + + def get_loss(self, pred, target, mean=True): + if self.loss_type == 'l1': + loss = (target - pred).abs() + if mean: + loss = loss.mean() + elif self.loss_type == 'l2': + if mean: + loss = torch.nn.functional.mse_loss(target, pred) + else: + loss = torch.nn.functional.mse_loss(target, pred, reduction='none') + else: + raise NotImplementedError("unknown loss type '{loss_type}'") + + return loss + + def p_losses(self, x_start, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_out = self.model(x_noisy, t) + + loss_dict = {} + if self.parameterization == "eps": + target = noise + elif self.parameterization == "x0": + target = x_start + else: + raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") + + loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) + + log_prefix = 'train' if self.training else 'val' + + loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) + loss_simple = loss.mean() * self.l_simple_weight + + loss_vlb = (self.lvlb_weights[t] * loss).mean() + loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) + + loss = loss_simple + self.original_elbo_weight * loss_vlb + + loss_dict.update({f'{log_prefix}/loss': loss}) + + return loss, loss_dict + + def forward(self, x, *args, **kwargs): + # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size + # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + return self.p_losses(x, t, *args, **kwargs) + + def get_input(self, batch, k): + x = batch[k] + if len(x.shape) == 3: + x = x[..., None] + x = rearrange(x, 'b h w c -> b c h w') + x = 
x.to(memory_format=torch.contiguous_format).float() + return x + + def shared_step(self, batch): + x = self.get_input(batch, self.first_stage_key) + loss, loss_dict = self(x) + return loss, loss_dict + + def training_step(self, batch, batch_idx): + loss, loss_dict = self.shared_step(batch) + + self.log_dict(loss_dict, prog_bar=True, + logger=True, on_step=True, on_epoch=True) + + self.log("global_step", self.global_step, + prog_bar=True, logger=True, on_step=True, on_epoch=False) + + if self.use_scheduler: + lr = self.optimizers().param_groups[0]['lr'] + self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) + + return loss + + @torch.no_grad() + def validation_step(self, batch, batch_idx): + _, loss_dict_no_ema = self.shared_step(batch) + with self.ema_scope(): + _, loss_dict_ema = self.shared_step(batch) + loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} + self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) + + def on_train_batch_end(self, *args, **kwargs): + if self.use_ema: + self.model_ema(self.model) + + def _get_rows_from_list(self, samples): + n_imgs_per_row = len(samples) + denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): + log = dict() + x = self.get_input(batch, self.first_stage_key) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + x = x.to(self.device)[:N] + log["inputs"] = x + + # get diffusion row + diffusion_row = list() + x_start = x[:n_row] + + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(x_start) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + diffusion_row.append(x_noisy) + + log["diffusion_row"] = self._get_rows_from_list(diffusion_row) + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) + + log["samples"] = samples + log["denoise_row"] = self._get_rows_from_list(denoise_row) + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.learn_logvar: + params = params + [self.logvar] + opt = torch.optim.AdamW(params, lr=lr) + return opt + + +class LatentDiffusion(DDPM): + """main class""" + def __init__(self, + first_stage_config, + cond_stage_config, + num_timesteps_cond=None, + cond_stage_key="image", + cond_stage_trainable=False, + concat_mode=True, + cond_stage_forward=None, + conditioning_key=None, + scale_factor=1.0, + scale_by_std=False, + *args, **kwargs): + self.num_timesteps_cond = default(num_timesteps_cond, 1) + self.scale_by_std = scale_by_std + assert self.num_timesteps_cond <= kwargs['timesteps'] + # for backwards compatibility after implementation of DiffusionWrapper + if conditioning_key is None: + conditioning_key = 'concat' if concat_mode else 'crossattn' + if cond_stage_config == '__is_unconditional__': + 
conditioning_key = None + ckpt_path = kwargs.pop("ckpt_path", None) + ignore_keys = kwargs.pop("ignore_keys", []) + super().__init__(conditioning_key=conditioning_key, *args, **kwargs) + self.concat_mode = concat_mode + self.cond_stage_trainable = cond_stage_trainable + self.cond_stage_key = cond_stage_key + try: + self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 + except: + self.num_downs = 0 + if not scale_by_std: + self.scale_factor = scale_factor + else: + self.register_buffer('scale_factor', torch.tensor(scale_factor)) + self.instantiate_first_stage(first_stage_config) + self.instantiate_cond_stage(cond_stage_config) + self.cond_stage_forward = cond_stage_forward + self.clip_denoised = False + self.bbox_tokenizer = None + + self.restarted_from_ckpt = False + if ckpt_path is not None: + self.init_from_ckpt(ckpt_path, ignore_keys) + self.restarted_from_ckpt = True + + def make_cond_schedule(self, ): + self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) + ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() + self.cond_ids[:self.num_timesteps_cond] = ids + + @rank_zero_only + @torch.no_grad() + def on_train_batch_start(self, batch, batch_idx, dataloader_idx): + # only for very first batch + if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: + assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' + # set rescale weight to 1./std of encodings + print("### USING STD-RESCALING ###") + x = super().get_input(batch, self.first_stage_key) + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + del self.scale_factor + self.register_buffer('scale_factor', 1. 
/ z.flatten().std()) + print(f"setting self.scale_factor to {self.scale_factor}") + print("### USING STD-RESCALING ###") + + def register_schedule(self, + given_betas=None, beta_schedule="linear", timesteps=1000, + linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) + + self.shorten_cond_schedule = self.num_timesteps_cond > 1 + if self.shorten_cond_schedule: + self.make_cond_schedule() + + def instantiate_first_stage(self, config): + model = instantiate_from_config(config) + self.first_stage_model = model.eval() + self.first_stage_model.train = disabled_train + for param in self.first_stage_model.parameters(): + param.requires_grad = False + + def instantiate_cond_stage(self, config): + if not self.cond_stage_trainable: + if config == "__is_first_stage__": + print("Using first stage also as cond stage.") + self.cond_stage_model = self.first_stage_model + elif config == "__is_unconditional__": + print(f"Training {self.__class__.__name__} as an unconditional model.") + self.cond_stage_model = None + # self.be_unconditional = True + else: + model = instantiate_from_config(config) + self.cond_stage_model = model.eval() + self.cond_stage_model.train = disabled_train + for param in self.cond_stage_model.parameters(): + param.requires_grad = False + else: + assert config != '__is_first_stage__' + assert config != '__is_unconditional__' + model = instantiate_from_config(config) + self.cond_stage_model = model + + def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): + denoise_row = [] + for zd in tqdm(samples, desc=desc): + denoise_row.append(self.decode_first_stage(zd.to(self.device), + force_not_quantize=force_no_decoder_quantization)) + n_imgs_per_row = len(denoise_row) + denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W + denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') + denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') + denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) + return denoise_grid + + def get_first_stage_encoding(self, encoder_posterior): + if isinstance(encoder_posterior, DiagonalGaussianDistribution): + z = encoder_posterior.sample() + elif isinstance(encoder_posterior, torch.Tensor): + z = encoder_posterior + else: + raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") + return self.scale_factor * z + + def get_learned_conditioning(self, c): + if self.cond_stage_forward is None: + if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): + c = self.cond_stage_model.encode(c) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + else: + c = self.cond_stage_model(c) + else: + assert hasattr(self.cond_stage_model, self.cond_stage_forward) + c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) + return c + + def meshgrid(self, h, w): + y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) + x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) + + arr = torch.cat([y, x], dim=-1) + return arr + + def delta_border(self, h, w): + """ + :param h: height + :param w: width + :return: normalized distance to image border, + wtith min distance = 0 at border and max dist = 0.5 at image center + """ + lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) + arr = self.meshgrid(h, w) / lower_right_corner + dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] + dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] + edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] + return edge_dist + + def get_weighting(self, h, w, Ly, Lx, device): + weighting = self.delta_border(h, w) + weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], + self.split_input_params["clip_max_weight"], ) + weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) + + if self.split_input_params["tie_braker"]: + L_weighting = self.delta_border(Ly, Lx) + L_weighting = torch.clip(L_weighting, + self.split_input_params["clip_min_tie_weight"], + self.split_input_params["clip_max_tie_weight"]) + + L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) + weighting = weighting * L_weighting + return weighting + + def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code + """ + :param x: img of size (bs, c, h, w) + :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) + """ + bs, nc, h, w = x.shape + + # number of crops in image + Ly = (h - kernel_size[0]) // stride[0] + 1 + Lx = (w - kernel_size[1]) // stride[1] + 1 + + if uf == 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) + + weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) + + elif uf > 1 and df == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), + dilation=1, padding=0, + stride=(stride[0] * uf, stride[1] * uf)) + fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) + + elif df > 1 and uf == 1: + fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) + unfold = torch.nn.Unfold(**fold_params) + + fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), + dilation=1, padding=0, + stride=(stride[0] // df, stride[1] // df)) + fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) + + weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) + normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap + weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) + + else: + raise NotImplementedError + + return fold, unfold, normalization, weighting + + @torch.no_grad() + def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, + cond_key=None, return_original_cond=False, bs=None): + x = super().get_input(batch, k) + if bs is not None: + x = x[:bs] + x = x.to(self.device) + encoder_posterior = self.encode_first_stage(x) + z = self.get_first_stage_encoding(encoder_posterior).detach() + + if self.model.conditioning_key is not None: + if cond_key is None: + cond_key = 
self.cond_stage_key + if cond_key != self.first_stage_key: + if cond_key in ['caption', 'coordinates_bbox']: + xc = batch[cond_key] + elif cond_key == 'class_label': + xc = batch + else: + xc = super().get_input(batch, cond_key).to(self.device) + else: + xc = x + if not self.cond_stage_trainable or force_c_encode: + if isinstance(xc, dict) or isinstance(xc, list): + # import pudb; pudb.set_trace() + c = self.get_learned_conditioning(xc) + else: + c = self.get_learned_conditioning(xc.to(self.device)) + else: + c = xc + if bs is not None: + c = c[:bs] + + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + ckey = __conditioning_keys__[self.model.conditioning_key] + c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} + + else: + c = None + xc = None + if self.use_positional_encodings: + pos_x, pos_y = self.compute_latent_shifts(batch) + c = {'pos_x': pos_x, 'pos_y': pos_y} + out = [z, c] + if return_first_stage_outputs: + xrec = self.decode_first_stage(z) + out.extend([x, xrec]) + if return_original_cond: + out.append(xc) + return out + + @torch.no_grad() + def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. 
reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + # same as above but without decorator + def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): + if predict_cids: + if z.dim() == 4: + z = torch.argmax(z.exp(), dim=1).long() + z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) + z = rearrange(z, 'b h w c -> b c h w').contiguous() + + z = 1. / self.scale_factor * z + + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. (64, 64) + uf = self.split_input_params["vqf"] + bs, nc, h, w = z.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) + + z = unfold(z) # (bn, nc * prod(**ks), L) + # 1. Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + # 2. apply model loop over last dim + if isinstance(self.first_stage_model, VQModelInterface): + output_list = [self.first_stage_model.decode(z[:, :, :, :, i], + force_not_quantize=predict_cids or force_not_quantize) + for i in range(z.shape[-1])] + else: + + output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) + o = o * weighting + # Reverse 1. reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization # norm is shape (1, 1, h, w) + return decoded + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + else: + if isinstance(self.first_stage_model, VQModelInterface): + return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) + else: + return self.first_stage_model.decode(z) + + @torch.no_grad() + def encode_first_stage(self, x): + if hasattr(self, "split_input_params"): + if self.split_input_params["patch_distributed_vq"]: + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + df = self.split_input_params["vqf"] + self.split_input_params['original_image_size'] = x.shape[-2:] + bs, nc, h, w = x.shape + if ks[0] > h or ks[1] > w: + ks = (min(ks[0], h), min(ks[1], w)) + print("reducing Kernel") + + if stride[0] > h or stride[1] > w: + stride = (min(stride[0], h), min(stride[1], w)) + print("reducing stride") + + fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) + z = unfold(x) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) + for i in range(z.shape[-1])] + + o = torch.stack(output_list, axis=-1) + o = o * weighting + + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + decoded = fold(o) + decoded = decoded / normalization + return decoded + + else: + return self.first_stage_model.encode(x) + else: + return self.first_stage_model.encode(x) + + def shared_step(self, batch, **kwargs): + x, c = self.get_input(batch, self.first_stage_key) + loss = self(x, c) + return loss + + def forward(self, x, c, *args, **kwargs): + t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() + if self.model.conditioning_key is not None: + assert c is not None + if self.cond_stage_trainable: + c = self.get_learned_conditioning(c) + if self.shorten_cond_schedule: # TODO: drop this option + tc = self.cond_ids[t].to(self.device) + c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) + return self.p_losses(x, c, t, *args, **kwargs) + + def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset + def rescale_bbox(bbox): + x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) + y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) + w = min(bbox[2] / crop_coordinates[2], 1 - x0) + h = min(bbox[3] / crop_coordinates[3], 1 - y0) + return x0, y0, w, h + + return [rescale_bbox(b) for b in bboxes] + + def apply_model(self, x_noisy, t, cond, return_ids=False): + + if isinstance(cond, dict): + # hybrid case, cond is exptected to be a dict + pass + else: + if not isinstance(cond, list): + cond = [cond] + key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' + cond = {key: cond} + + if hasattr(self, "split_input_params"): + assert len(cond) == 1 # todo can only deal with one conditioning atm + assert not return_ids + ks = self.split_input_params["ks"] # eg. (128, 128) + stride = self.split_input_params["stride"] # eg. 
(64, 64) + + h, w = x_noisy.shape[-2:] + + fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) + + z = unfold(x_noisy) # (bn, nc * prod(**ks), L) + # Reshape to img shape + z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] + + if self.cond_stage_key in ["image", "LR_image", "segmentation", + 'bbox_img'] and self.model.conditioning_key: # todo check for completeness + c_key = next(iter(cond.keys())) # get key + c = next(iter(cond.values())) # get value + assert (len(c) == 1) # todo extend to list with more than one elem + c = c[0] # get element + + c = unfold(c) + c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) + + cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] + + elif self.cond_stage_key == 'coordinates_bbox': + assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' + + # assuming padding of unfold is always 0 and its dilation is always 1 + n_patches_per_row = int((w - ks[0]) / stride[0] + 1) + full_img_h, full_img_w = self.split_input_params['original_image_size'] + # as we are operating on latents, we need the factor from the original image size to the + # spatial latent size to properly rescale the crops for regenerating the bbox annotations + num_downs = self.first_stage_model.encoder.num_resolutions - 1 + rescale_latent = 2 ** (num_downs) + + # get top left postions of patches as conforming for the bbbox tokenizer, therefore we + # need to rescale the tl patch coordinates to be in between (0,1) + tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, + rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) + for patch_nr in range(z.shape[-1])] + + # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) + patch_limits = [(x_tl, y_tl, + rescale_latent * ks[0] / full_img_w, + rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] + # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] + + # tokenize crop coordinates for the bounding boxes of the respective patches + patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) + for bbox in patch_limits] # list of length l with tensors of shape (1, 2) + print(patch_limits_tknzd[0].shape) + # cut tknzd crop position from conditioning + assert isinstance(cond, dict), 'cond must be dict to be fed into model' + cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) + print(cut_cond.shape) + + adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) + adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') + print(adapted_cond.shape) + adapted_cond = self.get_learned_conditioning(adapted_cond) + print(adapted_cond.shape) + adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) + print(adapted_cond.shape) + + cond_list = [{'c_crossattn': [e]} for e in adapted_cond] + + else: + cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient + + # apply model by loop over crops + output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] + assert not isinstance(output_list[0], + tuple) # todo cant deal with multiple model outputs check this never happens + + o = torch.stack(output_list, 
axis=-1) + o = o * weighting + # Reverse reshape to img shape + o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) + # stitch crops together + x_recon = fold(o) / normalization + + else: + x_recon = self.model(x_noisy, t, **cond) + + if isinstance(x_recon, tuple) and not return_ids: + return x_recon[0] + else: + return x_recon + + def _predict_eps_from_xstart(self, x_t, t, pred_xstart): + return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ + extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) + + def _prior_bpd(self, x_start): + """ + Get the prior KL term for the variational lower-bound, measured in + bits-per-dim. + This term can't be optimized, as it only depends on the encoder. + :param x_start: the [N x C x ...] tensor of inputs. + :return: a batch of [N] KL values (in bits), one per batch element. + """ + batch_size = x_start.shape[0] + t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) + qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) + kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) + return mean_flat(kl_prior) / np.log(2.0) + + def p_losses(self, x_start, cond, t, noise=None): + noise = default(noise, lambda: torch.randn_like(x_start)) + x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) + model_output = self.apply_model(x_noisy, t, cond) + + loss_dict = {} + prefix = 'train' if self.training else 'val' + + if self.parameterization == "x0": + target = x_start + elif self.parameterization == "eps": + target = noise + else: + raise NotImplementedError() + + loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) + loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) + + logvar_t = self.logvar[t].to(self.device) + loss = loss_simple / torch.exp(logvar_t) + logvar_t + # loss = loss_simple / torch.exp(self.logvar) + self.logvar + if self.learn_logvar: + loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) + loss_dict.update({'logvar': self.logvar.data.mean()}) + + loss = self.l_simple_weight * loss.mean() + + loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) + loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() + loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) + loss += (self.original_elbo_weight * loss_vlb) + loss_dict.update({f'{prefix}/loss': loss}) + + return loss, loss_dict + + def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, + return_x0=False, score_corrector=None, corrector_kwargs=None): + t_in = t + model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) + + if score_corrector is not None: + assert self.parameterization == "eps" + model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) + + if return_codebook_ids: + model_out, logits = model_out + + if self.parameterization == "eps": + x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) + elif self.parameterization == "x0": + x_recon = model_out + else: + raise NotImplementedError() + + if clip_denoised: + x_recon.clamp_(-1., 1.) 
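+        # Optionally snap the estimated x_0 onto the VQ codebook of the first-stage model
+        # before forming the posterior q(x_{t-1} | x_t, x_0).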
+ if quantize_denoised: + x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) + model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) + if return_codebook_ids: + return model_mean, posterior_variance, posterior_log_variance, logits + elif return_x0: + return model_mean, posterior_variance, posterior_log_variance, x_recon + else: + return model_mean, posterior_variance, posterior_log_variance + + @torch.no_grad() + def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, + return_codebook_ids=False, quantize_denoised=False, return_x0=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): + b, *_, device = *x.shape, x.device + outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, + return_codebook_ids=return_codebook_ids, + quantize_denoised=quantize_denoised, + return_x0=return_x0, + score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if return_codebook_ids: + raise DeprecationWarning("Support dropped.") + model_mean, _, model_log_variance, logits = outputs + elif return_x0: + model_mean, _, model_log_variance, x0 = outputs + else: + model_mean, _, model_log_variance = outputs + + noise = noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + # no noise when t == 0 + nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) + + if return_codebook_ids: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) + if return_x0: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 + else: + return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise + + @torch.no_grad() + def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, + img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., + score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, + log_every_t=None): + if not log_every_t: + log_every_t = self.log_every_t + timesteps = self.num_timesteps + if batch_size is not None: + b = batch_size if batch_size is not None else shape[0] + shape = [batch_size] + list(shape) + else: + b = batch_size = shape[0] + if x_T is None: + img = torch.randn(shape, device=self.device) + else: + img = x_T + intermediates = [] + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', + total=timesteps) if verbose else reversed( + range(0, timesteps)) + if type(temperature) == float: + temperature = [temperature] * timesteps + + for i in iterator: + ts = torch.full((b,), i, device=self.device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img, x0_partial = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised, return_x0=True, + temperature=temperature[i], noise_dropout=noise_dropout, + 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) + if mask is not None: + assert x0 is not None + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(x0_partial) + if callback: callback(i) + if img_callback: img_callback(img, i) + return img, intermediates + + @torch.no_grad() + def p_sample_loop(self, cond, shape, return_intermediates=False, + x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, start_T=None, + log_every_t=None): + + if not log_every_t: + log_every_t = self.log_every_t + device = self.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + intermediates = [img] + if timesteps is None: + timesteps = self.num_timesteps + + if start_T is not None: + timesteps = min(timesteps, start_T) + iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( + range(0, timesteps)) + + if mask is not None: + assert x0 is not None + assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match + + for i in iterator: + ts = torch.full((b,), i, device=device, dtype=torch.long) + if self.shorten_cond_schedule: + assert self.model.conditioning_key != 'hybrid' + tc = self.cond_ids[ts].to(cond.device) + cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) + + img = self.p_sample(img, cond, ts, + clip_denoised=self.clip_denoised, + quantize_denoised=quantize_denoised) + if mask is not None: + img_orig = self.q_sample(x0, ts) + img = img_orig * mask + (1. - mask) * img + + if i % log_every_t == 0 or i == timesteps - 1: + intermediates.append(img) + if callback: callback(i) + if img_callback: img_callback(img, i) + + if return_intermediates: + return img, intermediates + return img + + @torch.no_grad() + def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, + verbose=True, timesteps=None, quantize_denoised=False, + mask=None, x0=None, shape=None,**kwargs): + if shape is None: + shape = (batch_size, self.channels, self.image_size, self.image_size) + if cond is not None: + if isinstance(cond, dict): + cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else + list(map(lambda x: x[:batch_size], cond[key])) for key in cond} + else: + cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] + return self.p_sample_loop(cond, + shape, + return_intermediates=return_intermediates, x_T=x_T, + verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, + mask=mask, x0=x0) + + @torch.no_grad() + def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): + + if ddim: + ddim_sampler = DDIMSampler(self) + shape = (self.channels, self.image_size, self.image_size) + samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, + shape,cond,verbose=False,**kwargs) + + else: + samples, intermediates = self.sample(cond=cond, batch_size=batch_size, + return_intermediates=True,**kwargs) + + return samples, intermediates + + + @torch.no_grad() + def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, + quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, + plot_diffusion_rows=True, **kwargs): + + use_ddim = ddim_steps is not None + + log = dict() + z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, + 
return_first_stage_outputs=True, + force_c_encode=True, + return_original_cond=True, + bs=N) + N = min(x.shape[0], N) + n_row = min(x.shape[0], n_row) + log["inputs"] = x + log["reconstruction"] = xrec + if self.model.conditioning_key is not None: + if hasattr(self.cond_stage_model, "decode"): + xc = self.cond_stage_model.decode(c) + log["conditioning"] = xc + elif self.cond_stage_key in ["caption"]: + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) + log["conditioning"] = xc + elif self.cond_stage_key == 'class_label': + xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) + log['conditioning'] = xc + elif isimage(xc): + log["conditioning"] = xc + if ismap(xc): + log["original_conditioning"] = self.to_rgb(xc) + + if plot_diffusion_rows: + # get diffusion row + diffusion_row = list() + z_start = z[:n_row] + for t in range(self.num_timesteps): + if t % self.log_every_t == 0 or t == self.num_timesteps - 1: + t = repeat(torch.tensor([t]), '1 -> b', b=n_row) + t = t.to(self.device).long() + noise = torch.randn_like(z_start) + z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) + diffusion_row.append(self.decode_first_stage(z_noisy)) + + diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W + diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') + diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') + diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) + log["diffusion_row"] = diffusion_grid + + if sample: + # get denoise row + with self.ema_scope("Plotting"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) + x_samples = self.decode_first_stage(samples) + log["samples"] = x_samples + if plot_denoise_rows: + denoise_grid = self._get_denoise_row_from_list(z_denoise_row) + log["denoise_row"] = denoise_grid + + if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( + self.first_stage_model, IdentityFirstStage): + # also display when quantizing x0 while sampling + with self.ema_scope("Plotting Quantized Denoised"): + samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, + ddim_steps=ddim_steps,eta=ddim_eta, + quantize_denoised=True) + # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, + # quantize_denoised=True) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_x0_quantized"] = x_samples + + if inpaint: + # make a simple center square + b, h, w = z.shape[0], z.shape[2], z.shape[3] + mask = torch.ones(N, h, w).to(self.device) + # zeros will be filled in + mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. + mask = mask[:, None, ...] 
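+            # After the [:, None, ...] the mask is (N, 1, h, w): ones keep the encoded
+            # latent z[:N] passed as x0, zeros are regenerated by the sampler.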
+ with self.ema_scope("Plotting Inpaint"): + + samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_inpainting"] = x_samples + log["mask"] = mask + + # outpaint + with self.ema_scope("Plotting Outpaint"): + samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, + ddim_steps=ddim_steps, x0=z[:N], mask=mask) + x_samples = self.decode_first_stage(samples.to(self.device)) + log["samples_outpainting"] = x_samples + + if plot_progressive_rows: + with self.ema_scope("Plotting Progressives"): + img, progressives = self.progressive_denoising(c, + shape=(self.channels, self.image_size, self.image_size), + batch_size=N) + prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") + log["progressive_row"] = prog_row + + if return_keys: + if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: + return log + else: + return {key: log[key] for key in return_keys} + return log + + def configure_optimizers(self): + lr = self.learning_rate + params = list(self.model.parameters()) + if self.cond_stage_trainable: + print(f"{self.__class__.__name__}: Also optimizing conditioner params!") + params = params + list(self.cond_stage_model.parameters()) + if self.learn_logvar: + print('Diffusion model optimizing logvar') + params.append(self.logvar) + opt = torch.optim.AdamW(params, lr=lr) + if self.use_scheduler: + assert 'target' in self.scheduler_config + scheduler = instantiate_from_config(self.scheduler_config) + + print("Setting up LambdaLR scheduler...") + scheduler = [ + { + 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), + 'interval': 'step', + 'frequency': 1 + }] + return [opt], scheduler + return opt + + @torch.no_grad() + def to_rgb(self, x): + x = x.float() + if not hasattr(self, "colorize"): + self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) + x = nn.functional.conv2d(x, weight=self.colorize) + x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
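+        # x is now a 3-channel image rescaled to [-1, 1] via a fixed random 1x1 projection.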
+ return x + + +class DiffusionWrapper(pl.LightningModule): + def __init__(self, diff_model_config, conditioning_key): + super().__init__() + self.diffusion_model = instantiate_from_config(diff_model_config) + self.conditioning_key = conditioning_key + assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] + + def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): + if self.conditioning_key is None: + out = self.diffusion_model(x, t) + elif self.conditioning_key == 'concat': + xc = torch.cat([x] + c_concat, dim=1) + out = self.diffusion_model(xc, t) + elif self.conditioning_key == 'crossattn': + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(x, t, context=cc) + elif self.conditioning_key == 'hybrid': + xc = torch.cat([x] + c_concat, dim=1) + cc = torch.cat(c_crossattn, 1) + out = self.diffusion_model(xc, t, context=cc) + elif self.conditioning_key == 'adm': + cc = c_crossattn[0] + out = self.diffusion_model(x, t, y=cc) + else: + raise NotImplementedError() + + return out + + +class Layout2ImgDiffusion(LatentDiffusion): + # TODO: move all layout-specific hacks to this class + def __init__(self, cond_stage_key, *args, **kwargs): + assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' + super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) + + def log_images(self, batch, N=8, *args, **kwargs): + logs = super().log_images(batch=batch, N=N, *args, **kwargs) + + key = 'train' if self.training else 'validation' + dset = self.trainer.datamodule.datasets[key] + mapper = dset.conditional_builders[self.cond_stage_key] + + bbox_imgs = [] + map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) + for tknzd_bbox in batch[self.cond_stage_key][:N]: + bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) + bbox_imgs.append(bboximg) + + cond_img = torch.stack(bbox_imgs, dim=0) + logs['bbox_image'] = cond_img + return logs diff --git a/ldm/models/diffusion/dpm_solver/__init__.py b/ldm/models/diffusion/dpm_solver/__init__.py new file mode 100644 index 00000000..7427f38c --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/__init__.py @@ -0,0 +1 @@ +from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/dpm_solver.py b/ldm/models/diffusion/dpm_solver/dpm_solver.py new file mode 100644 index 00000000..bdb64e0c --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/dpm_solver.py @@ -0,0 +1,1184 @@ +import torch +import torch.nn.functional as F +import math + + +class NoiseScheduleVP: + def __init__( + self, + schedule='discrete', + betas=None, + alphas_cumprod=None, + continuous_beta_0=0.1, + continuous_beta_1=20., + ): + """Create a wrapper class for the forward SDE (VP type). + + *** + Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. + We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. + *** + + The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). + We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). + Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. 
For t in [0, T], we have: + + log_alpha_t = self.marginal_log_mean_coeff(t) + sigma_t = self.marginal_std(t) + lambda_t = self.marginal_lambda(t) + + Moreover, as lambda(t) is an invertible function, we also support its inverse function: + + t = self.inverse_lambda(lambda_t) + + =============================================================== + + We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). + + 1. For discrete-time DPMs: + + For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: + t_i = (i + 1) / N + e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. + We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. + + Args: + betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) + alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) + + Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. + + **Important**: Please pay special attention for the args for `alphas_cumprod`: + The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that + q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). + Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have + alpha_{t_n} = \sqrt{\hat{alpha_n}}, + and + log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). + + + 2. For continuous-time DPMs: + + We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise + schedule are the default settings in DDPM and improved-DDPM: + + Args: + beta_min: A `float` number. The smallest beta for the linear schedule. + beta_max: A `float` number. The largest beta for the linear schedule. + cosine_s: A `float` number. The hyperparameter in the cosine schedule. + cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. + T: A `float` number. The ending time of the forward process. + + =============================================================== + + Args: + schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, + 'linear' or 'cosine' for continuous-time DPMs. + Returns: + A wrapper object of the forward SDE (VP type). + + =============================================================== + + Example: + + # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', betas=betas) + + # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): + >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) + + # For continuous-time DPMs (VPSDE), linear schedule: + >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) + + """ + + if schedule not in ['discrete', 'linear', 'cosine']: + raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) + + self.schedule = schedule + if schedule == 'discrete': + if betas is not None: + log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) + else: + assert alphas_cumprod is not None + log_alphas = 0.5 * torch.log(alphas_cumprod) + self.total_N = len(log_alphas) + self.T = 1. 
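+            # Discrete steps n = 0, ..., N-1 are mapped to continuous times t_n = (n + 1) / N,
+            # so t_array spans (0, 1] (e.g. for N = 1000: [0.001, 0.002, ..., 1.0]);
+            # marginal_log_mean_coeff() interpolates log_alpha piecewise-linearly over
+            # (t_array, log_alpha_array).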
+ self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) + self.log_alpha_array = log_alphas.reshape((1, -1,)) + else: + self.total_N = 1000 + self.beta_0 = continuous_beta_0 + self.beta_1 = continuous_beta_1 + self.cosine_s = 0.008 + self.cosine_beta_max = 999. + self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) + self.schedule = schedule + if schedule == 'cosine': + # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. + # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. + self.T = 0.9946 + else: + self.T = 1. + + def marginal_log_mean_coeff(self, t): + """ + Compute log(alpha_t) of a given continuous-time label t in [0, T]. + """ + if self.schedule == 'discrete': + return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) + elif self.schedule == 'linear': + return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 + elif self.schedule == 'cosine': + log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) + log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 + return log_alpha_t + + def marginal_alpha(self, t): + """ + Compute alpha_t of a given continuous-time label t in [0, T]. + """ + return torch.exp(self.marginal_log_mean_coeff(t)) + + def marginal_std(self, t): + """ + Compute sigma_t of a given continuous-time label t in [0, T]. + """ + return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) + + def marginal_lambda(self, t): + """ + Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. + """ + log_mean_coeff = self.marginal_log_mean_coeff(t) + log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) + return log_mean_coeff - log_std + + def inverse_lambda(self, lamb): + """ + Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. + """ + if self.schedule == 'linear': + tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + Delta = self.beta_0**2 + tmp + return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) + elif self.schedule == 'discrete': + log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) + t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) + return t.reshape((-1,)) + else: + log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) + t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s + t = t_fn(log_alpha) + return t + + +def model_wrapper( + model, + noise_schedule, + model_type="noise", + model_kwargs={}, + guidance_type="uncond", + condition=None, + unconditional_condition=None, + guidance_scale=1., + classifier_fn=None, + classifier_kwargs={}, +): + """Create a wrapper function for the noise prediction model. + + DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to + firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
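+    As an illustrative sketch (`unet`, `cond` and `uncond` are placeholders for an epsilon-prediction
+    UNet and its conditionings, and 7.5 is an arbitrary guidance scale), a typical classifier-free
+    wrapping of a discrete-time model looks like:
+
+        >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)
+        >>> model_fn = model_wrapper(
+        >>>     lambda x, t, c: unet(x, t, context=c),
+        >>>     ns, model_type="noise", guidance_type="classifier-free",
+        >>>     condition=cond, unconditional_condition=uncond, guidance_scale=7.5,
+        >>> )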
+ + We support four types of the diffusion model by setting `model_type`: + + 1. "noise": noise prediction model. (Trained by predicting noise). + + 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). + + 3. "v": velocity prediction model. (Trained by predicting the velocity). + The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. + + [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." + arXiv preprint arXiv:2202.00512 (2022). + [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." + arXiv preprint arXiv:2210.02303 (2022). + + 4. "score": marginal score function. (Trained by denoising score matching). + Note that the score function and the noise prediction model follows a simple relationship: + ``` + noise(x_t, t) = -sigma_t * score(x_t, t) + ``` + + We support three types of guided sampling by DPMs by setting `guidance_type`: + 1. "uncond": unconditional sampling by DPMs. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + + 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. + The input `model` has the following format: + `` + model(x, t_input, **model_kwargs) -> noise | x_start | v | score + `` + + The input `classifier_fn` has the following format: + `` + classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) + `` + + [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," + in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. + + 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. + The input `model` has the following format: + `` + model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score + `` + And if cond == `unconditional_condition`, the model output is the unconditional DPM output. + + [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." + arXiv preprint arXiv:2207.12598 (2022). + + + The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) + or continuous-time labels (i.e. epsilon to T). + + We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: + `` + def model_fn(x, t_continuous) -> noise: + t_input = get_model_input_time(t_continuous) + return noise_pred(model, x, t_input, **model_kwargs) + `` + where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. + + =============================================================== + + Args: + model: A diffusion model with the corresponding format described above. + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + model_type: A `str`. The parameterization type of the diffusion model. + "noise" or "x_start" or "v" or "score". + model_kwargs: A `dict`. A dict for the other inputs of the model function. + guidance_type: A `str`. The type of the guidance for sampling. + "uncond" or "classifier" or "classifier-free". + condition: A pytorch tensor. The condition for the guided sampling. + Only used for "classifier" or "classifier-free" guidance type. + unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. + Only used for "classifier-free" guidance type. + guidance_scale: A `float`. The scale for the guided sampling. 
+ classifier_fn: A classifier function. Only used for the classifier guidance. + classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. + Returns: + A noise prediction model that accepts the noised data and the continuous time as the inputs. + """ + + def get_model_input_time(t_continuous): + """ + Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. + For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. + For continuous-time DPMs, we just use `t_continuous`. + """ + if noise_schedule.schedule == 'discrete': + return (t_continuous - 1. / noise_schedule.total_N) * 1000. + else: + return t_continuous + + def noise_pred_fn(x, t_continuous, cond=None): + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + t_input = get_model_input_time(t_continuous) + if cond is None: + output = model(x, t_input, **model_kwargs) + else: + output = model(x, t_input, cond, **model_kwargs) + if model_type == "noise": + return output + elif model_type == "x_start": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) + elif model_type == "v": + alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x + elif model_type == "score": + sigma_t = noise_schedule.marginal_std(t_continuous) + dims = x.dim() + return -expand_dims(sigma_t, dims) * output + + def cond_grad_fn(x, t_input): + """ + Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). + """ + with torch.enable_grad(): + x_in = x.detach().requires_grad_(True) + log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) + return torch.autograd.grad(log_prob.sum(), x_in)[0] + + def model_fn(x, t_continuous): + """ + The noise predicition model function that is used for DPM-Solver. + """ + if t_continuous.reshape((-1,)).shape[0] == 1: + t_continuous = t_continuous.expand((x.shape[0])) + if guidance_type == "uncond": + return noise_pred_fn(x, t_continuous) + elif guidance_type == "classifier": + assert classifier_fn is not None + t_input = get_model_input_time(t_continuous) + cond_grad = cond_grad_fn(x, t_input) + sigma_t = noise_schedule.marginal_std(t_continuous) + noise = noise_pred_fn(x, t_continuous) + return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad + elif guidance_type == "classifier-free": + if guidance_scale == 1. or unconditional_condition is None: + return noise_pred_fn(x, t_continuous, cond=condition) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t_continuous] * 2) + c_in = torch.cat([unconditional_condition, condition]) + noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) + return noise_uncond + guidance_scale * (noise - noise_uncond) + + assert model_type in ["noise", "x_start", "v"] + assert guidance_type in ["uncond", "classifier", "classifier-free"] + return model_fn + + +class DPM_Solver: + def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): + """Construct a DPM-Solver. + + We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). 
+ If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). + If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). + In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. + The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. + + Args: + model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): + `` + def model_fn(x, t_continuous): + return noise + `` + noise_schedule: A noise schedule object, such as NoiseScheduleVP. + predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. + thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. + max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. + + [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. + """ + self.model = model_fn + self.noise_schedule = noise_schedule + self.predict_x0 = predict_x0 + self.thresholding = thresholding + self.max_val = max_val + + def noise_prediction_fn(self, x, t): + """ + Return the noise prediction model. + """ + return self.model(x, t) + + def data_prediction_fn(self, x, t): + """ + Return the data prediction model (with thresholding). + """ + noise = self.noise_prediction_fn(x, t) + dims = x.dim() + alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) + x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) + if self.thresholding: + p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. + s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) + s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) + x0 = torch.clamp(x0, -s, s) / s + return x0 + + def model_fn(self, x, t): + """ + Convert the model to the noise prediction model or the data prediction model. + """ + if self.predict_x0: + return self.data_prediction_fn(x, t) + else: + return self.noise_prediction_fn(x, t) + + def get_time_steps(self, skip_type, t_T, t_0, N, device): + """Compute the intermediate time steps for sampling. + + Args: + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + N: A `int`. The total number of the spacing of the time steps. + device: A torch device. + Returns: + A pytorch tensor of the time steps, with the shape (N + 1,). 
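+            For example (illustrative; assuming a 'discrete' schedule with total_N = 1000, so t_T = 1. and t_0 = 1e-3):
+                >>> dpm_solver.get_time_steps('time_uniform', t_T=1., t_0=1e-3, N=10, device='cpu')
+            returns 11 time points spaced uniformly from 1. down to 1e-3.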
+ """ + if skip_type == 'logSNR': + lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) + lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) + logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) + return self.noise_schedule.inverse_lambda(logSNR_steps) + elif skip_type == 'time_uniform': + return torch.linspace(t_T, t_0, N + 1).to(device) + elif skip_type == 'time_quadratic': + t_order = 2 + t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device) + return t + else: + raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) + + def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): + """ + Get the order of each step for sampling by the singlestep DPM-Solver. + + We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". + Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: + - If order == 1: + We take `steps` of DPM-Solver-1 (i.e. DDIM). + - If order == 2: + - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of DPM-Solver-2. + - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If order == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. + + ============================================ + Args: + order: A `int`. The max order for the solver (2 or 3). + steps: A `int`. The total number of function evaluations (NFE). + skip_type: A `str`. The type for the spacing of the time steps. We support three types: + - 'logSNR': uniform logSNR for the time steps. + - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) + - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + device: A torch device. + Returns: + orders: A list of the solver order of each step. + """ + if order == 3: + K = steps // 3 + 1 + if steps % 3 == 0: + orders = [3,] * (K - 2) + [2, 1] + elif steps % 3 == 1: + orders = [3,] * (K - 1) + [1] + else: + orders = [3,] * (K - 1) + [2] + elif order == 2: + if steps % 2 == 0: + K = steps // 2 + orders = [2,] * K + else: + K = steps // 2 + 1 + orders = [2,] * (K - 1) + [1] + elif order == 1: + K = 1 + orders = [1,] * steps + else: + raise ValueError("'order' must be '1' or '2' or '3'.") + if skip_type == 'logSNR': + # To reproduce the results in DPM-Solver paper + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) + else: + timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)] + return timesteps_outer, orders + + def denoise_to_zero_fn(self, x, s): + """ + Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. 
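+        Concretely, this returns the (optionally thresholded) data prediction x_0 at time `s`,
+        i.e. `data_prediction_fn(x, s)`.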
+ """ + return self.data_prediction_fn(x, s) + + def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): + """ + DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + if self.predict_x0: + phi_1 = torch.expm1(-h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + else: + phi_1 = torch.expm1(h) + if model_s is None: + model_s = self.model_fn(x, s) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + ) + if return_intermediate: + return x_t, {'model_s': model_s} + else: + return x_t + + def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-2 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the second-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. 
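+            Note: the intermediate time `s1` satisfies lambda_{s1} = lambda_s + r1 * (lambda_t - lambda_s),
+            i.e. `r1` splits the step in half-logSNR space (the default r1 = 0.5 bisects it).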
+ """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 0.5 + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + s1 = ns.inverse_lambda(lambda_s1) + log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) + alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_1 = torch.expm1(-h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s) + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_1 = torch.expm1(h) + + if model_s is None: + model_s = self.model_fn(x, s) + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) + ) + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1} + else: + return x_t + + def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): + """ + Singlestep solver DPM-Solver-3 from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + r1: A `float`. The hyperparameter of the third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + model_s: A pytorch tensor. The model function evaluated at time `s`. + If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. + model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). + If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. 
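+            Note: with the default r1 = 1/3 and r2 = 2/3, the intermediate times `s1` and `s2` split the
+            half-logSNR interval [lambda_s, lambda_t] into three equal parts.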
+ Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + if r1 is None: + r1 = 1. / 3. + if r2 is None: + r2 = 2. / 3. + ns = self.noise_schedule + dims = x.dim() + lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) + h = lambda_t - lambda_s + lambda_s1 = lambda_s + r1 * h + lambda_s2 = lambda_s + r2 * h + s1 = ns.inverse_lambda(lambda_s1) + s2 = ns.inverse_lambda(lambda_s2) + log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) + sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t) + alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) + + if self.predict_x0: + phi_11 = torch.expm1(-r1 * h) + phi_12 = torch.expm1(-r2 * h) + phi_1 = torch.expm1(-h) + phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. + phi_2 = phi_1 / h + 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(sigma_s1 / sigma_s, dims) * x + - expand_dims(alpha_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(sigma_s2 / sigma_s, dims) * x + - expand_dims(alpha_s2 * phi_12, dims) * model_s + + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. * (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(sigma_t / sigma_s, dims) * x + - expand_dims(alpha_t * phi_1, dims) * model_s + + expand_dims(alpha_t * phi_2, dims) * D1 + - expand_dims(alpha_t * phi_3, dims) * D2 + ) + else: + phi_11 = torch.expm1(r1 * h) + phi_12 = torch.expm1(r2 * h) + phi_1 = torch.expm1(h) + phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. + phi_2 = phi_1 / h - 1. + phi_3 = phi_2 / h - 0.5 + + if model_s is None: + model_s = self.model_fn(x, s) + if model_s1 is None: + x_s1 = ( + expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x + - expand_dims(sigma_s1 * phi_11, dims) * model_s + ) + model_s1 = self.model_fn(x_s1, s1) + x_s2 = ( + expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x + - expand_dims(sigma_s2 * phi_12, dims) * model_s + - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) + ) + model_s2 = self.model_fn(x_s2, s2) + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) + ) + elif solver_type == 'taylor': + D1_0 = (1. / r1) * (model_s1 - model_s) + D1_1 = (1. / r2) * (model_s2 - model_s) + D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) + D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x + - expand_dims(sigma_t * phi_1, dims) * model_s + - expand_dims(sigma_t * phi_2, dims) * D1 + - expand_dims(sigma_t * phi_3, dims) * D2 + ) + + if return_intermediate: + return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} + else: + return x_t + + def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): + """ + Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if solver_type not in ['dpm_solver', 'taylor']: + raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) + ns = self.noise_schedule + dims = x.dim() + model_prev_1, model_prev_0 = model_prev_list + t_prev_1, t_prev_0 = t_prev_list + lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0 = h_0 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + if self.predict_x0: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 + ) + else: + if solver_type == 'dpm_solver': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 + ) + elif solver_type == 'taylor': + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 + ) + return x_t + + def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): + """ + Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. + t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + ns = self.noise_schedule + dims = x.dim() + model_prev_2, model_prev_1, model_prev_0 = model_prev_list + t_prev_2, t_prev_1, t_prev_0 = t_prev_list + lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) + log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) + sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) + alpha_t = torch.exp(log_alpha_t) + + h_1 = lambda_prev_1 - lambda_prev_2 + h_0 = lambda_prev_0 - lambda_prev_1 + h = lambda_t - lambda_prev_0 + r0, r1 = h_0 / h, h_1 / h + D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) + D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) + D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) + D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) + if self.predict_x0: + x_t = ( + expand_dims(sigma_t / sigma_prev_0, dims) * x + - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 + + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 + - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2 + ) + else: + x_t = ( + expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x + - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 + - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 + - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2 + ) + return x_t + + def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): + """ + Singlestep DPM-Solver with the order `order` from time `s` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + s: A pytorch tensor. The starting time, with the shape (x.shape[0],). + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + r1: A `float`. The hyperparameter of the second-order or third-order solver. + r2: A `float`. The hyperparameter of the third-order solver. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) + elif order == 2: + return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) + elif order == 3: + return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): + """ + Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. + + Args: + x: A pytorch tensor. The initial value at time `s`. + model_prev_list: A list of pytorch tensor. The previous computed model values. 
+ t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) + t: A pytorch tensor. The ending time, with the shape (x.shape[0],). + order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_t: A pytorch tensor. The approximated solution at time `t`. + """ + if order == 1: + return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) + elif order == 2: + return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + elif order == 3: + return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) + else: + raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) + + def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): + """ + The adaptive step size solver based on singlestep DPM-Solver. + + Args: + x: A pytorch tensor. The initial value at time `t_T`. + order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. + t_T: A `float`. The starting time of the sampling (default is T). + t_0: A `float`. The ending time of the sampling (default is epsilon). + h_init: A `float`. The initial step size (for logSNR). + atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. + rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. + theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. + t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the + current time and `t_0` is less than `t_err`. The default setting is 1e-5. + solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. + The type slightly impacts the performance. We recommend to use 'dpm_solver' type. + Returns: + x_0: A pytorch tensor. The approximated solution at time `t_0`. + + [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. + """ + ns = self.noise_schedule + s = t_T * torch.ones((x.shape[0],)).to(x) + lambda_s = ns.marginal_lambda(s) + lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) + h = h_init * torch.ones_like(s).to(x) + x_prev = x + nfe = 0 + if order == 2: + r1 = 0.5 + lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) + elif order == 3: + r1, r2 = 1. / 3., 2. / 3. 
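+            # A second-order step serves as the "lower" estimate and a third-order step as the "higher"
+            # one; their scaled difference E below drives the adaptive update of the step size h.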
+ lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) + higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) + else: + raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) + while torch.abs((s - t_0)).mean() > t_err: + t = ns.inverse_lambda(lambda_s + h) + x_lower, lower_noise_kwargs = lower_update(x, s, t) + x_higher = higher_update(x, s, t, **lower_noise_kwargs) + delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) + norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) + E = norm_fn((x_higher - x_lower) / delta).max() + if torch.all(E <= 1.): + x = x_higher + s = t + x_prev = x_lower + lambda_s = ns.marginal_lambda(s) + h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) + nfe += order + print('adaptive solver nfe', nfe) + return x + + def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', + method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', + atol=0.0078, rtol=0.05, + ): + """ + Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. + + ===================================================== + + We support the following algorithms for both noise prediction model and data prediction model: + - 'singlestep': + Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. + We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). + The total number of function evaluations (NFE) == `steps`. + Given a fixed NFE == `steps`, the sampling procedure is: + - If `order` == 1: + - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. + - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. + - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If `order` == 3: + - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. + - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. + - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. + - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. + - 'multistep': + Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. + We initialize the first `order` values by lower order multistep solvers. + Given a fixed NFE == `steps`, the sampling procedure is: + Denote K = steps. + - If `order` == 1: + - We use K steps of DPM-Solver-1 (i.e. DDIM). + - If `order` == 2: + - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. + - If `order` == 3: + - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. + - 'singlestep_fixed': + Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). 
+ We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. + - 'adaptive': + Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). + We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. + You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs + (NFE) and the sample quality. + - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. + - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. + + ===================================================== + + Some advices for choosing the algorithm: + - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: + Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, + skip_type='time_uniform', method='singlestep') + - For **guided sampling with large guidance scale** by DPMs: + Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. + e.g. + >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) + >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, + skip_type='time_uniform', method='multistep') + + We support three types of `skip_type`: + - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** + - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. + - 'time_quadratic': quadratic time for the time steps. + + ===================================================== + Args: + x: A pytorch tensor. The initial value at time `t_start` + e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. + steps: A `int`. The total number of function evaluations (NFE). + t_start: A `float`. The starting time of the sampling. + If `T` is None, we use self.noise_schedule.T (default is 1.0). + t_end: A `float`. The ending time of the sampling. + If `t_end` is None, we use 1. / self.noise_schedule.total_N. + e.g. if total_N == 1000, we have `t_end` == 1e-3. + For discrete-time DPMs: + - We recommend `t_end` == 1. / self.noise_schedule.total_N. + For continuous-time DPMs: + - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. + order: A `int`. The order of DPM-Solver. + skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. + method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. + denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. + Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). + + This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and + score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID + for diffusion models sampling by diffusion SDEs for low-resolutional images + (such as CIFAR-10). However, we observed that such trick does not matter for + high-resolutional images. As it needs an additional NFE, we do not recommend + it for high-resolutional images. + lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. 
+ Only valid for `method=multistep` and `steps < 15`. We empirically find that + this trick is a key to stabilizing the sampling by DPM-Solver with very few steps + (especially for steps <= 10). So we recommend to set it to be `True`. + solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. + atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. + Returns: + x_end: A pytorch tensor. The approximated solution at time `t_end`. + + """ + t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end + t_T = self.noise_schedule.T if t_start is None else t_start + device = x.device + if method == 'adaptive': + with torch.no_grad(): + x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) + elif method == 'multistep': + assert steps >= order + timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) + assert timesteps.shape[0] - 1 == steps + with torch.no_grad(): + vec_t = timesteps[0].expand((x.shape[0])) + model_prev_list = [self.model_fn(x, vec_t)] + t_prev_list = [vec_t] + # Init the first `order` values by lower order multistep DPM-Solver. + for init_order in range(1, order): + vec_t = timesteps[init_order].expand(x.shape[0]) + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) + model_prev_list.append(self.model_fn(x, vec_t)) + t_prev_list.append(vec_t) + # Compute the remaining values by `order`-th order multistep DPM-Solver. + for step in range(order, steps + 1): + vec_t = timesteps[step].expand(x.shape[0]) + if lower_order_final and steps < 15: + step_order = min(order, steps + 1 - step) + else: + step_order = order + x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) + for i in range(order - 1): + t_prev_list[i] = t_prev_list[i + 1] + model_prev_list[i] = model_prev_list[i + 1] + t_prev_list[-1] = vec_t + # We do not need to evaluate the final model value. 
+ if step < steps: + model_prev_list[-1] = self.model_fn(x, vec_t) + elif method in ['singlestep', 'singlestep_fixed']: + if method == 'singlestep': + timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) + elif method == 'singlestep_fixed': + K = steps // order + orders = [order,] * K + timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) + for i, order in enumerate(orders): + t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] + timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) + lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) + vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) + h = lambda_inner[-1] - lambda_inner[0] + r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h + r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h + x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) + if denoise_to_zero: + x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) + return x + + + +############################################################# +# other utility functions +############################################################# + +def interpolate_fn(x, xp, yp): + """ + A piecewise linear function y = f(x), using xp and yp as keypoints. + We implement f(x) in a differentiable way (i.e. applicable for autograd). + The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) + + Args: + x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). + xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. + yp: PyTorch tensor with shape [C, K]. + Returns: + The function values f(x), with shape [N, C]. + """ + N, K = x.shape[0], xp.shape[1] + all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) + sorted_all_x, x_indices = torch.sort(all_x, dim=2) + x_idx = torch.argmin(x_indices, dim=2) + cand_start_idx = x_idx - 1 + start_idx = torch.where( + torch.eq(x_idx, 0), + torch.tensor(1, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) + start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) + end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) + start_idx2 = torch.where( + torch.eq(x_idx, 0), + torch.tensor(0, device=x.device), + torch.where( + torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, + ), + ) + y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) + start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) + end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) + cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) + return cand + + +def expand_dims(v, dims): + """ + Expand the tensor `v` to the dim `dims`. + + Args: + `v`: a PyTorch tensor with shape [N]. + `dim`: a `int`. + Returns: + a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
+ """ + return v[(...,) + (None,)*(dims - 1)] \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/sampler.py b/ldm/models/diffusion/dpm_solver/sampler.py new file mode 100644 index 00000000..2c42d6f9 --- /dev/null +++ b/ldm/models/diffusion/dpm_solver/sampler.py @@ -0,0 +1,82 @@ +"""SAMPLING ONLY.""" + +import torch + +from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver + + +class DPMSolverSampler(object): + def __init__(self, model, **kwargs): + super().__init__() + self.model = model + to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) + self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... + **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + + # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') + + device = self.model.betas.device + if x_T is None: + img = torch.randn(size, device=device) + else: + img = x_T + + ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) + + model_fn = model_wrapper( + lambda x, t, c: self.model.apply_model(x, t, c), + ns, + model_type="noise", + guidance_type="classifier-free", + condition=conditioning, + unconditional_condition=unconditional_conditioning, + guidance_scale=unconditional_guidance_scale, + ) + + dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) + x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) + + return x.to(device), None diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py new file mode 100644 index 00000000..78eeb100 --- /dev/null +++ b/ldm/models/diffusion/plms.py @@ -0,0 +1,236 @@ +"""SAMPLING ONLY.""" + +import torch +import numpy as np +from tqdm import tqdm +from functools import partial + +from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like + + +class PLMSSampler(object): + def __init__(self, model, schedule="linear", **kwargs): + super().__init__() + self.model = model + self.ddpm_num_timesteps = model.num_timesteps + self.schedule = schedule + + def register_buffer(self, name, attr): + if type(attr) == torch.Tensor: + if attr.device != torch.device("cuda"): + attr = attr.to(torch.device("cuda")) + setattr(self, name, attr) + + def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): + if ddim_eta != 0: + raise ValueError('ddim_eta must be 0 for 
PLMS') + self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, + num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) + alphas_cumprod = self.model.alphas_cumprod + assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' + to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) + + self.register_buffer('betas', to_torch(self.model.betas)) + self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) + self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) + + # calculations for diffusion q(x_t | x_{t-1}) and others + self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) + self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) + self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) + self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) + + # ddim sampling parameters + ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), + ddim_timesteps=self.ddim_timesteps, + eta=ddim_eta,verbose=verbose) + self.register_buffer('ddim_sigmas', ddim_sigmas) + self.register_buffer('ddim_alphas', ddim_alphas) + self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) + self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) + sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( + (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( + 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) + self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) + + @torch.no_grad() + def sample(self, + S, + batch_size, + shape, + conditioning=None, + callback=None, + normals_sequence=None, + img_callback=None, + quantize_x0=False, + eta=0., + mask=None, + x0=None, + temperature=1., + noise_dropout=0., + score_corrector=None, + corrector_kwargs=None, + verbose=True, + x_T=None, + log_every_t=100, + unconditional_guidance_scale=1., + unconditional_conditioning=None, + # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
+ **kwargs + ): + if conditioning is not None: + if isinstance(conditioning, dict): + cbs = conditioning[list(conditioning.keys())[0]].shape[0] + if cbs != batch_size: + print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") + else: + if conditioning.shape[0] != batch_size: + print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") + + self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) + # sampling + C, H, W = shape + size = (batch_size, C, H, W) + print(f'Data shape for PLMS sampling is {size}') + + samples, intermediates = self.plms_sampling(conditioning, size, + callback=callback, + img_callback=img_callback, + quantize_denoised=quantize_x0, + mask=mask, x0=x0, + ddim_use_original_steps=False, + noise_dropout=noise_dropout, + temperature=temperature, + score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + x_T=x_T, + log_every_t=log_every_t, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + ) + return samples, intermediates + + @torch.no_grad() + def plms_sampling(self, cond, shape, + x_T=None, ddim_use_original_steps=False, + callback=None, timesteps=None, quantize_denoised=False, + mask=None, x0=None, img_callback=None, log_every_t=100, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None,): + device = self.model.betas.device + b = shape[0] + if x_T is None: + img = torch.randn(shape, device=device) + else: + img = x_T + + if timesteps is None: + timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps + elif timesteps is not None and not ddim_use_original_steps: + subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 + timesteps = self.ddim_timesteps[:subset_end] + + intermediates = {'x_inter': [img], 'pred_x0': [img]} + time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) + total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] + print(f"Running PLMS Sampling with {total_steps} timesteps") + + iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) + old_eps = [] + + for i, step in enumerate(iterator): + index = total_steps - i - 1 + ts = torch.full((b,), step, device=device, dtype=torch.long) + ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) + + if mask is not None: + assert x0 is not None + img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? + img = img_orig * mask + (1. 
- mask) * img + + outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, + quantize_denoised=quantize_denoised, temperature=temperature, + noise_dropout=noise_dropout, score_corrector=score_corrector, + corrector_kwargs=corrector_kwargs, + unconditional_guidance_scale=unconditional_guidance_scale, + unconditional_conditioning=unconditional_conditioning, + old_eps=old_eps, t_next=ts_next) + img, pred_x0, e_t = outs + old_eps.append(e_t) + if len(old_eps) >= 4: + old_eps.pop(0) + if callback: callback(i) + if img_callback: img_callback(pred_x0, i) + + if index % log_every_t == 0 or index == total_steps - 1: + intermediates['x_inter'].append(img) + intermediates['pred_x0'].append(pred_x0) + + return img, intermediates + + @torch.no_grad() + def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, + temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, + unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): + b, *_, device = *x.shape, x.device + + def get_model_output(x, t): + if unconditional_conditioning is None or unconditional_guidance_scale == 1.: + e_t = self.model.apply_model(x, t, c) + else: + x_in = torch.cat([x] * 2) + t_in = torch.cat([t] * 2) + c_in = torch.cat([unconditional_conditioning, c]) + e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) + e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) + + if score_corrector is not None: + assert self.model.parameterization == "eps" + e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) + + return e_t + + alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas + alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev + sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas + sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas + + def get_x_prev_and_pred_x0(e_t, index): + # select parameters corresponding to the currently considered timestep + a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) + a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) + sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) + sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) + + # current prediction for x_0 + pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() + if quantize_denoised: + pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) + # direction pointing to x_t + dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t + noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature + if noise_dropout > 0.: + noise = torch.nn.functional.dropout(noise, p=noise_dropout) + x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise + return x_prev, pred_x0 + + e_t = get_model_output(x, t) + if len(old_eps) == 0: + # Pseudo Improved Euler (2nd order) + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) + e_t_next = get_model_output(x_prev, t_next) + e_t_prime = (e_t + e_t_next) / 2 + elif len(old_eps) == 1: + # 2nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (3 * e_t - old_eps[-1]) / 2 + elif len(old_eps) == 2: + # 3nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 + elif len(old_eps) >= 3: + # 4nd order Pseudo Linear Multistep (Adams-Bashforth) + e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 + + x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) + + return x_prev, pred_x0, e_t diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py new file mode 100644 index 00000000..f4eff39c --- /dev/null +++ b/ldm/modules/attention.py @@ -0,0 +1,261 @@ +from inspect import isfunction +import math +import torch +import torch.nn.functional as F +from torch import nn, einsum +from einops import rearrange, repeat + +from ldm.modules.diffusionmodules.util import checkpoint + + +def exists(val): + return val is not None + + +def uniq(arr): + return{el: True for el in arr}.keys() + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def max_neg_value(t): + return -torch.finfo(t.dtype).max + + +def init_(tensor): + dim = tensor.shape[-1] + std = 1 / math.sqrt(dim) + tensor.uniform_(-std, std) + return tensor + + +# feedforward +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. 
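+    Used below for output projections (e.g. SpatialTransformer.proj_out) so that a freshly
+    initialised block initially adds nothing to its residual connection.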
+ """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def Normalize(in_channels): + return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) + + +class LinearAttention(nn.Module): + def __init__(self, dim, heads=4, dim_head=32): + super().__init__() + self.heads = heads + hidden_dim = dim_head * heads + self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) + self.to_out = nn.Conv2d(hidden_dim, dim, 1) + + def forward(self, x): + b, c, h, w = x.shape + qkv = self.to_qkv(x) + q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) + k = k.softmax(dim=-1) + context = torch.einsum('bhdn,bhen->bhde', k, v) + out = torch.einsum('bhde,bhdn->bhen', context, q) + out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) + return self.to_out(out) + + +class SpatialSelfAttention(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = rearrange(q, 'b c h w -> b (h w) c') + k = rearrange(k, 'b c h w -> b c (h w)') + w_ = torch.einsum('bij,bjk->bik', q, k) + + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = rearrange(v, 'b c h w -> b c (h w)') + w_ = rearrange(w_, 'b i j -> b j i') + h_ = torch.einsum('bij,bjk->bik', v, w_) + h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) + h_ = self.proj_out(h_) + + return x+h_ + + +class CrossAttention(nn.Module): + def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): + super().__init__() + inner_dim = dim_head * heads + context_dim = default(context_dim, query_dim) + + self.scale = dim_head ** -0.5 + self.heads = heads + + self.to_q = nn.Linear(query_dim, inner_dim, bias=False) + self.to_k = nn.Linear(context_dim, inner_dim, bias=False) + self.to_v = nn.Linear(context_dim, inner_dim, bias=False) + + self.to_out = nn.Sequential( + nn.Linear(inner_dim, query_dim), + nn.Dropout(dropout) + ) + + def forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if exists(mask): + mask = rearrange(mask, 'b ... 
-> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +class BasicTransformerBlock(nn.Module): + def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): + super().__init__() + self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention + self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) + self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, + heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none + self.norm1 = nn.LayerNorm(dim) + self.norm2 = nn.LayerNorm(dim) + self.norm3 = nn.LayerNorm(dim) + self.checkpoint = checkpoint + + def forward(self, x, context=None): + return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) + + def _forward(self, x, context=None): + x = self.attn1(self.norm1(x)) + x + x = self.attn2(self.norm2(x), context=context) + x + x = self.ff(self.norm3(x)) + x + return x + + +class SpatialTransformer(nn.Module): + """ + Transformer block for image-like data. + First, project the input (aka embedding) + and reshape to b, t, d. + Then apply standard transformer action. + Finally, reshape to image + """ + def __init__(self, in_channels, n_heads, d_head, + depth=1, dropout=0., context_dim=None): + super().__init__() + self.in_channels = in_channels + inner_dim = n_heads * d_head + self.norm = Normalize(in_channels) + + self.proj_in = nn.Conv2d(in_channels, + inner_dim, + kernel_size=1, + stride=1, + padding=0) + + self.transformer_blocks = nn.ModuleList( + [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) + for d in range(depth)] + ) + + self.proj_out = zero_module(nn.Conv2d(inner_dim, + in_channels, + kernel_size=1, + stride=1, + padding=0)) + + def forward(self, x, context=None): + # note: if no context is given, cross-attention defaults to self-attention + b, c, h, w = x.shape + x_in = x + x = self.norm(x) + x = self.proj_in(x) + x = rearrange(x, 'b c h w -> b (h w) c') + for block in self.transformer_blocks: + x = block(x, context=context) + x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) + x = self.proj_out(x) + return x + x_in \ No newline at end of file diff --git a/ldm/modules/diffusionmodules/__init__.py b/ldm/modules/diffusionmodules/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py new file mode 100644 index 00000000..533e589a --- /dev/null +++ b/ldm/modules/diffusionmodules/model.py @@ -0,0 +1,835 @@ +# pytorch_diffusion + derived encoder decoder +import math +import torch +import torch.nn as nn +import numpy as np +from einops import rearrange + +from ldm.util import instantiate_from_config +from ldm.modules.attention import LinearAttention + + +def get_timestep_embedding(timesteps, embedding_dim): + """ + This matches the implementation in Denoising Diffusion Probabilistic Models: + From Fairseq. + Build sinusoidal embeddings. + This matches the implementation in tensor2tensor, but differs slightly + from the description in Section 3.5 of "Attention Is All You Need". 
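+    Roughly: emb(t) = concat(sin(t * w), cos(t * w)) with frequencies
+    w_i = 10000^(-i / (half_dim - 1)) for i in [0, half_dim), where half_dim = embedding_dim // 2.
+    Illustrative example: embedding_dim=4, t=1 yields [sin(1), sin(1e-4), cos(1), cos(1e-4)].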
+ """ + assert len(timesteps.shape) == 1 + + half_dim = embedding_dim // 2 + emb = math.log(10000) / (half_dim - 1) + emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) + emb = emb.to(device=timesteps.device) + emb = timesteps.float()[:, None] * emb[None, :] + emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) + if embedding_dim % 2 == 1: # zero pad + emb = torch.nn.functional.pad(emb, (0,1,0,0)) + return emb + + +def nonlinearity(x): + # swish + return x*torch.sigmoid(x) + + +def Normalize(in_channels, num_groups=32): + return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) + + +class Upsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") + if self.with_conv: + x = self.conv(x) + return x + + +class Downsample(nn.Module): + def __init__(self, in_channels, with_conv): + super().__init__() + self.with_conv = with_conv + if self.with_conv: + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=3, + stride=2, + padding=0) + + def forward(self, x): + if self.with_conv: + pad = (0,1,0,1) + x = torch.nn.functional.pad(x, pad, mode="constant", value=0) + x = self.conv(x) + else: + x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) + return x + + +class ResnetBlock(nn.Module): + def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, + dropout, temb_channels=512): + super().__init__() + self.in_channels = in_channels + out_channels = in_channels if out_channels is None else out_channels + self.out_channels = out_channels + self.use_conv_shortcut = conv_shortcut + + self.norm1 = Normalize(in_channels) + self.conv1 = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if temb_channels > 0: + self.temb_proj = torch.nn.Linear(temb_channels, + out_channels) + self.norm2 = Normalize(out_channels) + self.dropout = torch.nn.Dropout(dropout) + self.conv2 = torch.nn.Conv2d(out_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + self.conv_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + else: + self.nin_shortcut = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=1, + stride=1, + padding=0) + + def forward(self, x, temb): + h = x + h = self.norm1(h) + h = nonlinearity(h) + h = self.conv1(h) + + if temb is not None: + h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] + + h = self.norm2(h) + h = nonlinearity(h) + h = self.dropout(h) + h = self.conv2(h) + + if self.in_channels != self.out_channels: + if self.use_conv_shortcut: + x = self.conv_shortcut(x) + else: + x = self.nin_shortcut(x) + + return x+h + + +class LinAttnBlock(LinearAttention): + """to match AttnBlock usage""" + def __init__(self, in_channels): + super().__init__(dim=in_channels, heads=1, dim_head=in_channels) + + +class AttnBlock(nn.Module): + def __init__(self, in_channels): + super().__init__() + self.in_channels = in_channels + + self.norm = Normalize(in_channels) + self.q = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.k = 
torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.v = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + self.proj_out = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=1, + stride=1, + padding=0) + + + def forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + + # compute attention + b,c,h,w = q.shape + q = q.reshape(b,c,h*w) + q = q.permute(0,2,1) # b,hw,c + k = k.reshape(b,c,h*w) # b,c,hw + w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w_ = w_ * (int(c)**(-0.5)) + w_ = torch.nn.functional.softmax(w_, dim=2) + + # attend to values + v = v.reshape(b,c,h*w) + w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) + h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + h_ = h_.reshape(b,c,h,w) + + h_ = self.proj_out(h_) + + return x+h_ + + +def make_attn(in_channels, attn_type="vanilla"): + assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' + print(f"making attention of type '{attn_type}' with {in_channels} in_channels") + if attn_type == "vanilla": + return AttnBlock(in_channels) + elif attn_type == "none": + return nn.Identity(in_channels) + else: + return LinAttnBlock(in_channels) + + +class Model(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = self.ch*4 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + self.use_timestep = use_timestep + if self.use_timestep: + # timestep embedding + self.temb = nn.Module() + self.temb.dense = nn.ModuleList([ + torch.nn.Linear(self.ch, + self.temb_ch), + torch.nn.Linear(self.temb_ch, + self.temb_ch), + ]) + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + skip_in = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + 
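+                # the extra (num_res_blocks + 1)-th block takes the skip activation saved
+                # before this level's res blocks ran, hence the in_ch_mult channel count below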
if i_block == self.num_res_blocks: + skip_in = ch*in_ch_mult[i_level] + block.append(ResnetBlock(in_channels=block_in+skip_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x, t=None, context=None): + #assert x.shape[2] == x.shape[3] == self.resolution + if context is not None: + # assume aligned context, cat along channel axis + x = torch.cat((x, context), dim=1) + if self.use_timestep: + # timestep embedding + assert t is not None + temb = get_timestep_embedding(t, self.ch) + temb = self.temb.dense[0](temb) + temb = nonlinearity(temb) + temb = self.temb.dense[1](temb) + else: + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block]( + torch.cat([h, hs.pop()], dim=1), temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + def get_last_layer(self): + return self.conv_out.weight + + +class Encoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", + **ignore_kwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + + # downsampling + self.conv_in = torch.nn.Conv2d(in_channels, + self.ch, + kernel_size=3, + stride=1, + padding=1) + + curr_res = resolution + in_ch_mult = (1,)+tuple(ch_mult) + self.in_ch_mult = in_ch_mult + self.down = nn.ModuleList() + for i_level in range(self.num_resolutions): + block = nn.ModuleList() + attn = nn.ModuleList() + block_in = ch*in_ch_mult[i_level] + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + down = nn.Module() + down.block = block + down.attn = attn + if i_level != self.num_resolutions-1: + down.downsample = Downsample(block_in, resamp_with_conv) + curr_res = curr_res // 2 + self.down.append(down) + + # middle + self.mid = nn.Module() + 
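+        # bottleneck at the lowest resolution: ResNet block -> self-attention -> ResNet block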
self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + 2*z_channels if double_z else z_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # timestep embedding + temb = None + + # downsampling + hs = [self.conv_in(x)] + for i_level in range(self.num_resolutions): + for i_block in range(self.num_res_blocks): + h = self.down[i_level].block[i_block](hs[-1], temb) + if len(self.down[i_level].attn) > 0: + h = self.down[i_level].attn[i_block](h) + hs.append(h) + if i_level != self.num_resolutions-1: + hs.append(self.down[i_level].downsample(hs[-1])) + + # middle + h = hs[-1] + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # end + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class Decoder(nn.Module): + def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, + resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, + attn_type="vanilla", **ignorekwargs): + super().__init__() + if use_linear_attn: attn_type = "linear" + self.ch = ch + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + self.resolution = resolution + self.in_channels = in_channels + self.give_pre_end = give_pre_end + self.tanh_out = tanh_out + + # compute in_ch_mult, block_in and curr_res at lowest res + in_ch_mult = (1,)+tuple(ch_mult) + block_in = ch*ch_mult[self.num_resolutions-1] + curr_res = resolution // 2**(self.num_resolutions-1) + self.z_shape = (1,z_channels,curr_res,curr_res) + print("Working with z of shape {} = {} dimensions.".format( + self.z_shape, np.prod(self.z_shape))) + + # z to block_in + self.conv_in = torch.nn.Conv2d(z_channels, + block_in, + kernel_size=3, + stride=1, + padding=1) + + # middle + self.mid = nn.Module() + self.mid.block_1 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) + self.mid.block_2 = ResnetBlock(in_channels=block_in, + out_channels=block_in, + temb_channels=self.temb_ch, + dropout=dropout) + + # upsampling + self.up = nn.ModuleList() + for i_level in reversed(range(self.num_resolutions)): + block = nn.ModuleList() + attn = nn.ModuleList() + block_out = ch*ch_mult[i_level] + for i_block in range(self.num_res_blocks+1): + block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + if curr_res in attn_resolutions: + attn.append(make_attn(block_in, attn_type=attn_type)) + up = nn.Module() + up.block = block + up.attn = attn + if i_level != 0: + up.upsample = Upsample(block_in, resamp_with_conv) + curr_res = curr_res * 2 + self.up.insert(0, up) # prepend to get consistent order + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_ch, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, z): + #assert z.shape[1:] == self.z_shape[1:] + self.last_z_shape = z.shape + + # timestep embedding + temb = None + + # z to block_in + h = 
self.conv_in(z) + + # middle + h = self.mid.block_1(h, temb) + h = self.mid.attn_1(h) + h = self.mid.block_2(h, temb) + + # upsampling + for i_level in reversed(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks+1): + h = self.up[i_level].block[i_block](h, temb) + if len(self.up[i_level].attn) > 0: + h = self.up[i_level].attn[i_block](h) + if i_level != 0: + h = self.up[i_level].upsample(h) + + # end + if self.give_pre_end: + return h + + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + if self.tanh_out: + h = torch.tanh(h) + return h + + +class SimpleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, *args, **kwargs): + super().__init__() + self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), + ResnetBlock(in_channels=in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=2 * in_channels, + out_channels=4 * in_channels, + temb_channels=0, dropout=0.0), + ResnetBlock(in_channels=4 * in_channels, + out_channels=2 * in_channels, + temb_channels=0, dropout=0.0), + nn.Conv2d(2*in_channels, in_channels, 1), + Upsample(in_channels, with_conv=True)]) + # end + self.norm_out = Normalize(in_channels) + self.conv_out = torch.nn.Conv2d(in_channels, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + for i, layer in enumerate(self.model): + if i in [1,2,3]: + x = layer(x, None) + else: + x = layer(x) + + h = self.norm_out(x) + h = nonlinearity(h) + x = self.conv_out(h) + return x + + +class UpsampleDecoder(nn.Module): + def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, + ch_mult=(2,2), dropout=0.0): + super().__init__() + # upsampling + self.temb_ch = 0 + self.num_resolutions = len(ch_mult) + self.num_res_blocks = num_res_blocks + block_in = in_channels + curr_res = resolution // 2 ** (self.num_resolutions - 1) + self.res_blocks = nn.ModuleList() + self.upsample_blocks = nn.ModuleList() + for i_level in range(self.num_resolutions): + res_block = [] + block_out = ch * ch_mult[i_level] + for i_block in range(self.num_res_blocks + 1): + res_block.append(ResnetBlock(in_channels=block_in, + out_channels=block_out, + temb_channels=self.temb_ch, + dropout=dropout)) + block_in = block_out + self.res_blocks.append(nn.ModuleList(res_block)) + if i_level != self.num_resolutions - 1: + self.upsample_blocks.append(Upsample(block_in, True)) + curr_res = curr_res * 2 + + # end + self.norm_out = Normalize(block_in) + self.conv_out = torch.nn.Conv2d(block_in, + out_channels, + kernel_size=3, + stride=1, + padding=1) + + def forward(self, x): + # upsampling + h = x + for k, i_level in enumerate(range(self.num_resolutions)): + for i_block in range(self.num_res_blocks + 1): + h = self.res_blocks[i_level][i_block](h, None) + if i_level != self.num_resolutions - 1: + h = self.upsample_blocks[k](h) + h = self.norm_out(h) + h = nonlinearity(h) + h = self.conv_out(h) + return h + + +class LatentRescaler(nn.Module): + def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): + super().__init__() + # residual block, interpolate, residual block + self.factor = factor + self.conv_in = nn.Conv2d(in_channels, + mid_channels, + kernel_size=3, + stride=1, + padding=1) + self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, + out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + self.attn = AttnBlock(mid_channels) + self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, 
+ out_channels=mid_channels, + temb_channels=0, + dropout=0.0) for _ in range(depth)]) + + self.conv_out = nn.Conv2d(mid_channels, + out_channels, + kernel_size=1, + ) + + def forward(self, x): + x = self.conv_in(x) + for block in self.res_block1: + x = block(x, None) + x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) + x = self.attn(x) + for block in self.res_block2: + x = block(x, None) + x = self.conv_out(x) + return x + + +class MergedRescaleEncoder(nn.Module): + def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, + attn_resolutions, dropout=0.0, resamp_with_conv=True, + ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + intermediate_chn = ch * ch_mult[-1] + self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, + z_channels=intermediate_chn, double_z=False, resolution=resolution, + attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, + out_ch=None) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, + mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) + + def forward(self, x): + x = self.encoder(x) + x = self.rescaler(x) + return x + + +class MergedRescaleDecoder(nn.Module): + def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), + dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): + super().__init__() + tmp_chn = z_channels*ch_mult[-1] + self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, + resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, + ch_mult=ch_mult, resolution=resolution, ch=ch) + self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, + out_channels=tmp_chn, depth=rescale_module_depth) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Upsampler(nn.Module): + def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): + super().__init__() + assert out_size >= in_size + num_blocks = int(np.log2(out_size//in_size))+1 + factor_up = 1.+ (out_size % in_size) + print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") + self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, + out_channels=in_channels) + self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, + attn_resolutions=[], in_channels=None, ch=in_channels, + ch_mult=[ch_mult for _ in range(num_blocks)]) + + def forward(self, x): + x = self.rescaler(x) + x = self.decoder(x) + return x + + +class Resize(nn.Module): + def __init__(self, in_channels=None, learned=False, mode="bilinear"): + super().__init__() + self.with_conv = learned + self.mode = mode + if self.with_conv: + print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") + raise NotImplementedError() + assert in_channels is not None + # no asymmetric padding in torch conv, must do it ourselves + self.conv = torch.nn.Conv2d(in_channels, + in_channels, + kernel_size=4, + stride=2, + padding=1) + + def forward(self, x, scale_factor=1.0): + if scale_factor==1.0: + return x + else: + x = torch.nn.functional.interpolate(x, 
mode=self.mode, align_corners=False, scale_factor=scale_factor) + return x + +class FirstStagePostProcessor(nn.Module): + + def __init__(self, ch_mult:list, in_channels, + pretrained_model:nn.Module=None, + reshape=False, + n_channels=None, + dropout=0., + pretrained_config=None): + super().__init__() + if pretrained_config is None: + assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.pretrained_model = pretrained_model + else: + assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' + self.instantiate_pretrained(pretrained_config) + + self.do_reshape = reshape + + if n_channels is None: + n_channels = self.pretrained_model.encoder.ch + + self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) + self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, + stride=1,padding=1) + + blocks = [] + downs = [] + ch_in = n_channels + for m in ch_mult: + blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) + ch_in = m * n_channels + downs.append(Downsample(ch_in, with_conv=False)) + + self.model = nn.ModuleList(blocks) + self.downsampler = nn.ModuleList(downs) + + + def instantiate_pretrained(self, config): + model = instantiate_from_config(config) + self.pretrained_model = model.eval() + # self.pretrained_model.train = False + for param in self.pretrained_model.parameters(): + param.requires_grad = False + + + @torch.no_grad() + def encode_with_pretrained(self,x): + c = self.pretrained_model.encode(x) + if isinstance(c, DiagonalGaussianDistribution): + c = c.mode() + return c + + def forward(self,x): + z_fs = self.encode_with_pretrained(x) + z = self.proj_norm(z_fs) + z = self.proj(z) + z = nonlinearity(z) + + for submodel, downmodel in zip(self.model,self.downsampler): + z = submodel(z,temb=None) + z = downmodel(z) + + if self.do_reshape: + z = rearrange(z,'b c h w -> b (h w) c') + return z + diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py new file mode 100644 index 00000000..fcf95d1e --- /dev/null +++ b/ldm/modules/diffusionmodules/openaimodel.py @@ -0,0 +1,961 @@ +from abc import abstractmethod +from functools import partial +import math +from typing import Iterable + +import numpy as np +import torch as th +import torch.nn as nn +import torch.nn.functional as F + +from ldm.modules.diffusionmodules.util import ( + checkpoint, + conv_nd, + linear, + avg_pool_nd, + zero_module, + normalization, + timestep_embedding, +) +from ldm.modules.attention import SpatialTransformer + + +# dummy replace +def convert_module_to_f16(x): + pass + +def convert_module_to_f32(x): + pass + + +## go +class AttentionPool2d(nn.Module): + """ + Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py + """ + + def __init__( + self, + spacial_dim: int, + embed_dim: int, + num_heads_channels: int, + output_dim: int = None, + ): + super().__init__() + self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) + self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) + self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) + self.num_heads = embed_dim // num_heads_channels + self.attention = QKVAttention(self.num_heads) + + def forward(self, x): + b, c, *_spatial = x.shape + x = x.reshape(b, c, -1) # NC(HW) + x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) + x = x + self.positional_embedding[None, :, :].to(x.dtype) # NC(HW+1) + 
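+        # index 0 of the sequence is the prepended mean token; after attention it is
+        # read out as x[:, :, 0] below, i.e. it acts as the pooling query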
x = self.qkv_proj(x) + x = self.attention(x) + x = self.c_proj(x) + return x[:, :, 0] + + +class TimestepBlock(nn.Module): + """ + Any module where forward() takes timestep embeddings as a second argument. + """ + + @abstractmethod + def forward(self, x, emb): + """ + Apply the module to `x` given `emb` timestep embeddings. + """ + + +class TimestepEmbedSequential(nn.Sequential, TimestepBlock): + """ + A sequential module that passes timestep embeddings to the children that + support it as an extra input. + """ + + def forward(self, x, emb, context=None): + for layer in self: + if isinstance(layer, TimestepBlock): + x = layer(x, emb) + elif isinstance(layer, SpatialTransformer): + x = layer(x, context) + else: + x = layer(x) + return x + + +class Upsample(nn.Module): + """ + An upsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + upsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + if use_conv: + self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) + + def forward(self, x): + assert x.shape[1] == self.channels + if self.dims == 3: + x = F.interpolate( + x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" + ) + else: + x = F.interpolate(x, scale_factor=2, mode="nearest") + if self.use_conv: + x = self.conv(x) + return x + +class TransposedUpsample(nn.Module): + 'Learned 2x upsampling without padding' + def __init__(self, channels, out_channels=None, ks=5): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + + self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) + + def forward(self,x): + return self.up(x) + + +class Downsample(nn.Module): + """ + A downsampling layer with an optional convolution. + :param channels: channels in the inputs and outputs. + :param use_conv: a bool determining if a convolution is applied. + :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then + downsampling occurs in the inner-two dimensions. + """ + + def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): + super().__init__() + self.channels = channels + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.dims = dims + stride = 2 if dims != 3 else (1, 2, 2) + if use_conv: + self.op = conv_nd( + dims, self.channels, self.out_channels, 3, stride=stride, padding=padding + ) + else: + assert self.channels == self.out_channels + self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) + + def forward(self, x): + assert x.shape[1] == self.channels + return self.op(x) + + +class ResBlock(TimestepBlock): + """ + A residual block that can optionally change the number of channels. + :param channels: the number of input channels. + :param emb_channels: the number of timestep embedding channels. + :param dropout: the rate of dropout. + :param out_channels: if specified, the number of out channels. + :param use_conv: if True and out_channels is specified, use a spatial + convolution instead of a smaller 1x1 convolution to change the + channels in the skip connection. + :param dims: determines if the signal is 1D, 2D, or 3D. 
+ :param use_checkpoint: if True, use gradient checkpointing on this module. + :param up: if True, use this block for upsampling. + :param down: if True, use this block for downsampling. + """ + + def __init__( + self, + channels, + emb_channels, + dropout, + out_channels=None, + use_conv=False, + use_scale_shift_norm=False, + dims=2, + use_checkpoint=False, + up=False, + down=False, + ): + super().__init__() + self.channels = channels + self.emb_channels = emb_channels + self.dropout = dropout + self.out_channels = out_channels or channels + self.use_conv = use_conv + self.use_checkpoint = use_checkpoint + self.use_scale_shift_norm = use_scale_shift_norm + + self.in_layers = nn.Sequential( + normalization(channels), + nn.SiLU(), + conv_nd(dims, channels, self.out_channels, 3, padding=1), + ) + + self.updown = up or down + + if up: + self.h_upd = Upsample(channels, False, dims) + self.x_upd = Upsample(channels, False, dims) + elif down: + self.h_upd = Downsample(channels, False, dims) + self.x_upd = Downsample(channels, False, dims) + else: + self.h_upd = self.x_upd = nn.Identity() + + self.emb_layers = nn.Sequential( + nn.SiLU(), + linear( + emb_channels, + 2 * self.out_channels if use_scale_shift_norm else self.out_channels, + ), + ) + self.out_layers = nn.Sequential( + normalization(self.out_channels), + nn.SiLU(), + nn.Dropout(p=dropout), + zero_module( + conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) + ), + ) + + if self.out_channels == channels: + self.skip_connection = nn.Identity() + elif use_conv: + self.skip_connection = conv_nd( + dims, channels, self.out_channels, 3, padding=1 + ) + else: + self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) + + def forward(self, x, emb): + """ + Apply the block to a Tensor, conditioned on a timestep embedding. + :param x: an [N x C x ...] Tensor of features. + :param emb: an [N x emb_channels] Tensor of timestep embeddings. + :return: an [N x C x ...] Tensor of outputs. + """ + return checkpoint( + self._forward, (x, emb), self.parameters(), self.use_checkpoint + ) + + + def _forward(self, x, emb): + if self.updown: + in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] + h = in_rest(x) + h = self.h_upd(h) + x = self.x_upd(x) + h = in_conv(h) + else: + h = self.in_layers(x) + emb_out = self.emb_layers(emb).type(h.dtype) + while len(emb_out.shape) < len(h.shape): + emb_out = emb_out[..., None] + if self.use_scale_shift_norm: + out_norm, out_rest = self.out_layers[0], self.out_layers[1:] + scale, shift = th.chunk(emb_out, 2, dim=1) + h = out_norm(h) * (1 + scale) + shift + h = out_rest(h) + else: + h = h + emb_out + h = self.out_layers(h) + return self.skip_connection(x) + h + + +class AttentionBlock(nn.Module): + """ + An attention block that allows spatial positions to attend to each other. + Originally ported from here, but adapted to the N-d case. + https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
+ """ + + def __init__( + self, + channels, + num_heads=1, + num_head_channels=-1, + use_checkpoint=False, + use_new_attention_order=False, + ): + super().__init__() + self.channels = channels + if num_head_channels == -1: + self.num_heads = num_heads + else: + assert ( + channels % num_head_channels == 0 + ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" + self.num_heads = channels // num_head_channels + self.use_checkpoint = use_checkpoint + self.norm = normalization(channels) + self.qkv = conv_nd(1, channels, channels * 3, 1) + if use_new_attention_order: + # split qkv before split heads + self.attention = QKVAttention(self.num_heads) + else: + # split heads before split qkv + self.attention = QKVAttentionLegacy(self.num_heads) + + self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) + + def forward(self, x): + return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! + #return pt_checkpoint(self._forward, x) # pytorch + + def _forward(self, x): + b, c, *spatial = x.shape + x = x.reshape(b, c, -1) + qkv = self.qkv(self.norm(x)) + h = self.attention(qkv) + h = self.proj_out(h) + return (x + h).reshape(b, c, *spatial) + + +def count_flops_attn(model, _x, y): + """ + A counter for the `thop` package to count the operations in an + attention operation. + Meant to be used like: + macs, params = thop.profile( + model, + inputs=(inputs, timestamps), + custom_ops={QKVAttention: QKVAttention.count_flops}, + ) + """ + b, c, *spatial = y[0].shape + num_spatial = int(np.prod(spatial)) + # We perform two matmuls with the same number of ops. + # The first computes the weight matrix, the second computes + # the combination of the value vectors. + matmul_ops = 2 * b * (num_spatial ** 2) * c + model.total_ops += th.DoubleTensor([matmul_ops]) + + +class QKVAttentionLegacy(nn.Module): + """ + A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. + """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", q * scale, k * scale + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class QKVAttention(nn.Module): + """ + A module which performs QKV attention and splits in a different order. + """ + + def __init__(self, n_heads): + super().__init__() + self.n_heads = n_heads + + def forward(self, qkv): + """ + Apply QKV attention. + :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. + :return: an [N x (H * C) x T] tensor after attention. 
+ """ + bs, width, length = qkv.shape + assert width % (3 * self.n_heads) == 0 + ch = width // (3 * self.n_heads) + q, k, v = qkv.chunk(3, dim=1) + scale = 1 / math.sqrt(math.sqrt(ch)) + weight = th.einsum( + "bct,bcs->bts", + (q * scale).view(bs * self.n_heads, ch, length), + (k * scale).view(bs * self.n_heads, ch, length), + ) # More stable with f16 than dividing afterwards + weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) + a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) + return a.reshape(bs, -1, length) + + @staticmethod + def count_flops(model, _x, y): + return count_flops_attn(model, _x, y) + + +class UNetModel(nn.Module): + """ + The full UNet model with attention and timestep embedding. + :param in_channels: channels in the input Tensor. + :param model_channels: base channel count for the model. + :param out_channels: channels in the output Tensor. + :param num_res_blocks: number of residual blocks per downsample. + :param attention_resolutions: a collection of downsample rates at which + attention will take place. May be a set, list, or tuple. + For example, if this contains 4, then at 4x downsampling, attention + will be used. + :param dropout: the dropout probability. + :param channel_mult: channel multiplier for each level of the UNet. + :param conv_resample: if True, use learned convolutions for upsampling and + downsampling. + :param dims: determines if the signal is 1D, 2D, or 3D. + :param num_classes: if specified (as an int), then this model will be + class-conditional with `num_classes` classes. + :param use_checkpoint: use gradient checkpointing to reduce memory usage. + :param num_heads: the number of attention heads in each attention layer. + :param num_heads_channels: if specified, ignore num_heads and instead use + a fixed channel width per attention head. + :param num_heads_upsample: works with num_heads to set a different number + of heads for upsampling. Deprecated. + :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. + :param resblock_updown: use residual blocks for up/downsampling. + :param use_new_attention_order: use a different attention pattern for potentially + increased efficiency. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + num_classes=None, + use_checkpoint=False, + use_fp16=False, + num_heads=-1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + use_spatial_transformer=False, # custom transformer support + transformer_depth=1, # custom transformer support + context_dim=None, # custom transformer support + n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model + legacy=True, + ): + super().__init__() + if use_spatial_transformer: + assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' + + if context_dim is not None: + assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
+ from omegaconf.listconfig import ListConfig + if type(context_dim) == ListConfig: + context_dim = list(context_dim) + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + if num_heads == -1: + assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' + + if num_head_channels == -1: + assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' + + self.image_size = image_size + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.num_classes = num_classes + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + self.predict_codebook_ids = n_embed is not None + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + if self.num_classes is not None: + self.label_emb = nn.Embedding(num_classes, time_embed_dim) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + 
num_heads=num_heads, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + + self.output_blocks = nn.ModuleList([]) + for level, mult in list(enumerate(channel_mult))[::-1]: + for i in range(num_res_blocks + 1): + ich = input_block_chans.pop() + layers = [ + ResBlock( + ch + ich, + time_embed_dim, + dropout, + out_channels=model_channels * mult, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = model_channels * mult + if ds in attention_resolutions: + if num_head_channels == -1: + dim_head = ch // num_heads + else: + num_heads = ch // num_head_channels + dim_head = num_head_channels + if legacy: + #num_heads = 1 + dim_head = ch // num_heads if use_spatial_transformer else num_head_channels + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads_upsample, + num_head_channels=dim_head, + use_new_attention_order=use_new_attention_order, + ) if not use_spatial_transformer else SpatialTransformer( + ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim + ) + ) + if level and i == num_res_blocks: + out_ch = ch + layers.append( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + up=True, + ) + if resblock_updown + else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) + ) + ds //= 2 + self.output_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), + ) + if self.predict_codebook_ids: + self.id_predictor = nn.Sequential( + normalization(ch), + conv_nd(dims, model_channels, n_embed, 1), + #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits + ) + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + self.output_blocks.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + self.output_blocks.apply(convert_module_to_f32) + + def forward(self, x, timesteps=None, context=None, y=None,**kwargs): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :param context: conditioning plugged in via crossattn + :param y: an [N] Tensor of labels, if class-conditional. + :return: an [N x C x ...] Tensor of outputs. 
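+        Note: `context` is only consumed by SpatialTransformer (cross-attention) layers;
+        plain AttentionBlock layers ignore it (see TimestepEmbedSequential.forward).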
+ """ + assert (y is not None) == ( + self.num_classes is not None + ), "must specify y if and only if the model is class-conditional" + hs = [] + t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) + emb = self.time_embed(t_emb) + + if self.num_classes is not None: + assert y.shape == (x.shape[0],) + emb = emb + self.label_emb(y) + + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb, context) + hs.append(h) + h = self.middle_block(h, emb, context) + for module in self.output_blocks: + h = th.cat([h, hs.pop()], dim=1) + h = module(h, emb, context) + h = h.type(x.dtype) + if self.predict_codebook_ids: + return self.id_predictor(h) + else: + return self.out(h) + + +class EncoderUNetModel(nn.Module): + """ + The half UNet model with attention and timestep embedding. + For usage, see UNet. + """ + + def __init__( + self, + image_size, + in_channels, + model_channels, + out_channels, + num_res_blocks, + attention_resolutions, + dropout=0, + channel_mult=(1, 2, 4, 8), + conv_resample=True, + dims=2, + use_checkpoint=False, + use_fp16=False, + num_heads=1, + num_head_channels=-1, + num_heads_upsample=-1, + use_scale_shift_norm=False, + resblock_updown=False, + use_new_attention_order=False, + pool="adaptive", + *args, + **kwargs + ): + super().__init__() + + if num_heads_upsample == -1: + num_heads_upsample = num_heads + + self.in_channels = in_channels + self.model_channels = model_channels + self.out_channels = out_channels + self.num_res_blocks = num_res_blocks + self.attention_resolutions = attention_resolutions + self.dropout = dropout + self.channel_mult = channel_mult + self.conv_resample = conv_resample + self.use_checkpoint = use_checkpoint + self.dtype = th.float16 if use_fp16 else th.float32 + self.num_heads = num_heads + self.num_head_channels = num_head_channels + self.num_heads_upsample = num_heads_upsample + + time_embed_dim = model_channels * 4 + self.time_embed = nn.Sequential( + linear(model_channels, time_embed_dim), + nn.SiLU(), + linear(time_embed_dim, time_embed_dim), + ) + + self.input_blocks = nn.ModuleList( + [ + TimestepEmbedSequential( + conv_nd(dims, in_channels, model_channels, 3, padding=1) + ) + ] + ) + self._feature_size = model_channels + input_block_chans = [model_channels] + ch = model_channels + ds = 1 + for level, mult in enumerate(channel_mult): + for _ in range(num_res_blocks): + layers = [ + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=mult * model_channels, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ) + ] + ch = mult * model_channels + if ds in attention_resolutions: + layers.append( + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ) + ) + self.input_blocks.append(TimestepEmbedSequential(*layers)) + self._feature_size += ch + input_block_chans.append(ch) + if level != len(channel_mult) - 1: + out_ch = ch + self.input_blocks.append( + TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + out_channels=out_ch, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + down=True, + ) + if resblock_updown + else Downsample( + ch, conv_resample, dims=dims, out_channels=out_ch + ) + ) + ) + ch = out_ch + input_block_chans.append(ch) + ds *= 2 + self._feature_size += ch + + self.middle_block = TimestepEmbedSequential( + ResBlock( + ch, + time_embed_dim, + dropout, + 
dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + AttentionBlock( + ch, + use_checkpoint=use_checkpoint, + num_heads=num_heads, + num_head_channels=num_head_channels, + use_new_attention_order=use_new_attention_order, + ), + ResBlock( + ch, + time_embed_dim, + dropout, + dims=dims, + use_checkpoint=use_checkpoint, + use_scale_shift_norm=use_scale_shift_norm, + ), + ) + self._feature_size += ch + self.pool = pool + if pool == "adaptive": + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + nn.AdaptiveAvgPool2d((1, 1)), + zero_module(conv_nd(dims, ch, out_channels, 1)), + nn.Flatten(), + ) + elif pool == "attention": + assert num_head_channels != -1 + self.out = nn.Sequential( + normalization(ch), + nn.SiLU(), + AttentionPool2d( + (image_size // ds), ch, num_head_channels, out_channels + ), + ) + elif pool == "spatial": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + nn.ReLU(), + nn.Linear(2048, self.out_channels), + ) + elif pool == "spatial_v2": + self.out = nn.Sequential( + nn.Linear(self._feature_size, 2048), + normalization(2048), + nn.SiLU(), + nn.Linear(2048, self.out_channels), + ) + else: + raise NotImplementedError(f"Unexpected {pool} pooling") + + def convert_to_fp16(self): + """ + Convert the torso of the model to float16. + """ + self.input_blocks.apply(convert_module_to_f16) + self.middle_block.apply(convert_module_to_f16) + + def convert_to_fp32(self): + """ + Convert the torso of the model to float32. + """ + self.input_blocks.apply(convert_module_to_f32) + self.middle_block.apply(convert_module_to_f32) + + def forward(self, x, timesteps): + """ + Apply the model to an input batch. + :param x: an [N x C x ...] Tensor of inputs. + :param timesteps: a 1-D batch of timesteps. + :return: an [N x K] Tensor of outputs. + """ + emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) + + results = [] + h = x.type(self.dtype) + for module in self.input_blocks: + h = module(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = self.middle_block(h, emb) + if self.pool.startswith("spatial"): + results.append(h.type(x.dtype).mean(dim=(2, 3))) + h = th.cat(results, axis=-1) + return self.out(h) + else: + h = h.type(x.dtype) + return self.out(h) + diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py new file mode 100644 index 00000000..a952e6c4 --- /dev/null +++ b/ldm/modules/diffusionmodules/util.py @@ -0,0 +1,267 @@ +# adopted from +# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py +# and +# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py +# and +# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py +# +# thanks! 
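+# The helpers defined below (beta schedules, DDIM timestep selection, sinusoidal
+# timestep embeddings, gradient checkpointing) are self-contained. A rough usage
+# sketch -- illustrative only, assuming this module is importable as
+# ldm.modules.diffusionmodules.util -- might look like:
+#
+#     import torch
+#     from ldm.modules.diffusionmodules.util import make_beta_schedule, timestep_embedding
+#
+#     betas = make_beta_schedule("linear", n_timestep=1000)   # numpy array, shape (1000,)
+#     alphas_cumprod = torch.cumprod(1.0 - torch.from_numpy(betas), dim=0)
+#     t = torch.randint(0, 1000, (4,))
+#     emb = timestep_embedding(t, dim=320)                     # tensor, shape (4, 320)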
+ + +import os +import math +import torch +import torch.nn as nn +import numpy as np +from einops import repeat + +from ldm.util import instantiate_from_config + + +def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): + if schedule == "linear": + betas = ( + torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 + ) + + elif schedule == "cosine": + timesteps = ( + torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s + ) + alphas = timesteps / (1 + cosine_s) * np.pi / 2 + alphas = torch.cos(alphas).pow(2) + alphas = alphas / alphas[0] + betas = 1 - alphas[1:] / alphas[:-1] + betas = np.clip(betas, a_min=0, a_max=0.999) + + elif schedule == "sqrt_linear": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) + elif schedule == "sqrt": + betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 + else: + raise ValueError(f"schedule '{schedule}' unknown.") + return betas.numpy() + + +def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): + if ddim_discr_method == 'uniform': + c = num_ddpm_timesteps // num_ddim_timesteps + ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) + elif ddim_discr_method == 'quad': + ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) + else: + raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') + + # assert ddim_timesteps.shape[0] == num_ddim_timesteps + # add one to get the final alpha values right (the ones from first scale to data during sampling) + steps_out = ddim_timesteps + 1 + if verbose: + print(f'Selected timesteps for ddim sampler: {steps_out}') + return steps_out + + +def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): + # select alphas for computing the variance schedule + alphas = alphacums[ddim_timesteps] + alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) + + # according the the formula provided in https://arxiv.org/abs/2010.02502 + sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) + if verbose: + print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') + print(f'For the chosen value of eta, which is {eta}, ' + f'this results in the following sigma_t schedule for ddim sampler {sigmas}') + return sigmas, alphas, alphas_prev + + +def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): + """ + Create a beta schedule that discretizes the given alpha_t_bar function, + which defines the cumulative product of (1-beta) over time from t = [0,1]. + :param num_diffusion_timesteps: the number of betas to produce. + :param alpha_bar: a lambda that takes an argument t from 0 to 1 and + produces the cumulative product of (1-beta) up to that + part of the diffusion process. + :param max_beta: the maximum beta to use; use values lower than 1 to + prevent singularities. 
+ """ + betas = [] + for i in range(num_diffusion_timesteps): + t1 = i / num_diffusion_timesteps + t2 = (i + 1) / num_diffusion_timesteps + betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) + return np.array(betas) + + +def extract_into_tensor(a, t, x_shape): + b, *_ = t.shape + out = a.gather(-1, t) + return out.reshape(b, *((1,) * (len(x_shape) - 1))) + + +def checkpoint(func, inputs, params, flag): + """ + Evaluate a function without caching intermediate activations, allowing for + reduced memory at the expense of extra compute in the backward pass. + :param func: the function to evaluate. + :param inputs: the argument sequence to pass to `func`. + :param params: a sequence of parameters `func` depends on but does not + explicitly take as arguments. + :param flag: if False, disable gradient checkpointing. + """ + if flag: + args = tuple(inputs) + tuple(params) + return CheckpointFunction.apply(func, len(inputs), *args) + else: + return func(*inputs) + + +class CheckpointFunction(torch.autograd.Function): + @staticmethod + def forward(ctx, run_function, length, *args): + ctx.run_function = run_function + ctx.input_tensors = list(args[:length]) + ctx.input_params = list(args[length:]) + + with torch.no_grad(): + output_tensors = ctx.run_function(*ctx.input_tensors) + return output_tensors + + @staticmethod + def backward(ctx, *output_grads): + ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] + with torch.enable_grad(): + # Fixes a bug where the first op in run_function modifies the + # Tensor storage in place, which is not allowed for detach()'d + # Tensors. + shallow_copies = [x.view_as(x) for x in ctx.input_tensors] + output_tensors = ctx.run_function(*shallow_copies) + input_grads = torch.autograd.grad( + output_tensors, + ctx.input_tensors + ctx.input_params, + output_grads, + allow_unused=True, + ) + del ctx.input_tensors + del ctx.input_params + del output_tensors + return (None, None) + input_grads + + +def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): + """ + Create sinusoidal timestep embeddings. + :param timesteps: a 1-D Tensor of N indices, one per batch element. + These may be fractional. + :param dim: the dimension of the output. + :param max_period: controls the minimum frequency of the embeddings. + :return: an [N x dim] Tensor of positional embeddings. + """ + if not repeat_only: + half = dim // 2 + freqs = torch.exp( + -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half + ).to(device=timesteps.device) + args = timesteps[:, None].float() * freqs[None] + embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) + if dim % 2: + embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) + else: + embedding = repeat(timesteps, 'b -> b d', d=dim) + return embedding + + +def zero_module(module): + """ + Zero out the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().zero_() + return module + + +def scale_module(module, scale): + """ + Scale the parameters of a module and return it. + """ + for p in module.parameters(): + p.detach().mul_(scale) + return module + + +def mean_flat(tensor): + """ + Take the mean over all non-batch dimensions. + """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def normalization(channels): + """ + Make a standard normalization layer. + :param channels: number of input channels. + :return: an nn.Module for normalization. 
+ """ + return GroupNorm32(32, channels) + + +# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. +class SiLU(nn.Module): + def forward(self, x): + return x * torch.sigmoid(x) + + +class GroupNorm32(nn.GroupNorm): + def forward(self, x): + return super().forward(x.float()).type(x.dtype) + +def conv_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D convolution module. + """ + if dims == 1: + return nn.Conv1d(*args, **kwargs) + elif dims == 2: + return nn.Conv2d(*args, **kwargs) + elif dims == 3: + return nn.Conv3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +def linear(*args, **kwargs): + """ + Create a linear module. + """ + return nn.Linear(*args, **kwargs) + + +def avg_pool_nd(dims, *args, **kwargs): + """ + Create a 1D, 2D, or 3D average pooling module. + """ + if dims == 1: + return nn.AvgPool1d(*args, **kwargs) + elif dims == 2: + return nn.AvgPool2d(*args, **kwargs) + elif dims == 3: + return nn.AvgPool3d(*args, **kwargs) + raise ValueError(f"unsupported dimensions: {dims}") + + +class HybridConditioner(nn.Module): + + def __init__(self, c_concat_config, c_crossattn_config): + super().__init__() + self.concat_conditioner = instantiate_from_config(c_concat_config) + self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) + + def forward(self, c_concat, c_crossattn): + c_concat = self.concat_conditioner(c_concat) + c_crossattn = self.crossattn_conditioner(c_crossattn) + return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} + + +def noise_like(shape, device, repeat=False): + repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) + noise = lambda: torch.randn(shape, device=device) + return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/ldm/modules/distributions/__init__.py b/ldm/modules/distributions/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py new file mode 100644 index 00000000..f2b8ef90 --- /dev/null +++ b/ldm/modules/distributions/distributions.py @@ -0,0 +1,92 @@ +import torch +import numpy as np + + +class AbstractDistribution: + def sample(self): + raise NotImplementedError() + + def mode(self): + raise NotImplementedError() + + +class DiracDistribution(AbstractDistribution): + def __init__(self, value): + self.value = value + + def sample(self): + return self.value + + def mode(self): + return self.value + + +class DiagonalGaussianDistribution(object): + def __init__(self, parameters, deterministic=False): + self.parameters = parameters + self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) + self.logvar = torch.clamp(self.logvar, -30.0, 20.0) + self.deterministic = deterministic + self.std = torch.exp(0.5 * self.logvar) + self.var = torch.exp(self.logvar) + if self.deterministic: + self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) + + def sample(self): + x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) + return x + + def kl(self, other=None): + if self.deterministic: + return torch.Tensor([0.]) + else: + if other is None: + return 0.5 * torch.sum(torch.pow(self.mean, 2) + + self.var - 1.0 - self.logvar, + dim=[1, 2, 3]) + else: + return 0.5 * torch.sum( + torch.pow(self.mean - other.mean, 2) / other.var + + self.var / other.var - 1.0 - self.logvar + other.logvar, + dim=[1, 2, 3]) + + def nll(self, sample, dims=[1,2,3]): + if 
self.deterministic: + return torch.Tensor([0.]) + logtwopi = np.log(2.0 * np.pi) + return 0.5 * torch.sum( + logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, + dim=dims) + + def mode(self): + return self.mean + + +def normal_kl(mean1, logvar1, mean2, logvar2): + """ + source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 + Compute the KL divergence between two gaussians. + Shapes are automatically broadcasted, so batches can be compared to + scalars, among other use cases. + """ + tensor = None + for obj in (mean1, logvar1, mean2, logvar2): + if isinstance(obj, torch.Tensor): + tensor = obj + break + assert tensor is not None, "at least one argument must be a Tensor" + + # Force variances to be Tensors. Broadcasting helps convert scalars to + # Tensors, but it does not work for torch.exp(). + logvar1, logvar2 = [ + x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) + for x in (logvar1, logvar2) + ] + + return 0.5 * ( + -1.0 + + logvar2 + - logvar1 + + torch.exp(logvar1 - logvar2) + + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) + ) diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py new file mode 100644 index 00000000..c8c75af4 --- /dev/null +++ b/ldm/modules/ema.py @@ -0,0 +1,76 @@ +import torch +from torch import nn + + +class LitEma(nn.Module): + def __init__(self, model, decay=0.9999, use_num_upates=True): + super().__init__() + if decay < 0.0 or decay > 1.0: + raise ValueError('Decay must be between 0 and 1') + + self.m_name2s_name = {} + self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) + self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates + else torch.tensor(-1,dtype=torch.int)) + + for name, p in model.named_parameters(): + if p.requires_grad: + #remove as '.'-character is not allowed in buffers + s_name = name.replace('.','') + self.m_name2s_name.update({name:s_name}) + self.register_buffer(s_name,p.clone().detach().data) + + self.collected_params = [] + + def forward(self,model): + decay = self.decay + + if self.num_updates >= 0: + self.num_updates += 1 + decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) + + one_minus_decay = 1.0 - decay + + with torch.no_grad(): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + + for key in m_param: + if m_param[key].requires_grad: + sname = self.m_name2s_name[key] + shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) + shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) + else: + assert not key in self.m_name2s_name + + def copy_to(self, model): + m_param = dict(model.named_parameters()) + shadow_params = dict(self.named_buffers()) + for key in m_param: + if m_param[key].requires_grad: + m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) + else: + assert not key in self.m_name2s_name + + def store(self, parameters): + """ + Save the current parameters for restoring later. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + temporarily stored. + """ + self.collected_params = [param.clone() for param in parameters] + + def restore(self, parameters): + """ + Restore the parameters stored with the `store` method. + Useful to validate the model with EMA parameters without affecting the + original optimization process. Store the parameters before the + `copy_to` method. 
After validation (or model saving), use this to + restore the former parameters. + Args: + parameters: Iterable of `torch.nn.Parameter`; the parameters to be + updated with the stored parameters. + """ + for c_param, param in zip(self.collected_params, parameters): + param.data.copy_(c_param.data) diff --git a/ldm/modules/encoders/__init__.py b/ldm/modules/encoders/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py new file mode 100644 index 00000000..ededbe43 --- /dev/null +++ b/ldm/modules/encoders/modules.py @@ -0,0 +1,234 @@ +import torch +import torch.nn as nn +from functools import partial +import clip +from einops import rearrange, repeat +from transformers import CLIPTokenizer, CLIPTextModel +import kornia + +from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test + + +class AbstractEncoder(nn.Module): + def __init__(self): + super().__init__() + + def encode(self, *args, **kwargs): + raise NotImplementedError + + + +class ClassEmbedder(nn.Module): + def __init__(self, embed_dim, n_classes=1000, key='class'): + super().__init__() + self.key = key + self.embedding = nn.Embedding(n_classes, embed_dim) + + def forward(self, batch, key=None): + if key is None: + key = self.key + # this is for use in crossattn + c = batch[key][:, None] + c = self.embedding(c) + return c + + +class TransformerEmbedder(AbstractEncoder): + """Some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): + super().__init__() + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer)) + + def forward(self, tokens): + tokens = tokens.to(self.device) # meh + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, x): + return self(x) + + +class BERTTokenizer(AbstractEncoder): + """ Uses a pretrained BERT tokenizer by huggingface. 
Vocab size: 30522 (?)""" + def __init__(self, device="cuda", vq_interface=True, max_length=77): + super().__init__() + from transformers import BertTokenizerFast # TODO: add to reuquirements + self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") + self.device = device + self.vq_interface = vq_interface + self.max_length = max_length + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + return tokens + + @torch.no_grad() + def encode(self, text): + tokens = self(text) + if not self.vq_interface: + return tokens + return None, None, [None, None, tokens] + + def decode(self, text): + return text + + +class BERTEmbedder(AbstractEncoder): + """Uses the BERT tokenizr model and add some transformer encoder layers""" + def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, + device="cuda",use_tokenizer=True, embedding_dropout=0.0): + super().__init__() + self.use_tknz_fn = use_tokenizer + if self.use_tknz_fn: + self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) + self.device = device + self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, + attn_layers=Encoder(dim=n_embed, depth=n_layer), + emb_dropout=embedding_dropout) + + def forward(self, text): + if self.use_tknz_fn: + tokens = self.tknz_fn(text)#.to(self.device) + else: + tokens = text + z = self.transformer(tokens, return_embeddings=True) + return z + + def encode(self, text): + # output of length 77 + return self(text) + + +class SpatialRescaler(nn.Module): + def __init__(self, + n_stages=1, + method='bilinear', + multiplier=0.5, + in_channels=3, + out_channels=None, + bias=False): + super().__init__() + self.n_stages = n_stages + assert self.n_stages >= 0 + assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] + self.multiplier = multiplier + self.interpolator = partial(torch.nn.functional.interpolate, mode=method) + self.remap_output = out_channels is not None + if self.remap_output: + print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') + self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) + + def forward(self,x): + for stage in range(self.n_stages): + x = self.interpolator(x, scale_factor=self.multiplier) + + + if self.remap_output: + x = self.channel_mapper(x) + return x + + def encode(self, x): + return self(x) + +class FrozenCLIPEmbedder(AbstractEncoder): + """Uses the CLIP transformer encoder for text (from Hugging Face)""" + def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): + super().__init__() + self.tokenizer = CLIPTokenizer.from_pretrained(version) + self.transformer = CLIPTextModel.from_pretrained(version) + self.device = device + self.max_length = max_length + self.freeze() + + def freeze(self): + self.transformer = self.transformer.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, + return_overflowing_tokens=False, padding="max_length", return_tensors="pt") + tokens = batch_encoding["input_ids"].to(self.device) + outputs = self.transformer(input_ids=tokens) + + z = outputs.last_hidden_state + return z + + def encode(self, text): + return self(text) + 
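+# A minimal usage sketch for FrozenCLIPEmbedder above -- illustrative only,
+# assuming the openai/clip-vit-large-patch14 weights can be downloaded and a
+# CUDA device is available (the class defaults to device="cuda"):
+#
+#     embedder = FrozenCLIPEmbedder().to("cuda")
+#     z = embedder.encode(["a photograph of an astronaut riding a horse"])
+#     # z is the last hidden state of the CLIP text transformer,
+#     # shape [batch, 77, 768] for this model version.
+#
+# Note that encode() simply calls forward(); unlike BERTEmbedder there is no
+# separate tokenizer module to configure.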
+ +class FrozenCLIPTextEmbedder(nn.Module): + """ + Uses the CLIP transformer encoder for text. + """ + def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): + super().__init__() + self.model, _ = clip.load(version, jit=False, device="cpu") + self.device = device + self.max_length = max_length + self.n_repeat = n_repeat + self.normalize = normalize + + def freeze(self): + self.model = self.model.eval() + for param in self.parameters(): + param.requires_grad = False + + def forward(self, text): + tokens = clip.tokenize(text).to(self.device) + z = self.model.encode_text(tokens) + if self.normalize: + z = z / torch.linalg.norm(z, dim=1, keepdim=True) + return z + + def encode(self, text): + z = self(text) + if z.ndim==2: + z = z[:, None, :] + z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) + return z + + +class FrozenClipImageEmbedder(nn.Module): + """ + Uses the CLIP image encoder. + """ + def __init__( + self, + model, + jit=False, + device='cuda' if torch.cuda.is_available() else 'cpu', + antialias=False, + ): + super().__init__() + self.model, _ = clip.load(name=model, device=device, jit=jit) + + self.antialias = antialias + + self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) + self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) + + def preprocess(self, x): + # normalize to [0,1] + x = kornia.geometry.resize(x, (224, 224), + interpolation='bicubic',align_corners=True, + antialias=self.antialias) + x = (x + 1.) / 2. + # renormalize according to clip + x = kornia.enhance.normalize(x, self.mean, self.std) + return x + + def forward(self, x): + # x is assumed to be in range [-1,1] + return self.model.encode_image(self.preprocess(x)) + + +if __name__ == "__main__": + from ldm.util import count_params + model = FrozenCLIPEmbedder() + count_params(model, verbose=True) \ No newline at end of file diff --git a/ldm/modules/encoders/xlmr.py b/ldm/modules/encoders/xlmr.py new file mode 100644 index 00000000..beab3fdf --- /dev/null +++ b/ldm/modules/encoders/xlmr.py @@ -0,0 +1,137 @@ +from transformers import BertPreTrainedModel,BertModel,BertConfig +import torch.nn as nn +import torch +from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaConfig +from transformers import XLMRobertaModel,XLMRobertaTokenizer +from typing import Optional + +class BertSeriesConfig(BertConfig): + def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", use_cache=True, classifier_dropout=None,project_dim=512, pooler_fn="average",learn_encoder=False,model_type='bert',**kwargs): + + super().__init__(vocab_size, hidden_size, num_hidden_layers, num_attention_heads, intermediate_size, hidden_act, hidden_dropout_prob, attention_probs_dropout_prob, max_position_embeddings, type_vocab_size, initializer_range, layer_norm_eps, pad_token_id, position_embedding_type, use_cache, classifier_dropout, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + +class RobertaSeriesConfig(XLMRobertaConfig): + def __init__(self, pad_token_id=1, bos_token_id=0, eos_token_id=2,project_dim=512,pooler_fn='cls',learn_encoder=False, **kwargs): + 
super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) + self.project_dim = project_dim + self.pooler_fn = pooler_fn + self.learn_encoder = learn_encoder + + +class BertSeriesModelWithTransformation(BertPreTrainedModel): + + _keys_to_ignore_on_load_unexpected = [r"pooler"] + _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"] + config_class = BertSeriesConfig + + def __init__(self, config=None, **kargs): + # modify initialization for autoloading + if config is None: + config = XLMRobertaConfig() + config.attention_probs_dropout_prob= 0.1 + config.bos_token_id=0 + config.eos_token_id=2 + config.hidden_act='gelu' + config.hidden_dropout_prob=0.1 + config.hidden_size=1024 + config.initializer_range=0.02 + config.intermediate_size=4096 + config.layer_norm_eps=1e-05 + config.max_position_embeddings=514 + + config.num_attention_heads=16 + config.num_hidden_layers=24 + config.output_past=True + config.pad_token_id=1 + config.position_embedding_type= "absolute" + + config.type_vocab_size= 1 + config.use_cache=True + config.vocab_size= 250002 + config.project_dim = 768 + config.learn_encoder = False + super().__init__(config) + self.roberta = XLMRobertaModel(config) + self.transformation = nn.Linear(config.hidden_size,config.project_dim) + self.pre_LN=nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps) + self.tokenizer = XLMRobertaTokenizer.from_pretrained('xlm-roberta-large') + self.pooler = lambda x: x[:,0] + self.post_init() + + def encode(self,c): + device = next(self.parameters()).device + text = self.tokenizer(c, + truncation=True, + max_length=77, + return_length=False, + return_overflowing_tokens=False, + padding="max_length", + return_tensors="pt") + text["input_ids"] = torch.tensor(text["input_ids"]).to(device) + text["attention_mask"] = torch.tensor( + text['attention_mask']).to(device) + features = self(**text) + return features['projection_state'] + + def forward( + self, + input_ids: Optional[torch.Tensor] = None, + attention_mask: Optional[torch.Tensor] = None, + token_type_ids: Optional[torch.Tensor] = None, + position_ids: Optional[torch.Tensor] = None, + head_mask: Optional[torch.Tensor] = None, + inputs_embeds: Optional[torch.Tensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.Tensor] = None, + output_attentions: Optional[bool] = None, + return_dict: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + ) : + r""" + """ + + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + + outputs = self.roberta( + input_ids=input_ids, + attention_mask=attention_mask, + token_type_ids=token_type_ids, + position_ids=position_ids, + head_mask=head_mask, + inputs_embeds=inputs_embeds, + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + output_attentions=output_attentions, + output_hidden_states=True, + return_dict=return_dict, + ) + + # last module outputs + sequence_output = outputs[0] + + + # project every module + sequence_output_ln = self.pre_LN(sequence_output) + + # pooler + pooler_output = self.pooler(sequence_output_ln) + pooler_output = self.transformation(pooler_output) + projection_state = self.transformation(outputs.last_hidden_state) + + return { + 'pooler_output':pooler_output, + 'last_hidden_state':outputs.last_hidden_state, + 'hidden_states':outputs.hidden_states, + 'attentions':outputs.attentions, + 
'projection_state':projection_state, + 'sequence_out': sequence_output + } + + +class RobertaSeriesModelWithTransformation(BertSeriesModelWithTransformation): + base_model_prefix = 'roberta' + config_class= RobertaSeriesConfig \ No newline at end of file diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py new file mode 100644 index 00000000..7836cada --- /dev/null +++ b/ldm/modules/image_degradation/__init__.py @@ -0,0 +1,2 @@ +from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr +from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py new file mode 100644 index 00000000..32ef5616 --- /dev/null +++ b/ldm/modules/image_degradation/bsrgan.py @@ -0,0 +1,730 @@ +# -*- coding: utf-8 -*- +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import ldm.modules.image_degradation.utils_image as util + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. 
+ Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = 
sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + } + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. 
+ threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. 
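+    # Shot-noise model: the image is treated as an expected photon count. `vals`
+    # below scales intensities into a count range of roughly 1e2..1e4 before
+    # Poisson sampling, so larger `vals` corresponds to weaker relative noise.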
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(30, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + elif i == 1: + image = add_blur(image, sf=sf) + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling + image = np.clip(image, 0.0, 1.0) + + elif i == 3: + # downsample3 + image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + image = np.clip(image, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + image = add_JPEG_noise(image) + + # elif i == 6: + # # add processed camera sensor noise + # if random.random() < isp_prob and isp_model is not None: + # with torch.no_grad(): + # img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + image = add_JPEG_noise(image) + image = util.single2uint(image) + example = {"image":image} + return example + + +# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... +def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): + """ + This is an extended degradation model by combining + the degradation models of BSRGAN and Real-ESRGAN + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + use_shuffle: the degradation shuffle + use_sharp: sharpening the img + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + if use_sharp: + img = add_sharpening(img) + hq = img.copy() + + if random.random() < shuffle_prob: + shuffle_order = random.sample(range(13), 13) + else: + shuffle_order = list(range(13)) + # local shuffle for noise, JPEG is always the last one + shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) + shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) + + poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 + + for i in shuffle_order: + if i == 0: + img = add_blur(img, sf=sf) + elif i == 1: + img = add_resize(img, sf=sf) + elif i == 2: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 3: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 4: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 5: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + elif i == 6: + img = add_JPEG_noise(img) + elif i == 7: + img = add_blur(img, sf=sf) + elif i == 8: + img = add_resize(img, sf=sf) + elif i == 9: + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) + elif i == 10: + if random.random() < poisson_prob: + img = add_Poisson_noise(img) + elif i == 11: + if random.random() < speckle_prob: + img = add_speckle_noise(img) + elif i == 12: + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + else: + print('check the shuffle!') + + # resize to desired size + img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), + interpolation=random.choice([1, 2, 3])) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf, lq_patchsize) + 
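+    # At this point `img` is the degraded low-resolution patch
+    # (lq_patchsize x lq_patchsize x C, float32 in [0, 1]) and `hq` is the
+    # aligned clean patch at sf times that spatial size.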
+ return img, hq + + +if __name__ == '__main__': + print("hey") + img = util.imread_uint('utils/test.png', 3) + print(img) + img = util.uint2single(img) + print(img) + img = img[:448, :448] + h = img.shape[0] // 4 + print("resizing to", h) + sf = 4 + deg_fn = partial(degradation_bsrgan_variant, sf=sf) + for i in range(20): + print(i) + img_lq = deg_fn(img) + print(img_lq) + img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"] + print(img_lq.shape) + print("bicubic", img_lq_bicubic.shape) + print(img_hq.shape) + lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), + interpolation=0) + img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) + util.imsave(img_concat, str(i) + '.png') + + diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py new file mode 100644 index 00000000..9e1f8239 --- /dev/null +++ b/ldm/modules/image_degradation/bsrgan_light.py @@ -0,0 +1,650 @@ +# -*- coding: utf-8 -*- +import numpy as np +import cv2 +import torch + +from functools import partial +import random +from scipy import ndimage +import scipy +import scipy.stats as ss +from scipy.interpolate import interp2d +from scipy.linalg import orth +import albumentations + +import ldm.modules.image_degradation.utils_image as util + +""" +# -------------------------------------------- +# Super-Resolution +# -------------------------------------------- +# +# Kai Zhang (cskaizhang@gmail.com) +# https://github.com/cszn +# From 2019/03--2021/08 +# -------------------------------------------- +""" + + +def modcrop_np(img, sf): + ''' + Args: + img: numpy image, WxH or WxHxC + sf: scale factor + Return: + cropped image + ''' + w, h = img.shape[:2] + im = np.copy(img) + return im[:w - w % sf, :h - h % sf, ...] + + +""" +# -------------------------------------------- +# anisotropic Gaussian kernels +# -------------------------------------------- +""" + + +def analytic_kernel(k): + """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" + k_size = k.shape[0] + # Calculate the big kernels size + big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) + # Loop over the small kernel to fill the big one + for r in range(k_size): + for c in range(k_size): + big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k + # Crop the edges of the big kernel to ignore very small values and increase run time of SR + crop = k_size // 2 + cropped_big_k = big_k[crop:-crop, crop:-crop] + # Normalize to 1 + return cropped_big_k / cropped_big_k.sum() + + +def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): + """ generate an anisotropic Gaussian kernel + Args: + ksize : e.g., 15, kernel size + theta : [0, pi], rotation angle range + l1 : [0.1,50], scaling of eigenvalues + l2 : [0.1,l1], scaling of eigenvalues + If l1 = l2, will get an isotropic Gaussian kernel. 
+ Returns: + k : kernel + """ + + v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) + V = np.array([[v[0], v[1]], [v[1], -v[0]]]) + D = np.array([[l1, 0], [0, l2]]) + Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) + k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) + + return k + + +def gm_blur_kernel(mean, cov, size=15): + center = size / 2.0 + 0.5 + k = np.zeros([size, size]) + for y in range(size): + for x in range(size): + cy = y - center + 1 + cx = x - center + 1 + k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) + + k = k / np.sum(k) + return k + + +def shift_pixel(x, sf, upper_left=True): + """shift pixel for super-resolution with different scale factors + Args: + x: WxHxC or WxH + sf: scale factor + upper_left: shift direction + """ + h, w = x.shape[:2] + shift = (sf - 1) * 0.5 + xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) + if upper_left: + x1 = xv + shift + y1 = yv + shift + else: + x1 = xv - shift + y1 = yv - shift + + x1 = np.clip(x1, 0, w - 1) + y1 = np.clip(y1, 0, h - 1) + + if x.ndim == 2: + x = interp2d(xv, yv, x)(x1, y1) + if x.ndim == 3: + for i in range(x.shape[-1]): + x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) + + return x + + +def blur(x, k): + ''' + x: image, NxcxHxW + k: kernel, Nx1xhxw + ''' + n, c = x.shape[:2] + p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 + x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') + k = k.repeat(1, c, 1, 1) + k = k.view(-1, 1, k.shape[2], k.shape[3]) + x = x.view(1, -1, x.shape[2], x.shape[3]) + x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) + x = x.view(n, c, x.shape[2], x.shape[3]) + + return x + + +def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): + """" + # modified version of https://github.com/assafshocher/BlindSR_dataset_generator + # Kai Zhang + # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var + # max_var = 2.5 * sf + """ + # Set random eigen-vals (lambdas) and angle (theta) for COV matrix + lambda_1 = min_var + np.random.rand() * (max_var - min_var) + lambda_2 = min_var + np.random.rand() * (max_var - min_var) + theta = np.random.rand() * np.pi # random theta + noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 + + # Set COV matrix using Lambdas and Theta + LAMBDA = np.diag([lambda_1, lambda_2]) + Q = np.array([[np.cos(theta), -np.sin(theta)], + [np.sin(theta), np.cos(theta)]]) + SIGMA = Q @ LAMBDA @ Q.T + INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] + + # Set expectation position (shifting kernel for aligned image) + MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) + MU = MU[None, None, :, None] + + # Create meshgrid for Gaussian + [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) + Z = np.stack([X, Y], 2)[:, :, :, None] + + # Calcualte Gaussian for every pixel of the kernel + ZZ = Z - MU + ZZ_t = ZZ.transpose(0, 1, 3, 2) + raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) + + # shift the kernel so it will be centered + # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) + + # Normalize the kernel and return + # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) + kernel = raw_kernel / np.sum(raw_kernel) + return kernel + + +def fspecial_gaussian(hsize, sigma): + hsize = [hsize, hsize] + siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] + std = 
sigma + [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) + arg = -(x * x + y * y) / (2 * std * std) + h = np.exp(arg) + h[h < scipy.finfo(float).eps * h.max()] = 0 + sumh = h.sum() + if sumh != 0: + h = h / sumh + return h + + +def fspecial_laplacian(alpha): + alpha = max([0, min([alpha, 1])]) + h1 = alpha / (alpha + 1) + h2 = (1 - alpha) / (alpha + 1) + h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] + h = np.array(h) + return h + + +def fspecial(filter_type, *args, **kwargs): + ''' + python code from: + https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py + ''' + if filter_type == 'gaussian': + return fspecial_gaussian(*args, **kwargs) + if filter_type == 'laplacian': + return fspecial_laplacian(*args, **kwargs) + + +""" +# -------------------------------------------- +# degradation models +# -------------------------------------------- +""" + + +def bicubic_degradation(x, sf=3): + ''' + Args: + x: HxWxC image, [0, 1] + sf: down-scale factor + Return: + bicubicly downsampled LR image + ''' + x = util.imresize_np(x, scale=1 / sf) + return x + + +def srmd_degradation(x, k, sf=3): + ''' blur + bicubic downsampling + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2018learning, + title={Learning a single convolutional super-resolution network for multiple degradations}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={3262--3271}, + year={2018} + } + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' + x = bicubic_degradation(x, sf=sf) + return x + + +def dpsr_degradation(x, k, sf=3): + ''' bicubic downsampling + blur + Args: + x: HxWxC image, [0, 1] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + Reference: + @inproceedings{zhang2019deep, + title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, + author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, + booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, + pages={1671--1681}, + year={2019} + } + ''' + x = bicubic_degradation(x, sf=sf) + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + return x + + +def classical_degradation(x, k, sf=3): + ''' blur + downsampling + Args: + x: HxWxC image, [0, 1]/[0, 255] + k: hxw, double + sf: down-scale factor + Return: + downsampled LR image + ''' + x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') + # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) + st = 0 + return x[st::sf, st::sf, ...] + + +def add_sharpening(img, weight=0.5, radius=50, threshold=10): + """USM sharpening. borrowed from real-ESRGAN + Input image: I; Blurry image: B. + 1. K = I + weight * (I - B) + 2. Mask = 1 if abs(I - B) > threshold, else: 0 + 3. Blur mask: + 4. Out = Mask * K + (1 - Mask) * I + Args: + img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. + weight (float): Sharp weight. Default: 1. + radius (float): Kernel size of Gaussian blur. Default: 50. 
+ threshold (int): + """ + if radius % 2 == 0: + radius += 1 + blur = cv2.GaussianBlur(img, (radius, radius), 0) + residual = img - blur + mask = np.abs(residual) * 255 > threshold + mask = mask.astype('float32') + soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) + + K = img + weight * residual + K = np.clip(K, 0, 1) + return soft_mask * K + (1 - soft_mask) * img + + +def add_blur(img, sf=4): + wd2 = 4.0 + sf + wd = 2.0 + 0.2 * sf + + wd2 = wd2/4 + wd = wd/4 + + if random.random() < 0.5: + l1 = wd2 * random.random() + l2 = wd2 * random.random() + k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) + else: + k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) + img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') + + return img + + +def add_resize(img, sf=4): + rnum = np.random.rand() + if rnum > 0.8: # up + sf1 = random.uniform(1, 2) + elif rnum < 0.7: # down + sf1 = random.uniform(0.5 / sf, 1) + else: + sf1 = 1.0 + img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + return img + + +# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): +# noise_level = random.randint(noise_level1, noise_level2) +# rnum = np.random.rand() +# if rnum > 0.6: # add color Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) +# elif rnum < 0.4: # add grayscale Gaussian noise +# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) +# else: # add noise +# L = noise_level2 / 255. +# D = np.diag(np.random.rand(3)) +# U = orth(np.random.rand(3, 3)) +# conv = np.dot(np.dot(np.transpose(U), D), U) +# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) +# img = np.clip(img, 0.0, 1.0) +# return img + +def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + rnum = np.random.rand() + if rnum > 0.6: # add color Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: # add grayscale Gaussian noise + img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: # add noise + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_speckle_noise(img, noise_level1=2, noise_level2=25): + noise_level = random.randint(noise_level1, noise_level2) + img = np.clip(img, 0.0, 1.0) + rnum = random.random() + if rnum > 0.6: + img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) + elif rnum < 0.4: + img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) + else: + L = noise_level2 / 255. + D = np.diag(np.random.rand(3)) + U = orth(np.random.rand(3, 3)) + conv = np.dot(np.dot(np.transpose(U), D), U) + img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) + img = np.clip(img, 0.0, 1.0) + return img + + +def add_Poisson_noise(img): + img = np.clip((img * 255.0).round(), 0, 255) / 255. 
+ vals = 10 ** (2 * random.random() + 2.0) # [2, 4] + if random.random() < 0.5: + img = np.random.poisson(img * vals).astype(np.float32) / vals + else: + img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) + img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. + noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray + img += noise_gray[:, :, np.newaxis] + img = np.clip(img, 0.0, 1.0) + return img + + +def add_JPEG_noise(img): + quality_factor = random.randint(80, 95) + img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) + result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) + img = cv2.imdecode(encimg, 1) + img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) + return img + + +def random_crop(lq, hq, sf=4, lq_patchsize=64): + h, w = lq.shape[:2] + rnd_h = random.randint(0, h - lq_patchsize) + rnd_w = random.randint(0, w - lq_patchsize) + lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] + + rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) + hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] + return lq, hq + + +def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = img.shape[:2] + img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = img.shape[:2] + + if h < lq_patchsize * sf or w < lq_patchsize * sf: + raise ValueError(f'img size ({h1}X{w1}) is too small!') + + hq = img.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + img = util.imresize_np(img, 1 / 2, True) + img = np.clip(img, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + img = add_blur(img, sf=sf) + + elif i == 1: + img = add_blur(img, sf=sf) + + elif i == 2: + a, b = img.shape[1], img.shape[0] + # downsample2 + if random.random() < 0.75: + sf1 = random.uniform(1, 2 * sf) + img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') + img = img[0::sf, 0::sf, ...] 
# nearest downsampling + img = np.clip(img, 0.0, 1.0) + + elif i == 3: + # downsample3 + img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) + img = np.clip(img, 0.0, 1.0) + + elif i == 4: + # add Gaussian noise + img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) + + elif i == 5: + # add JPEG noise + if random.random() < jpeg_prob: + img = add_JPEG_noise(img) + + elif i == 6: + # add processed camera sensor noise + if random.random() < isp_prob and isp_model is not None: + with torch.no_grad(): + img, hq = isp_model.forward(img.copy(), hq) + + # add final JPEG compression noise + img = add_JPEG_noise(img) + + # random crop + img, hq = random_crop(img, hq, sf_ori, lq_patchsize) + + return img, hq + + +# todo no isp_model? +def degradation_bsrgan_variant(image, sf=4, isp_model=None): + """ + This is the degradation model of BSRGAN from the paper + "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" + ---------- + sf: scale factor + isp_model: camera ISP model + Returns + ------- + img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] + hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] + """ + image = util.uint2single(image) + isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 + sf_ori = sf + + h1, w1 = image.shape[:2] + image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop + h, w = image.shape[:2] + + hq = image.copy() + + if sf == 4 and random.random() < scale2_prob: # downsample1 + if np.random.rand() < 0.5: + image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + image = util.imresize_np(image, 1 / 2, True) + image = np.clip(image, 0.0, 1.0) + sf = 2 + + shuffle_order = random.sample(range(7), 7) + idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) + if idx1 > idx2: # keep downsample3 last + shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] + + for i in shuffle_order: + + if i == 0: + image = add_blur(image, sf=sf) + + # elif i == 1: + # image = add_blur(image, sf=sf) + + if i == 0: + pass + + elif i == 2: + a, b = image.shape[1], image.shape[0] + # downsample2 + if random.random() < 0.8: + sf1 = random.uniform(1, 2 * sf) + image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), + interpolation=random.choice([1, 2, 3])) + else: + k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) + k_shifted = shift_pixel(k, sf) + k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel + image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') + image = image[0::sf, 0::sf, ...] 
# nearest downsampling
+
+            image = np.clip(image, 0.0, 1.0)
+
+        elif i == 3:
+            # downsample3
+            image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3]))
+            image = np.clip(image, 0.0, 1.0)
+
+        elif i == 4:
+            # add Gaussian noise
+            image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2)
+
+        elif i == 5:
+            # add JPEG noise
+            if random.random() < jpeg_prob:
+                image = add_JPEG_noise(image)
+        #
+        # elif i == 6:
+        #     # add processed camera sensor noise
+        #     if random.random() < isp_prob and isp_model is not None:
+        #         with torch.no_grad():
+        #             img, hq = isp_model.forward(img.copy(), hq)
+
+    # add final JPEG compression noise
+    image = add_JPEG_noise(image)
+    image = util.single2uint(image)
+    example = {"image": image}
+    return example
+
+
+
+
+if __name__ == '__main__':
+    print("hey")
+    img = util.imread_uint('utils/test.png', 3)
+    img = img[:448, :448]
+    h = img.shape[0] // 4
+    print("resizing to", h)
+    sf = 4
+    deg_fn = partial(degradation_bsrgan_variant, sf=sf)
+    for i in range(20):
+        print(i)
+        img_hq = img
+        img_lq = deg_fn(img)["image"]
+        img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq)
+        print(img_lq)
+        img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"]
+        print(img_lq.shape)
+        print("bicubic", img_lq_bicubic.shape)
+        print(img_hq.shape)
+        lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                interpolation=0)
+        lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic),
+                                        (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])),
+                                        interpolation=0)
+        img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1)
+        util.imsave(img_concat, str(i) + '.png')
diff --git a/ldm/modules/image_degradation/utils/test.png b/ldm/modules/image_degradation/utils/test.png
new file mode 100644
index 0000000000000000000000000000000000000000..4249b43de0f22707758d13c240268a401642f6e6
GIT binary patch
literal 441072
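For reference, the rotated-covariance construction behind anisotropic_Gaussian/gm_blur_kernel above reduces to Sigma = Q diag(l1, l2) Q^T for a rotation Q by angle theta; the kernel is then the zero-mean Gaussian pdf evaluated on the pixel grid and normalised. A minimal standalone sketch (the function name and default values are illustrative, not part of the patch):

import numpy as np
import scipy.stats as ss

def rotated_gaussian_kernel(ksize=15, theta=np.pi / 4, l1=6.0, l2=1.0):
    # Covariance from eigenvalues l1, l2 and rotation angle theta: Sigma = Q D Q^T.
    Q = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta),  np.cos(theta)]])
    Sigma = Q @ np.diag([l1, l2]) @ Q.T
    # Evaluate the zero-mean Gaussian pdf on a ksize x ksize grid centred on the kernel.
    center = (ksize - 1) / 2.0
    ys, xs = np.mgrid[0:ksize, 0:ksize]
    grid = np.stack([xs - center, ys - center], axis=-1)   # (ksize, ksize, 2)
    k = ss.multivariate_normal.pdf(grid, mean=[0.0, 0.0], cov=Sigma)
    return k / k.sum()                                      # kernel sums to 1

k = rotated_gaussian_kernel()
print(k.shape, round(k.sum(), 6))                           # (15, 15) 1.0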
zNQM;GeGDgtY!s?B-0;E$7A^ni0saE%7ywBK)-=X*>f&2{@k~QbAAWL$X$sCR!>POi zh2@edAC>|R{%uew+nTJ$tN;HKA&_^v_^_Txrfh%NxyX1G;}XfeyQieXEW{w z!qmlvkUN>#Lrr(Y-|A)!l*I|I@cnFjTe92v*IF>ry<&+4FRPPh({`wNN`{M$4iY41 zB=~1co;#|>X6E_>Y=4BtsFVG#HNB>r3N$0e=TkBn7qs7vdhXF*nc<026iZ0X4h3E`oTGIJD9)TOW&aMHB$}RrOE7TMLn!NV+Hq=ExGDckf=JBv@tP9Ir8$F zI_oJ21GOgVDotIv=ONs}R2QmE{9b%d=_%Ha_h&{?2ue|p99e{4 zUN-+fj?O)h>F@vJBMRNN+@&y7bEmmXa#teM+!x9vx4B<($t}6eU2->yn)`HNf5k|KX;s&i(Grg|?AOG&z%$UADk z3xx_J!3v)3jvcE(kh$ zZaf3RQ!LicNSGaoAq=)$N3QEs2c30Y5}G( z(z^H73229&1v=P>f}tXv%m&4fH(woYh)WL9-1yk2vC0!Z?CCCKUm@Y zpEdd4kNtwl)92p5^(rn<3Z6D|KBlSfI#@b=TnMrgfJ({f-`IfZwgh&m7*hipZCE~q z&=r@x+D{fUY3h{2gqkNGrEUcBrS_DC(+~ORN$2dCb5JS(iu4Y? zt0|_*<6wkW8gj{TB-kT3OhSCyw#+aQ&*?viU>d`ND2igwd6mciYfeeD*$v;rMcKu! zn-}DAS$Q;0^5}AfcI45-&y}Cyu=sfU?86y}7wn0VE&j{|!)n&3I~`$6_BBg#CBND2 zs>XO@(ect*ufkDt3-YC!3?NtgXp^;-(!bu_*_pIdn603Q8-f`42oMH+2yO)mKsfi#14M>q`TP~(m`SyEJc@;);Qf47PP zO@vQ@Cq2E|)m4G$C@;ePG@Z>#hg8Cs!r9%e%=$C-*$#g+Z8(qnP}+tGcZvxL7cGH` zBi6-0bfb6-GnYdm$pZdLx%@o*7EV7M?8TewG1IFA!=AxY+5w zrGMPbI#{AZ`{sWk7n^H>rV35UdKhIJ3*}oYG`<-@#=xkOk*Og~tP{^UhG`UVtlst< zdu02F+#VDe z#jB&M9-}g}s2B~Y@q9!l`l%xKB?zCQJ03Fx!Rfo(x4h}c3Ug8~GM+*yl+Kp~wq+}% zI2wGxJVpT>LMrOAC!H#oLMmuue5MV9B*E$MZD4W)HUPmm>~x^MYp;hg9i)YxE!)2S z@tY`w&oMH2C~|#|xACXZ^lE(p{$%uPu14C}8z8XSyawSbOE4U)h1?lYebpqXUiSLlW=){4nlSSW8tq-R1N6 z@&2-GTja)Q^Dqn6R$83QGJwl9WODR~DqTeQ(&VA5o3TUK@PG`43vSV(66Mpw^S@tgwiY%YUU@yhycuIX;D0xYoEe;lxc~ME9(``!ePhQh(Oy9$^0Q_41ZZO2k~Hfw-hR@3%B(!1{|sA zsH>%$($M#BL*+!_S52rmSH9P7_mIb7J#N7JAobOIzuqli#yi72p{{8{vZKl35$PNn zgkZib@o^;TosSNpMU!YDQ5b9j!WR)f9pssVPW4R{KY10F|6lAl=GD&#AsYjK89>Qz zQ-OTq1760dmhh)IK&uNae~s5>Gde#}dH~AF7kd$cWmt6)2ouQM+n;%JWZC}ja`?0n z6~rRo1`YrVezNu5BLOf|MoZTAB9oIk+wJ?4 ztLV=rLpOOqu1Nu%{?zVgd{;4ji|+N@!Kor?CTlxqDNx|1qbF6VeP>P<3%W@IL{2~9 z=RoIV{q%ouArVj+RFB1M(omo!0)_5my(c}8zI?p;#eDFE<5&)UZ6x`GBNXjUkEk85 zH@ceM*9h+g?>V5feZyO`>*N1;d(q;jUkG2{Rmt24g?q#N)okN#9f{=}o z5LQ`&yd2SfZO!!NY`jkLU0bu%9$-ZW)D^OT3o%fp{7Twzx?3~toRROjXDDY#OLJuF zxJUM&fTpy#k)w=w_mxh8lQPVRo6&mMwH)TZ}957vWRl*j?LVhfeX9XOODY+AO9gEP4uW z4>?afh^kJ6-O;z|E#H1?Q7nOa$mRHM{j|?Lf95N=8@d&#FrHDRl5$xN8AB-;d~bSH zrw=${s7T|k&3wN)vhV|uTtaDX+>$ui<5bA%)9pqs$Sb8A1TLMbSkb8Z$|lj_Ym=xl zo42#VM<|-!9I+_ z7`!7p&4&~bNwmk;1FW3#K}?DwEq-_#oRj8*K``G)J9~CRaC^Ts@DXrtw{A@Fmj$i( zTtHXA!9}{C($MByv~-(?U7p1yIf$Y#mY;+JHYO7xjniXTsvkbyn`+@Ne^aC_tt|_+7hT`P0)?sDpf05iFu(7pF&)C=v zpvkwmX|sulcs!yXEX$-0ng~thWbPS7QjxTL02LU-i49QI;!1Z{=&L|}K@9+Szwqg6 zLM{Hj`EUI59hm~+JfzvR{BG0o2OrhaSLvniSa{@(y4Hiy+6IFUo5!J@av-+BlXq&e z4QPm|0Hs~ir8p+&h<6lISSEAkE5($0LVI^1f3#TkN~;DCmzaS14}gQUsmMy{F7u?* z#(0u>?zxaM@|t5xqs0xe)$>M2z(gL0!?`08un22iq(`Vgnc;^KOgLxt#%qV1&jZQA zWz>!K{h&07f4}yN8lzm#@H6Y+XOTnS9Oz01rAhwPXpqRTzUUS$8Sv^ujFiqdmmrN` z{#Kp|`E#++2`UxZ{nI-vW8j**$`c`mO)+1b}nH&sI;xIW~ z&oK~i(W05m3?cK0oy%U@G^ktJS7+SD0dXveC{LLXXMMG_*VAKS0Kyqd-$$066^iW+ zae-=mw4%mbn#*+f>SU;$5&P<+f$8t2?xO)z)o+)m?k9SB+X8;CYo9el^hHn+&2pAo z#qu!&S;^v`&<4qz{)#c=I|v&CUy73F`FsdGa&zpcFXVDd@X8#fuoiRgS)Qa56C=6y zG$doY@u#ogTy2$9fakTLjTc8x$085tHyKX@JFHH+enaV^%RIQQ*o?hKemSEpugmnn zLWm8nIKV3B{`ri}pArZQY~CQQx*HPd))^_nDG)?##}8MB5!~HPtsBNaN2^>S(=~(|5moMJEOF zz_QMirB)nASf=`i zV8uUSP-JjqPoWqIy{hy&&zk}}PK818ppu&p5r}_={lcB^e};)EIF%iO_ji&WGa#!t z?v?V!_+DgnZSA6V@XC3-pav&4f^C|p0^ZaifrRQ@5o)= zqrHYB*d1lxwaBBhM_ZPYwnR0?znujw-ADVAmfr2|`TyfIyg@Mrox$X%0R34l#aA$< zgYRuz^jQ|+nEeC$+IFDo7%KBexTR8^n~!}g16m9vvVgYHyFPgkjMR-%G7`1HBBFqi zyXCQP$xLXTyQrT zgV-4Ln2Zg%d-U2>UT72$J?Jn@-L4P?=9)-Ze|fw z!=Vq2%x3UOq*SR}&r~i$|C8_-@is5KaUWS)CA1Qn)Ad8 z4o%agOw@a3{isst!ASa_F8(6ifN_V}*Tg0J*Sxc^Ps=pzOVTxoF(CTmwX8-_{Yn>K 
zj-avJ_9fkAT$g=N+ACZyI{xY8v`Vd%jOTHo;8bLP5>E@BK<}m!0_&NNwMDYn& zI@_~Tuu->+mGu#mGcV91Z86qO<(!FLlC*=9GTAL0!#3f5I2`0nTb<- zGV44VJSCY4JXqp}I*ZdYF!;Uk@nIKXlY1M)@e7=+0EVo8XvtLA-;KTkW}XhKmZ$5Q>M1S6ZZBMp}u1n zrnUJkO;>Fnr@NjDqch569NUyNXJ_eZ8Ax|Fme(^M16%;rLQp?$?;kvBFkM5E%U5%7 z(2^h-6hL8vs?(b|_t`PIhvE2nSL0~W@~A7VU7q4f8+jx9qHR;Tqp{v|KIAw|@7-QR z?ew&bjLT_J83H+1V0HM%LID1dcx70L+>qVwK7=9e7USuf!KRimqiKXu|KEIk*0#KIOZ-8H8wBSwCAPut`@bZR1 zMY1Cc3pP3E3|n8W`cBO%#@o=?S5S5g@b85#UQ{m}a>4dK^Q3Qqe}{)te5R5;$YU=b z-B4~msGn|$JURqSKahR6vO>dd4T3_Wo+Iyt~9)Sg}r6 zMN^h|#t+Lh4fg;Y17E&4^dbn5ZEa=!&z|px+EOnYGdFp zMCL=i8%dP|_vc9-KYMa9`m;)^5!Ll)#o3F2zlG7~xirg3p{v@i$s&Z)vO+A+{OcDT zV)7&{=R@)tAyZxr9gkrY#CdE?(xdH;kb~f4Qj82^04}glmi4=#ecuS7ce80uQ>-Mg zhC>7=mnH=thw7o#%j{wjxBN$?&iti3VY_-X9z-{Blhi-ODk*!=(|;p*ptH{u8w|q~ zp5|VmNmoE1u7kvvhSJ&&4pxxf*zXN$E~l+5&b)#-jQ|@TEjlXo-+SXfvd&jqay_N1 zB)Ogl^0PnX<8F+8hK@>-V>8RT_R6eJ+}>=s4NFIwAS@xG4p1qGXtA7Z(e~14Yn_oi znM?5<3)Dac=b8e8Yx_~8PXkKsoe%C$2~othYkS<+ce9piiW+oda9QI}CN>wE#mk@7 z^I1{7BoN#(9|EXlv?eqs+dW2M!(?gr!Te^y8#`-9*HqD@a?xZOyHl?scIkE9L0Q>l z>*FMOGA?dQIX)Z1VjMz20qrY-@EV8B&Ii_ORagZnl%dVW*x-k3&dp^S5Hsb6ZYkbg zOq~8ZIabi-xrbR&Ghs;-wG6zRKmX;P_kPL`?&irekiQ5NqdG9YiwV{`cBT59V*ss0 z9`J#>?b!06elnnLZeHG+_c^B7GmDt}H%fnVSfz0^bR-nHu?o>lIT5KGhA&$%o`B{< zK@~cy683NB?y7;~ZOW}a)>CMQ&yA_ ztN>@bzzs;@CgycvEqNF;G{||(RM_SgPzO%6K5m#^)^^38B>#dr`1O(G`1>_kp_Ou$ zJgyUz6GAOP6k!49BF9XOahZLKT}=*^N-YC)R5B8zy@Eu;0#7y(hfBO;PPhMn8;Pi87f}IYu*1RAs8lDCQm z&M{Pmd^i1ogaq26XtC{?KPb&?^AHa=e8{7xSy}Oh8gHAwx`-S!B^th~Ch=jPo*lp& z=Fg8?k0FV1BP!sF6!{2t+((O}OB=gC@q-ma8xXCX#yN=8zw3)a33@R-NRUYqg+uc& zyM(L5s*B;8$Ao`W>%ph9lF)(0J2ItNkki)hsv>$ zTv{U{`B!bpHiiIqN0%tH{ZRAjJWlFyCUy62*=@{1KHzvbxe(8+OrhMbU5_MveJX_y zT(;559yP9HJ(-Eup~eiiDCP(;jtPqh48FLbBi3oSR%)z0@iG03!y@y;gui($ZyB&Y zK22fWYT+xl>s){6@_m#OwzT+B_Jc~{gJ7Jv^Nkx<#;vYf2b_po3rXg*{`d5W_z*m+ zn9pN45Q^uD%7|qpTC>E$GYs+0Pn|sJ_2F|EiDOoGEDnCyZv9w)_YcqKh%duP;>y`G zRr$X(MJXGwh<)`gnrCM!C>0SqZebvql$kL)>891`Cqwcxzie)by*prC&6@JlX&ysV zgk;Ri4>~5Aa;M5G8u5C_O<6OvU2(k}Ye=JN*Gr()A+QVpsgx}*)2m+Kk;Kn~s`7lb zXjECeNzTj=2=t-<|&nGiy#l9-opF>TARtu_=FxCBATD0p)*>xS4h#zehsIedVWnTw{l87F|Bzd?|p zosSxplnXKp6Tf`_T^P85gz;m=$kv4^5D2@q7_vC?kJ+rocYvc;qFm!=fw&Vnv@`Bz zAj{0q2DIHrwe~5-kl6xNX8jfm7I{F_`otZ}3N&$N1=Vl1(K{2AVq5>h)JLl*fm zS+M_Ry0y7IGSI)4_X$2p-s066!#QLb-cQE*Q~D-(zHP~atz#DiN=64Ue-qm`YZUPC({sYozmUs+#w zOT7va^g2sv0upWY7qitgpX%^zlEpDn>$VX!ApDJeF2x!GO!xt6H1z1hS2WO2a59UK zQMN-m)(-KSug}?!ZPN06i<0HTr~BbS82!gFFEgUDd`?ZF=6IIQB|&bGdRU-O>OCh) zxzqI?wtxDs4;UOS>e7{__9Zr#Z(NcmS%|TTyk;R;n18@J`7a4%&x`-C?sHbCY{JQzWDvUT^ zoIlseEOTTh9Nl;>WBVd()QV=i{yw!%2K5HnSJUQ3FY~T~7$}QW(#AGpDJOoPE$=~3Yn%Syxi)O>8_Mqe8(ZU2jig>A+S`lj#tEeR0R5q!`&IvY0^|){mK7@spg`_(58AH+m?v_vnnI#<{C@Z?S+6;-!my= z`=-iziJk7xO;_(?^X|QHX#eCJ^e!zZZA)M%T@lpesFMR-nso|r#)YhiAm_`2BWe16nR};K(EPX z0F=q6_fj==5#uT8CA%eYnSQemA<~YBn{&+@-^8?v{zj%Los7HO53LGwC{vgXpdbR?$ z;uuitKoobx2NtvE+Yl`C)up5=uAJT~w^_net@Iu4wjKmUzF@t}WWxf1*JSnnx<#Ba z{d{3`)_Jh_?o75sxh7&k_vOXjrZglOF+QAb&*UruCq;bmP1NE^6^S;hAV5mwd0@H{ zauArsrT10#8pYOI@2{{Sw56r+l?`pAshqux7E$Kv=O79(>Q~Eln~1;4MQh>20Z3w) z`zFRiMOTdBaa!KeEB>lOLoU`}J_gmObX$jz!5Uwzz>v^kZ7j=~Z* z{Tc^0#LRC7e?7Sp+FE3M8S-o2&hqaSLB>L8>zN$M5qBbMHhyEatB32A7B#S7(2J@h z{XR3}+}-v`40iOUD8VUNo+eQouZK=;w6MSASX~<(+=lUoXPy0PWlu(W@Xy-?A~Q)X^` z!P<>R=99lKfNG zF%J33ZbU`eE(ZHKk2N2PJ9QN-h2>J49_S zt=Sa|wb#hO4Ir7E@JjSgygHT`gP=%t7Yx@IXU2weiJHs~40w()yIxHl;ASeUr5jBL z--lH(UD`AZ9f^3`S2+eCBg~?stop4#dEkc|7_v&J}-m{qEp;>26X_Aqv1Kz)aGG4R971M_Tf4XhLNJxI>mFmlHiV6%KieYSo zQshN~8ULJXLn1=`K#r>cB6R%9$5-;KzNyAS6S5;G4jhyHrET0pc4I4xMVX`O-Mr5z z?^Tk5l;V^Nbm+k;yBB4i8pmAKJ%CY~D}TSwjI-}WiAE{X1geJhh^!E{TmDUY-_qBo 
zm_Div;f9gY{9}^jfltu>7 z66A4pXr1-Q4MxM!oA!v2&7n?JLTq~R7W(C1XBJ(XFzdh*ThMw3 zEQZnooTqS2#`hj*S(LuwdoP?73p;i7>08CpkwnOGH-wmhl}+gBz*SzSXGnuv1i9mv z#@@dB$o)D>PR5RkcWAPUi>Q59vtC@l;P_8hB2Pj%X?wKtXBA<UM@#53umDS-;pz6>KbZlr)F~<-qC>3bZY8C z)I)*Gf#DZwESW?;x#iyxW^pN8Rt^G#;k2_Lxa#74((?3~vkBHF=&gFQ2!-14W|XGO zsluTgXv^2l{z@@mK(QIuH5s+bHlVe>_$u)-VOJu)EaiJ#hx`KDy-Q z@qK)-!1@O_Ce9BuHBf@X!Z z1S*c9237e)Dy4e1Z4F~6`_%P6f7g)4j-RcA{*3jHV)jRsy}ehx$^aj9#*vtn4f&;T z#`NKtDMp5R*v=h;ihjK%@3RdYv;~R&$8@CD>wjxqXLB~XRUHc-11>+?3?La(#_)y+ z@OJT}!}y~-k}C+t578g!s4D3f+v{9tfw-cs(yT#FxaAxw>+!`DPu2m$Q9o1o@EddiWJB3#%;T2D`zFW$jmQO+fq zdmf!De$9K-+DHP+93@Xq*TPIOLFy>{oo@NowI7n#JN6$ucFRK%#sxUF3pP#(i@R(F z2GS(zUghMaW<-69dD&Y_p%HkznPDP#2U($peFK7y=b)1bi-A^>lDRG8Ufe`5;(WD# zG_Mf>)#8%Z4utK9l*fxCFm+{d`FnJwjMBQ0iv+}zEu`e=R1jH0)PDg<<-+br7($;O| zcp$gA=4gu$7vCI5x)sIy4#)cUH7Z~?yuyW}%{Ve!=wADSv~%zD=`or6Zoky!%X0JY zC>2E1F1#7*S4jmz#L+5Ckp0ppKycEtLM`hP9SLE*+D>h}H7v7^E1!xPqD&DjmUjISTFJsOogT9C8h4$PbN zs9tDpTer{eSp;x-0W3o${NG=zlP2;d@=gOYEAv|`hdv)BR~l%I7_7PF?G7$SYoB9a zDK+`~jQ5m+Fo$u=KgcZGp=pq z-^s2}np`w1RZRs7yk#}Ni*a{{f?|990p1CnWMh`M=U(ciD>`13m=p+R@*gOhb_6%$~Agr+abTt;pFiMGm|yZ&P(+D2;8ag+Vr> zQMT2j@agjkLutT^B@Aj zQ2k+MEEMPS!7fFS4weDDj1WAT%k=g;&-_U*Wn+EW|`U_bY5yP^dCU^ zO7DF+vC8!9l%mLMe2U`ry9WG_4>XkV1>pb37*bfFehCsj{cg0y_k4{^i+d7CBY<-* zw|prtCM5wD(0bh6{=}^Ql8851yta2L2o_4Z?tQ;n$2L(XkKP>30HRUOSvIjd#lJ{t zClxOyKqX5)YuJrgR7H7PlWV%U0*0&6_Z99y6-@^Zoj&8Nqw>FW@Z-;* z(aZ>-53j$6P2OaJR{JAv2z7cjbI$|bL>JUlH>zt;gpi9aDzkP=A&g}DCxcz-lK=U* zuiK!Xr3v_}-_NfH$gW8a`Qk4@@6Qke1Fn7 zO}g-Fb^;SW)Jj4D`XzF^;As75NA|FPP81@C;7%e;Fcn(LV+mmthg-s)5OgD)4~P6BC26Ww;yJV>=md8KuF zhM$Jnb!^=?^EIekh+yn*fxs*^ED>Z;lFIAU^;UbY#j~Ax`P1f)pNgR1SCumd!)4Gh zd`^ftE2}i|qR>TJ;Bu2w!Zi*7eAj<^PI)ej@0*)O(}DHnG6ZM|+z&hOeL#}w}|7AjLpTxj~<8P zv7u`<4MDCXhW4Ywj*3lTA|<(8W44EI*ML_`dqgP53COHC}U zX@o=*SPv*-xGgl*2*no*Ng?ZL?sslg5zp^M@YS4&_vQ1z0%okAv#u1whV5`>eLX;R z%4WJHZ1;~S`^WORgW+x4Rh=u19-8tnl#3>uIUD`EJ#F@vEUH|$wJekdq>p4E5mdMDpA2JLMRI;Dvp!$ZzL+-OSL=fa@vWP21Vs4auj3 z4C-g1n;IMKvqP+V?~Vn|egL3NUBf=j+@_Jh!uPL7#xG10xWcZ09gXeH(%zM(n$`G; zd>@^U8CB2b?Y`U4{T*nHspXlSo#A8WmA}bc;a=~k_37hx_$!ioX-Mu$lBYJr>vn1w zTfJ`u$yjxK zp68@3tp)mpN}W9$xkIPy=O{OHG-k!#^G}spp8D!=id7qajbS@x{XTFys7s~oL#yKB z)`!yI`RrdbHLx|1_42;o8k*&xaxpDvTWHTq#pZtG)`PD4MonJJ+Pg+rmcFxq-@q#5P0qf2x>1Vr*`@Zh9Z8H z#V^rw8f5Q(>l9)_eDa(g_@0}y>tV6-GWv`mr4yX$jnkDGY4wN6OHJx8Ohi6GOAvO0 zs4)@92u2|hSrcgMkHCE*)@L%Uxkn1$(f+o)mD=moK*J!i3f_9unLO13Vx2f9-UVjn z>_FRjen_%WdXq7eAe^&8A9nmT`L|#|HfJ*+%F6uOB=fzV*zA^~rDqg1 zigDzbiQo&0zYbRgZpcZwQ&X^L5LFg97=O^Q`yamcZ2=dje{*y5y|LIl)3LnT%Z61( zj>*nNVlOGzmUv*krh9KkEDHUXBp6gwj9BLqCYK*-CiW@UL4e$G!rO3x&y+~0pAbk& z6dl8G1G6L$F~2Z@JU~e~NJVB-7g}nn8fC1jURPdv@ojm#*}j~3;r~6ULuH&OxF5mE z<&HFS?KeW7lt{Wig{Ah=1BFD(To+Cyw%UXfY=O38A=Wi--hk`^Er#M7rDLAdEEFe+ z+zx%fGcmY#xLgTL={PlSQ=LK-|o!DP3FqrQ~@y9fnqjTG9_hSunrJlDu zgs_(&ADxY2IdE5q+Sp-%xiOZd@b&x$sdn={iGiwcI<6s)SqnOm|x$s zCtVSoI5UHexJ!Pl!!!X~8%whhVe1dMP7~D5?-a)K6jS}ovG3(^e1-o1;qx@c6~53) zzE9+zguzv{wQmZ4-*lV~N_EO#c}AHc0V9-m#Lmy_!y$vxjF2LOJ_uA>lhvSi00s0| z8@qMYwUxT7wl7<1hzc8*%yr;D~IULvJ)vzHV ztR)i`87bt5p7Sd?Oqj&RdnIKX{UvqJ>DYK zACm9xS-K2~iV$p)$+Af`xygE31R5gt_ZK&NtfKv5l7l0IUKrLdoRIN-_EYfW zdD+rc)nk8?i?=~c+0Kqi^3x!%!CkdNaGJ*ypZkYTi+*$`)8HUmMH4dbELqnz^Pn~V zdd!VCk_0x;1^}ZG|5ZI*gTAuah0%WS!CL`6-VA2mpZMV9(hT`Q+zs4MTS`?qPH*L$ zr;X7kX9m9e8-rC+B->`lU0{PPirA+VTjd)!T)&>Zak(?*A~cwDH`;^sgYVU&3xN@w zMv;fh-lLc?wIp1$pW_rqtqMbPlxHp2cjc`GJ)~!Va_wenth9Q1#BI`~JC=y>+A=v` z8_fnWs3K&C*b07$3xJ_+PR$7YYvgVJhh2?&BBS8coN29;{nX?8g#)XHgalR1y52d?7M;$!ik0 zU(kD4u;L=iD?TY`?TPKD&gx*fKV_IY{fZyiwmk)IS;FeE{LuC zJ1FR-QYD}7C 
zF4ojwusv>m`o0bRy5c8nYh@RSFXmciDJP!&Mk{ToGOCXk6t{VXS!2e^Mgk$}hBd@!G?tKg&f%BM z*x7>)A&>sQDk@_ADr1fnAf~=d_Gy8m4D5wfDNZdfg*j z-+$?3spB!C+g6Xra*yQ6;2u{LWv)efO>jY4d0FJP z?@8X}?W^CB?%Q%07wt?bP@4D#uJs8Fsaywyag*KhA;_5Fph9M~xw41Wd&OsEa5&i1 z?JdXhitt}CED-UW6b{K`Qa*f1^s8gNla#x&e(?H?CxNfKN$5DYxtUjT6a{wmNLr(D zKWnwnj8(anRhZabK%A zJa31?EOfocAw0MQSDPR)-ShYBpRFilFU-Y4_!^%o9*a^HV!f`~p9Z^=QwwzvDoL}w zXTdFh9?As*#^>1;PPKOXE7!*PQ8<=wrd@%}m6D%)fWq81kW4<5=c224FGR)1lnyNf z>mBw4HS3z^2L_bKoCdzwDK~xFu~mbr${KF9>shkxO-GCU@f%m&mRfupek<~a{l;TS zTE|4;sy7Q@!9-e19Zgd?C}g#DTT3q+2n#DQAVcKcYSY*oyE1jSCf3F5|T@VORz+GSS2WcgoQuXFQr3;GNQuPwC4jCG@?2|nM_Y!hvhr+5GP zo8``T$JCX6FdLpQ=;P-G9-S7c4Yh`enm1<qTXPkuOW{aWa2 zgvMUM(u<1`e}?`ozPKDOA=atsR2aSN3j}9}LzRn>M?!}^3FYQ?v_Lx^)?eF}@dZm& zRg&7F;4#~)dXp0py0HL4>F$qWrlzMOgZGU;ET3)8{9)gIvdC<6S$SIRx(3~cj&U2v zrq@aJ?}i=EvM?MAETi7)xX6q*?s?Bsu*LSDnvr-82#*IeX|$#L?%vWHp_Nr&Fh*Jf z-&Fdi+Y#*=0~=I~P#Th-?Uv`5SQ)B%0eIY+B6b$Km>^>L~rk_tuM8=KAh;;}NILh;j@sLYZ|B2NEa zZO3ta-5cL!N$^^Kiju1BV)V8Mi}@8&9W7!ZXzGyAe)zUK!>+U9bPeY*ST|c#NqLC@ z9KPuKoP{2GTWfhnnY}*mVTS1JS-QpGZ)72z=fS0mnwrAxgOOi9o{G`?eVR#6B;e;T z3gRgR5mJV52x~&J-!gM3K1KRkhsWf86pwm|i}JENS({kk(cOM*K+!^QYc^4*>_7B<1L>3&E&9KW(o2WJHJe!=2VKP1MV5VcBq_m zTrhqylTwnzGx76$7q|G+0bkwRA$^P#sC z#(}RtvsIyLNS;MVdyG(HhzMk}x_46k%xpT^ec(eiaf>r*wJtpT@uGL&DKaUMm-;bKRW?oV>gkD@6 zYmurtuwk+F*BZt2zXzxuMp;=rAi5Uqw>7xHU?ydpu*to9Vh!~BDYI=rqs`z8Ppqit zakvt~3Rx}BbV}Kdq2TAmVYiKc zn!U^XxAwR0=O}r6(C@U5EiK41Xx+;tah%_}Dj+|5 zZ8Eb%P$vElw?u4gZM*q~r~zRSQz^6kIkLdeSV5Yc7A*-fHgo*D-(|17V)b)jJs7Ml z1)ID~U&iw};7T8wjn*uAR@BbhK){3}W&#Z3OMlf#89(Oa8sX`aCr>QyhMf_);+l9X z8V*;s11?Z`#%0E@up%Yvf^vzU`03WiZ%RPQ?jcCIF7yW_m7-kE8D`3qxzBmxdLq%>V~A{ztLi=`^>lU{JGL(dz0-TF)xg{y(=O+sAdOR|-52~l=j zH+y7frtDc{ucB-3nQ$rGYmba;UtIf!-|O@9XMYrV-}meFJkN6;M+0L7e}Z&FRIBdK zkmrv7PTThhxWCNu5ew1NSQ?m(tD{JHR~m78hevQ->y@Y%h@dPuYhyged10`NzpoD!s?jV{IeZu37E{EQMYf($1 zFNe-W!(Tqz5sfbXqfu6uRZxekF89jH;lp!YDqO!n436W6q_z4tyk(i|nHWZ7oEKQb}AyMsD7wys7 z-*#-4CN1P(Q(nZO_XH^;%Vs}N6!px`!w)sOKkuuM6*uThh9_6AH+0mtG&gT~EYz0* zY3+3xny1-!U*IbuMQPPNm3QvHQ$l1x8Dt2vrB9whQS~NuVaz( z-z~)1Bh7#$=mQwjx232_tVdM9pY!SS{qw@&^TmKAF72jQ^Tk&(Io<|8*7MrwEqTUW zDGV_1-Lpp=0jJNhxwsk+z23dDO>M=)je_`NRG&5W}S=(OQ?@N$pt@h|88)$Yh zUkOJ#EUm8cR9u7A3qv4ZlJpz~pdU!PzP@e-atC?G5SH+_Vr2jqanU>uO>nZ2~VTbG$Rboa- zS=J!vLcDbg-qZgzO%HwxCLHQ?VrdI^>@JJ>z4(~Tof#&x9+{v=-=@$}iWY4yH++a< zfQ{OulG0+LWJTy8{e3~|`Buw)zt%S!Y5<1&zoB^NZ#C{@KxS67YTx%lhFSS@<;U&q z?W|}Kc=y3ZeKUVl#9^D9%Yzh57IH0i@Ns(T6Tu~bT)|ee-Fl2+os8$GQbmZ|7kwKG z>-Evskj;I^t*3$HXK~uMal>~Pr&-B!ULgzv3_cfm)mI$eFrZiV{Trhdz3bBQ&(vGp zVPOAvKt?(^_A|reGM^GwH-27|eZ9)T#d~$2TJ@poHga}6)PYVqmxKMUfIwek|1hWi zt*=0iaCUxDu8B}0y+2zv9TPkgejCqbEcVxS_=_NA1g4TVH^KkM`F6-xX!4K{9Ypmv z7pBGZ9W@#eSdI~5{Pjo?ZZQ0oigIY#xOTHc*vbi^Y)EIJOHo;cXLKW$5yG?e_mhb#=7+#0^ms<#p1R6hGWiMuP|FY;+x8 zh6C+b6BPO^%I@k6hle1_H#u`;CE%2;fgS(&#_1Xa0$%Gavvee&@RZfm)I59ktY2Yp zSX@N#s%l|genN&vnRM0S3tLK=y-YfuW6>*q*roaTGs`r@S22)6q|IMxXm;r@7>*BZ zDwtnB8SP6BFKO}zV)}{ywj%e?s$?c16+B%2$6ahOUM-W;4-{(W>b`sYm?ZYQA8k#O z!^6XeclanfWV;g>zi@@&vwe4gR0hDym+;dcRH8%785>FG!P!T|Q{p)$z~85|^q%es zjM@Jk+gWJ9sl~tSf7a)@5cy7bG|C4v>FRK1FLNW#0$fsvenc1l{SK~v(i3RNpISQ@ zY~JXGWyF{D`V@CgsgR@bnwgD5Kv0*|ZX?e%nMZ#F8Ak;zf?Xx1I90nGJ>X!D>uSs) zTYi3VDCoPpTCgCRq?fXvxx(i%!bEj`>mVugjkS4Tx@<`*D_r?B|W0r=z&*y z;sAZSk;B_6*;nSGf6&vTqlOwcq6~|A-3<<&i7)Db;h^EvS?VCBP_#gJTpn0{YTI}C z?)>v4A7mDVfwq_k91i+DL0m+$*UOldkQsLdx7UJq?BAC7=haw$jAlD#GKOC#EH~1&a|GsHq7_ ztv-d>W=~inu^!8ewpVOqtDya~2h*F~?+_g;9(4JWQLVPp^#Xqx9`+4+u;5G`suy)> znf|lHo4f?D)CryTm)Yy66z;b=yMT*h;wP~N{_)Hez7Gl?1)pH=Pn=lymE8>UXAgf? 
z;XF9}W4UpbXdQiFhJEFw#Fpfs7gApXNOC!#(#FAo2WtLf9&BTSue65lJS^sJFIJ8S zWLGtyMpb$UOK9e4gyfiL2$?B9l@?I>o%j(bdKB&}WM+N(orW61Z1e1lA&i0jvtj(IxsOUS-#$EoWo*^#oct~*iB$;S2b_(+a>=+$?Jmsvx2FI!2Bc3ZDP|SOb%o&2+_>&Vvq%_I8m{vFk6*N+ zTW^cj8<;Jc&c1S-u>Tp@9N6E-kg$);xq{*-K4kE!^^~epvpMKZ<~Tnf=0&X1aGhI# z4#%3}UF~y^^J^g&GU^sV-N9tQg3G@NhUW8ADG`BEy&FNiB!QRfGq}d(M#?|3TjtH4 zzZs|?3W|!39vReh0asqWNFt|9rg2RxPtPu7sOt@^_KL0%J_w<3A*!RZwq$QPPM5hLE7& zyGh|7c5#0|xDZplXr{4QKXczD9*Hqx0WY|9AG=cSjv*f+%N}6K$34zv31PH~P#p3P z5VZ&(_+mrrkZzR7l2LI;FoV)DlTzK}rs9;}PdR+Gu@5Z1aL=!0mdjoU@uU3O+;ayz zr(CxbeIFqf*!3EmK$V$=MGs@c$hB>>VkSs!uy=??Bc3}*EYCZ0wi{Bn0+g|XSZ@LP z%cEw(+%GdM*g{sJB!^zKbyh2}hs|t+$-vr|8FJ(*;EpwexG(oBt!$+$!6lj|&?IX7 zbX;J9_3GRA_X*?#;4uvM=j^eWz7A96b)s^yTkc+=Qd2qV2w03e4YfEUy?qgG2??#r za5{A~Dr~!GJ&>1TUqY+nkeTnr&fWcb=&$G=2B){S>N^o*jYs!iHqc*Vlo9czaVj%M z-l#Qv+*vj-{Bk*OMh?zqB{>~5ekWtFKp4H4;mQ`%FrOi@7#Z=lA=7NiXw~Y2<*VP` zg5PB>J~Y(YUrM*Y)h${5jzUluOPf;_dI!iG&f@|OQjgyNg7_=)(UL1fv_bt;{Ww_b zvTYJcS>0aO%=9lyD{H6?Rnnx@(ss}K&?BdpeYF3xI9G=L8GkjOLDf2uQs+gePk?Y+a6W*Db#_iczPmbWM(uzA!@gXe+CQ=;|`7(VX^7R~DOe*sQ+s6niMP9M|->e4X((f(HU7#oEbh z3ViTxQRf&?LrUX;Ji(%s^}gq7rUm$6fsH zO{M|ksOJvr&+E%2RAWezr#KhCmgLDn*}b49N}jt;1_L* zN#y!B7tRpJP*6jxlT!BH*&R)n$&P8*dQ%(8ANE1;%Xc?GpDZU~FXae`mjppSOCNKc z*PIod540Q&e*D}^S@YNC>oOPFdZ0(?aPYk9yd>ZxU}tjnd_V+LU4cHM0XYUgz(kZu zSP5cKlyyN>J;21Ks;YzQ1fU%Ztm7>Lqz0={H{+Ag*G?4kggd$as_^$Ua<_@ntB;u6 z&wi>f82bdmWPp}>H469xZ^1ofPG}y+HD{ipZ7lk72F5M$` zRQC5tDk*W{(NavjZdflDD$oevvpcij!Dw9=3WI9EMj3N1Qw$%2Q=Lu30xZWPS7egx zESH7R;Uy*0&IR!G$`nG8l>1v5C*+7^=v<8V;XZ!WjG9?Iw!xC~u)($E{+PTFHL>v? z#*^@!w^&M5dg?v2i2+bdhsQssc%@yh(X)3D>Uz0gP{y!mhF2pr_Fmekz)$4cReqUQ z$$!Cw2JLDo(l<^j`@7pcLh`CJ*odmpzs*3mOjfeo(Fd!_?&+&nEp*GjR%@cb4fG9O)SFDTnJuC?R6`1Dy=q6?N728(*4<4nlHFuyJSJJmHYr zgvS1;Z$PAA4~{JFGqLBt&GdJe6x`(>#^~1qvjErvAKUUo7)&0GjqzhB_ClHp{oIRJ zvm0@nItu9)+N(y1M{rO)A3GXfeq|$*A7N&el4EdghrDox{QdwhOG7#%$n%jkxR2#TwJ zjf$Dj`?Ohne1w;7zL3&?r9b-c@6PMd4|}=A*I6>079obp)D9FZ^UKPON)eIpk5T{m zWUhn*KR}*=!^XzM;fxoT$k{Qz;)H33)>6m4Z=} z?#P-O0?Tu!lrR1!rms8H>V$048N4*cXFQ`3?JB(CZ>gC5^g^u#5*P%)Y&`9}hYdL1 zPaf%*P#V&1Rj9Qp^UEng8Fr z0c#Djr|+L-H^Re_8{MwTAVns;wA@8Ux+c#RM>=*~ccu}GEQ3GyFMQ_$tu4P-a#h#q ziSl9-<*`5GaH&PU9Su!x(6}m1y5w*9mGjl}e*s|;=ld;(T*tVY8Nohpg_{pS8`>A5 zel~q4Bg{d;bOHROZxD93@m9RP(M7~?l9#u?w=emC1HPum5XjS69#mrR%`IB&U!Rt76Pa<#Z|dOUx3F~+DCG! 
zigG+647GPaUaWXidwP49_7jH9Or@$60pRZvEQsL`1OFWWB-rBF8!YwCP`0Y{I_B^yM9`}9oQF@g_i`QlQ=Zh_EuNDvhFrq z1y0|gebtDs#mlbGG>&rvHeAS2=VvXmhfTcCCadr5&IELp*q2O=jkq9ZHxC-7Wv?p@ zyp+y2p@OiQ3?&CtO&D4sbO_@jJW{Kn-i>BjGF*23@&zMz~SzVQmp z*YVQ7isw*RtQYK|yVxCj1MYkAOUTT@s3t37rq+vt?Ikm{k-5$clB8Dt!X+N}BGSyE zpM||snH`hQFt|b|+qhl<|KEoTJYkS4sit;IU43k`$6MOmf1X+Vhn*&8XckF@tYmPV zxT7(^-Pm%vT2P@)7(Q%1G$l`C>uUSgrx&Q?l_8ey*x8w8&(o7n*=MQQ{-?bKlvd4` z4jJOG4!bp{%#Y!)Rpc6TTMGzWY58R_*jKpA}U{j~Omo{QzpHflf>jISU;<7HrjXJ}5 znVp?=Uu#wk)Ajm?Vj7FlmshrPC&O3-e=xJyK79OwMEBe|(YQPU{-G#^#Xwnk!^Blx z`aVYqRkayg3F=_-y@2D1C`?BO?jQr}y&yAI?#C=ipP&pgcti4my%q#FVxy$deV%UM zt+8Z}c$tET{*D1FI)}10dO~mw#;7d4FD}BH2tY)kEE>8B5!3_~(=%Qca+p^Vm%ITN*#&R-4O z>gwyydb5u%8J=#RsbVGgE+bw1T%-BX(`**L$6d)5Oga%jVkOltCX2Wz0s`c$@4=mr zA4+C7%AHl{=giDC%bqOuS_|j7Y9e1xoMX;ax3gyh?qT1rCZm-bz z|I=mp_o?MPE^Yx_R6yB!{p==q6WKe(T+`7sj_Y}aI#Pq&GFh%0@kXJni|3m)!>F$> zEW6A5?~f*fO4F1iyOl6)rQ=)sdAMp}pbEjSQubXmPNIQmVC$HpA|y}UgvSxMWkZ<$ ztL2s0pOp57UBOLnrbdwc&B^nc8?|6AzO>H!wGgyelsP#^QA zwq8T*BWlUP`9WG;(FJJt7mo!srolaUTW*gDcN zv$f3z<7?Kt8!qlT;;`m*%NySYLiP!?RE+7$cV_>G`S^1P*4H z2>;mOfy-uKSooJx_T&CtQ}eiU#psdaPa0IjO*Rtz&(5#^v zoMHiCRx$2vDa}iX=`q?}T=CVqD77zAcXA-u`@vLgy4KyBBljqW@^C>kOfJb6uAkjW zW>AG0Bp40^b!J_o64(8`*UP%yuht5acX4oK3VlcUK|=f%-z8|^giNSOLi%g#oq$&v zJ?R_>KYzui!(Qwsr_M6bxfDF>^$C9rs9^>+$Yk_D$ci-QjCdN}sN`^Yz@@5CpW61p zP3^kUGE8GB*FIoqEc}x|LEAQZpx>onMtd)-N`fK{8kFOQ2J6}npCU<2n0J=xpdTS~Ub%r0P5on=ruzaOv#?$BFN z$u;EDvCH=%#kA^KzKpGm;U5ASYg0H2HxS{jKW{Ej$Ce;@#aCbA{O{4;5mHUe9k3Deb$$`<{ju( zA2Ls{)*kKjRV&e+F9&QB{RRDLRU(9`-LQJC^ShL1GqZ;t-R%FxzkDA-CU3oK2kV$G z7R#$04K*b3`@1Cf>hN6pzM-yrU+)M`n7yI8GdkjtdAygLRwwrC0>ns500BJwJnYz9 zhAVIS{P5lFqdd^op@MWa@Lq6vzIe9va1sx4PW?c!&IV1kcsYV_{;9vr*YahSkV7&u zS>Djg*E=QIu4eLh4$6Pvr;U|M){~T()cbWhttxyE2BOVuBF+=sVS%A@y>(xRe$eI9 z;R5HDN%9Z$EVkA?MKXk~S=Z^M!vjxOD;dO-p=*mwXJqIZ;)%M?xS^+01 z%D-lk$!mEE-4qeRXzFAOwX*4*h@i`(yz;E=Uk1V*`7F@>Z`rB;EIKnNO;^7xy(a_> z5$iUZTy-}|>biNUp}<~9?iIuxL>Q}N!}~V@t%1=QEc#Vn#5aVKCrq$3uEm1O`>*JQ zrITYW8&_VpW8RKAW3+ak(H?%;*zA>xM~g`q=agaJBRR4BuZ2TRs%R|(LhIxn^FOk6 z##{^h-(ptQ&4+F63mhoyy_&U*$Dpu;b}hqssmyS)Bb3o<4e!WfZ$ByEER}5kAQJuV z;D<$!(j^knPg&Q+0H3M4t|z^wSJijmxf*%BKvxa&Kbe3JT*jV(y}Li@RT*oqGZtegT6gyPCphcT znG^;H`+IWw4g&`(a2?acL^XFzbQ$~QJeiwCp*B7Q=>MLFB<7?OR(E#Vas;YB5~G>J z?1$J){NKy`&(MhyvaF*$bM3jkC_~{#`IhqT6dg!rvT;6XRrcCCzVmj(>z!>sJkQ_C z@-iu1Eiq6rg_@JrXBtgHl}L4Z%5s*MSCE@4Fj?A!@P@~bBe2G|X!P7UI`UKmbxm2; zd#7V@3&LULL_ql%?H9%*X)AdF`AO5(|8O2F7l6jJ)Hk0T&+en}_-EK-8JC$x?|pC1 zi>6*i<))wCKH<4T$IH0nm4;gl30x3`v?~l2x6gRFg|C-@!ntGjdY3YseSvUHgX@Zq z_K2HY_MnCT@zO#^2YCFiTi3P($YwrO|C#siMhk)ApHIs!7vl%>f9Wg_brY{CTCvu; zZVqgrOc&s*w^JEmn!+<%+Y0>a^+yYqJXe z$39BrOs@gOTCc1vB2k+l0qPZ#MeiuYPyepI;o2DU5HsqPnt`g3kwMQ2zA-Z}Ez7a;HYZFCG0c!{jKQ2U z+Y;bnB{F&VV`ABc!jM%Lc8hX(f;Ju)OPl4*Hs9WNIF6z6Qp|q8eR7h&{-?5ZMf5WG zC$C+xFuid?0l_t1pAM`WAX!aJew2tB({k!pMP%gHxlV3+C?1Jz`qGzSKGMP8`Z3gT zTeZAX8hBG3<(X>gfMNCNR@_Z7+-LarPH7uVHOZR7)vZ^sc_Y_)+IGFDJ`lrrJ;4W3?*!z;&4w92 z_w-z(@_F9XXt-(Q9vcE{RURqB_w>m+Tm-JJwC|pXnzAo7Y_9@yi$NmB&ZOJ5vyThH z-m|l|X3rj$0VX!UsM3f9F*gM~E(6t986@^t&87Zq?PS6yz}FuLJ zKI9ytx9tIz=^Yer!l~G3pR^wFRojK-L*Ud{z|H$`(;QaT zw^%tY1GI6nS5|*K(N{j;cqzHB^0^w34A$SNlEC~OCE~-`W4ZDvLR@pnNVBL8@*?_w zER7@}3o!(cYGwjF0~7~xRMbJ442h(4PI?x8k1Fp{1mp_&^cx4`sxM%bJOBdW{>|b$ z*9;5?6=J_@@)Ia82OU0>dA3li$)EI*;ttJs=wZgQ`6SH?y4EZ7elD9q9_2}H@;BpG z+!ajn-w7Eof^Wu$%?j$=Qo#{B$oiuU!jJy^Mp65KE-j?as2l^cg)QOwe+T*AYPZY6 zOHEb`c}&-P4~qhuCU`mwJ%IyUqj$?9DmGdSw883$UmZ6#Ok%&x(T}|PTUnjITTe6k zl1PX=-Cgmuix5xZiGOW#oxi`gw2^4kZ+ImV_h!ClqV;7+RC+oG@w8H~aTINi;;(z2 zGB3Jk`r{@<*MUV9ifx#%F9I%lKv3GpuzbU%k(@!j;(Q*DC(#VswQ*otQrzoPaFt$5 
zC@-S`1?4#k3_-4~STT*7NbKA3btt%jqbN+_I~D6?Qhy-<|TPBG#v zS7M2q3sA7YdJUINPDZF-YNduaV8e_kAUp{smwLTkDeuhg3Z3JdU%wT7D2y6=bU$R^ z-HnX12>Xb)-}V=HWH#lVH}r7_C4a7r3n9mRdS|L4`A)Pj@+x;--$#ZIh7<`hn4GHY zSNamN28?4F-4SXpCS40D-%Sd93gUBqtJ6=P!$Y5xV(%20w)trc8a0*n;4Y7+GpDL( zTH;~WPaeMCaR|5JgjA_6J^?lo?3Lj)imsC4NZPA3M3&E^_S>brZf(z)R@s-xIgzAf zw|ZSXA|W;0@|$_E@Sk?D!0`l%ia7lbOyx4#n6+>JfBd6*YNKhhQWdkl`}-g9!#j1j zzkDB1xA9oU?LTnp=;cd3j zi%CWFY^+kSuX1X3wkhDe%^$PAe$UNaL6Kl);A#BKInnfVdiDex@Nb`?=3(#0J~W+d zGM5^~eBP`2IgxrXmQ5})W6j4qY->4cAr^{U)M~_yYiyQ=EUcyhkiTjgcF;gR3V9W7 zgIljod~Qi;(Kj-;Lj;&8j^bH14op75qgDe$$1%Wnq-4CdfZI(A^0I)+QPm~eO}?cnpIGgkO4}J zGcOCbTg5tIe=o~|ZC(?K8a(JGzbgAZJly;SprRsulQk}y{J>F@533Gp>+e|*BVZEx z1(6Ytc6MhtsM>dNGD+6kMe7+J?Kve$CZXd$hk^IP*cYn)gTtc_NX{U1HhbB6Taf@} zXTIWUuuGZeI@WPV9>&Mq#`*L7Yg5__Kg$dt3?gq_c?1>bMOsR=79?0j@p1XiqC18v z9A*p#;gA>GPs)~-qVP#8H+Y2ZPnr^gkneO#6CBF5}2SFWd`ZvAI-J?5wyJLhUL~t=*##V zp^TV9#2_wE7i+x!G=|$Nz8Z8pl0UUVUf#|3xaYt=^r6~^49Z`RcGwC(f1ZkCwB8*BgYW)`e{aCTm|9mj(YnwydkccVxpA~} zf|w8}e93>-p-J&rWvbj+8R3X=vxe_3`a*d;$8+8+R@x!%y|5o0{TtTfs{Tb4zTVa- z&?!5hFQi71PrAm~WBIyFi{z-J`so141|rjr2s{7jA;v zKRzx38s8RqYqSK9#PhU{TAugb%?2?d0e}Xe4*7@|`0Kku)_wBAQqL&yMJdh%CA^5j zAI4HvepR-u3n{)$m?*vtt?2X4!XBj8CtR6M(e(24e7<};>^td$=lKC2LIJUn-3jo_3A+pk;;XL;+3CdMv5lNv+n33 z*XVHBrUy5)>s~VXvfmm2o3+lwpFUhxPs+ZBmSfCfneMyKHJLEUfPdnWd58!a+N&3c*B*v^O#;TS!A0MV zoV=QhL4sE*&6}3l^iQqsuD!Z}dL_kaO~bbJICQlLRE%5YVR2dFL=u`JRO;0v#vSR> zSm{qDA@G5#Y-cFVcEBK+(!gudg&qs*9laN)jotw?4`i(y92I-~W}LBRn< zUi~hftAOY{dw54dCe9b};=;X6otp9~6K&h?zSlU6`ad&33`Yu&`yC&?#%Wx&ba)a{ z{8NiQ!$8MYrwn2>{T_wY*MSvT4c?8no=>8SVy7T8oQ-UWoDbykL4HaL!JO=ce+%8) z)hnfr(S&zLcSJcc!*cHGs;RKHmOZ=uT~dPdq^eG%`rcp4S|<~+BiUn$57$ewS`kAp z?UGp_ZMwtujcoTyi=m4NBh^;s|GK?NZGRV>{?RNfQB6D@_z>O4V5p-nUt;p_hYeSx znEP1L0nY`~;rEe0jM57uc|TGAlpY(!3Cn#CEQA9?Sn;X0zTcSk-Xo^8{9b`q*g8yN z-TUIhnef{Fs?~b~k9az3mqAHXitUun^&7fC!EM+`+NJKMi2dZS(r0A z5>A?lz+t}yzPPr+EWs@l59UX&i$Mlh0XG3R(^W7!2B6Su&pwqo}B$P^dVi)Zh1SJbm!Q zKZW3xc85ml-A%Z;1(NrL@U=_tU!;L!$!u3co%LyXu~yfVN=$*JRKJ`g4TpStAGNJD zg`>Gt&ax%Li;MJ^1|n=o=@d(LHV+0$>cV6qespM<~j&?M7KZMOM2;M?sXcAVS{IsVvFpdnqszMct@xCh8_a51u^} z*q!gV$n{S)I@^Eue-1(+veDMtU#bC5L!(zN`LwoNF6y-g68ifITJ39f812-A=w5@X zprJ~1hb*l?Ar8m5v$_K_Xx<+$=u2psuNnQbWug!J8mK;8*>CX_m?>$hvIYs3?B*?s zKf2dVa+Wv*`$#*-fM4|gC~dScpqBuXS3e z&bH5yE4a9uJ@`-{$s1P>K#Y=u1Up3z32e?$gt}V3U46@ch_&TMvhGcF`WL`kh@^0l zi?P*fHcv8|7xrEQe+b{mtns;(_hWK1{}LYXMc;sz$zeDZ1VN;5WxoJJ;=8iO(8%v9 z=A^pJa7*Y6ej>WxCthN`vhDisDaskUkoT1)tC(}N&QmmlUfW;x%6q*68RfSAq`IW zuon_(Wo-RS)K0XLCZ)yS{A5@lRn7+7uS~_LCw^TGx=K;k#-Qj~HD}*XYo^JRIOn@$ zq(KvE7HLUk)VzWzIVuw@E{^C0|~^BsPgOT z{`}`g)iLB{f){WmQsy}7J83P~m(Tbpmtj&2NkfMn1BSh7Aq~P=+Jl0X9EXS>!X*)3 z*#TF^GC&z2K^7ZnTF>!F`KZJRep6P5yQIMuw4dKT5HDw@raGB5h3gh;Tv`BX;=&hM zg4KqK2E5$h&c)&@20?Klv1-0hu}7a8g}jUjXwHkvMjGW_hA^Tp)SdJq6I%sqy%Nc!$G75~_)-Be#Jh0DtRowCrmuoV6uJjiv zXkcttKT_SIRNjR;ldBQ+JS2oNe?Fu9*Fa@0ZtAmi@qI)zUINAeOolJmNCuSrNykg8nHT z0&Sxsjb{T$BI4_M{ajNft1h}2`Xc`>Pl)T3+~Xuu%M8`~#R3AiVlz z;?-ov9?NiE*s*q^GE6L1r{(?|`16y#;X5ZwiteYSG$V3s(St#Y?2M46V`@3PJq^*Y zt38nxN9@lWcx1)<(|09s$XCjN84s4;`-TC;s0s zUk}^rl&MQi?41X0(agp0ZBA7RI+!=nXkBUvP`L&4u>+eensrq&D*W>s!mB#Qr$s zuP)l_$UR|hN;!h)hlM##2&A@qEp?p2**AcCugwSX%$W0zj>@g@?Wlm=mNI*4rnAw2 zb&IRE#A!gmJF;XqxSfBrJdo}ZKp+7qeM7_SzumqHRPXX=5((H*rWspK#|aB(K;m1r zGhSxknmO+$9^D{MZ@DX=`J24=*Op2qzYsa1w6-xxjkaKZoe_WNTv1`AQL2qGLT$Rv zz+{y-^w_M&xZd7Q_A-a|>Oy#l@^QE(IJhf+`+4jRfMpfcS-lEu{S|%~6Pm?Lj{l(Z z!rgCCKR9|4ykR$cT)~p%Xt_#THKOlhvdw_kCi(1*tUFR%TSet%8;<>w@zH-U?Z+^3 zTR)5s^Sy`X{eCnqL`CJrP?Q)3LB6^aSnt?@=xsgrKW#oX3b`gLSo zq8_BJHXgyQADK7SA*c1J&EaKJEXAi7R0*k`J<`T|8;yKW2lNDZ0d2$d45z@x6(Njp 
zpVje18HUkX?2sCQkd&GfIsL;W_ARzG0)laq(2C;~k9vw=wi9d=zE^WW=|aG`RsZLa z3{t%6 zjWqwskF9gMP|w&3c|murN^$?=t!VYq%J+W^_W@cCu1juGi=Xr8X+MEh5c9ko{z8#gM*Jhdl=f}zmvA7 z$4xOxL)fhDfd4j8vqaNe^~v}3AFKW?GOBDOB3rHdLup;7|Gi`5+tv#;7?+I4W$~F! znaBvNcWL_gbk>MnyQD_V{E6YMh>L}AWd7>7dNfdMRi=z+u=lf7g60}4mu-Z~y1vfb zhqzye%Q+N=%aOYE9__y#;bwVZ#eDMMye^;}J4d!SmJ#&RFnrs9ytjYWfy}OFZx3%^ zAkNOFQVY;{pfLgayG91hAWIJ_PsK%mxJ3XQp2oW7Gpt7o2<0p&eJ9%>lU98dvI|67 z{K*4&;C8r$`!@(C)JMJ3O@SFM93|aV&+mG@- zu9J>{kV#hiwcw>ENg4Rt6iJS5|3u$ zeGrSN!#Miz)38)Jd>HVWtK6Hw8Bpq(VmzIxZe1?Up;PSYuYOng>H>T8UU`o4MBa}= z(W=O_I*Z1YNdxB68mycsh!;P3>#w%Rb5Awx;{h z6l6)u7M%(6!+$lNKLktLJiy_x@uZY62fPYcv-gMHJgUSc*|mPyc%v z*a5}nY#E$QR9T$LoXvDN>|qX0k6QFZBWs#mUu@-?BY?rD|6lLZS}i5TAKZTvnnAVD zg0qKWW)fQJX6O7H*^NUi8KkjqWNVX8n=x+{6kjbb?4;B)>nq&MCU_}sbWf(Ajxq3r z{Sjv_!f+0fo>?@VuJzR{)RI@TkBcVRYOp>k;UNX9(GdQ;axS&6nBqylkrdN12(@E= z9s(hH;Ad^b2v12~-I?DxoA;NAee`RL$ROfuA?S6wyA=+qwEd&#C$Lp&C7T}B^uPb~ z6xjI#sKWX7*+1V3FEwSOshE!S^pbMoGI|qNsLs~*|`~2SLPJyqq z9H~P&!j=y27-2fDJ_|CZtbUwWW*r@wbG8o`<}SZ&jX9)>QB(VQA5qb+$DzrjHnGM5 zznbu!oR|=zgZyu*b@l{Dv=pY{7WQGL}p?mp1Lp{-Lw;@hLU&W!GlA#}b zcgbg(cOfj-;#w+&*MX6;se9k$G&f*<{>|83p_9Stg0iT&mIhYPYV=qm2!o8xm~BiH9QQ zO(%=P+K1$hWDCFj?d@+Fu`%g)iJ4qsK=-BEp--rCj^+g2YYj%VuJdm4HwO3$N7~DZ zf*5A5vfYr|W!A6!@~UOl18J`X1p&PJtk|B0MF*$DL*R~R=3+!*uQB^>&v-2?3-y^e z_<4I%2F?MO;ls{EJrgR@tO~-EF|D;P$R|Y(y*jL|lv!{Lva|h;23M%&HOpegqu~x^ zKwsqAQXJ6>W$}MnJPS&r0mQ2ei$^1eg(MsUgT@$Qxre zNY$P1q}vy0#zVLKR>I9wG=;G~ys)t;BMqrx5oh{9EYm#)L&EeB16Z~7O?n`moQ!QH zxJ3)fZH(!?9=02eH@<++Z*972*jusU z+C-nhPgqTYmR27}{ToVa=MwjkA8hxo6&3>%a3ii8HZ&*~T2Z z>8UWIq>wpC8rlrlz3aa`S|YRfL5&4u`*kI?xowwi{a+uH!@LaTvhTn(#47z_+w%WbGUP3SV{dRU z&I~1&&1o6^U3>LJ^g_5$#m_Pl*8fCyOU1pok@&GNLXwp^zP&vtofI}%&N8J?5JUX_ zCFp|bdfd-l%5=W#h(zho%IiVpD>2V1UdO&zyFp7C`DghVP3+HjDxKv=A&Z=!L*^n> zG~V+;=BOpA7NvM?3D(={ix3K3trt0sskL&1P!39Y_eXRW=-%r!2y>s(W8N#s;U$S_CsF{PWNYz%1ArU&LjfxT;Sxcesgvb zRWnOIWBdGTtlR$y^2zCn{;TgJ5bhWXezq(n1=02W0jJqGxmP*=Q>#zl)GNt*$m3pD zN>TvWE69sv(EBC{;k;3{ZncItOdSQ%F6Dje!L{#x(l}?*1Y)?VL-##`x!dqT@e5fiUZzSKb%p2>4Od<^HK39q0 z@ubn>k%o~}mL?D(jUEx^)CSg(g7w0hGSqCq@q$UhrQLmu#oU1iel1?h)T~yXILJ$@ zKVvwLw>XIkm=`g9!0RTEb|Ojdw+4q*z5%`%#dDz+;vvCI1qyjh{+CvDc%?U<)G)o} zt4Jpk{6K>r`kES0ocb}+%FqaPM~=J7Z?FtlD)rtvOcK5>Y4j~myPzQUqMP%NrPF%c zgnbF+%#X*u|NHaJRtFIh`awWu^zTnOqAG2ms1}NU{O>Q5lHm3CQ1*dJvxnhl|M_Vghx2w04gQa%^Ny$T|KIpAlcb|4 zp>T{GStn$VI3%-}%9e5L6DK6=cYhy`pTGQx z<9^?-_v^Z@=M}H4mJqofNKt;+^9=kdr@r|nnh|6cck7i<;xxTJJe`3pHvuUMY4ao8 zin|NhSo6H}f!c1*Y2^+S;h_0AlA*7Mh0m6nr{a~>Aml0rT%7C^nODECdo|s02E43I zoNh99yxsplQJq`&Pz*yDfmn&EeDIczVI$`Uef;cc&)Wr-*>YiV6X|QBqPr=1Q zY*q3TwWuy6z$edb$y3cU{6^H-XQn(oasI}7t?t*xYi_MLz}2_PVkdegF91q9usu=3ZWdI6e2EfH&{oPj})MLhIeZ`hb+^+@(9&c3lU8)jic>_{E>M_Yu*K7SG zyTu_aLbQA9s`P&c83|l7a=%xwjpEcFm1kmCFvJNempHqUi%yS9@eRw6u1304+~6 zMM6F7h|MNx65e@2cUeRj7PnR5%JKx-n}PT}s=#jqhOoXvU}{dg$mbd?SHH`V8%rvG zzH9m}%OcUo^DEQ2fHXJ{9&eGpN%f=0%}#mJO2fk5kI}A)TK;izUVx>5t6$g^5 ztE)p9W~YG|qwZ&Ueqjc18;tXcSRm}C)95jxg0jnfc5?Bp%v%g< z8i{~MJr^5h`b^+>1hRNSUso|NzwZ_-gnM3v_Zh7p6w~DE2_oAw6zRk1WYIqJx37le zmW*%IS5+MGYQ$JHI6q}$(wE>H>U;Hy$F{-Sf%B2-JrO~P8Ox%b-kj1i#Z?C@^`zy% z>`?&4qF^jd!^_ef5%_oMTEuhL9mGB_Q?~@O!rEb|VwKLrilHCL-9Rvk_H|H>&(fm3 z+s@uzxenfkD$Q<+*VcX)1*}pee9B$1BRt`6f$BGjejrtVy^MTg4~l&KKJZ!1pSFBS zp6shzfRWw3@kp6t^F3g23Y!WzylVXEg?&U{*i`XH@<}?~?tFx#K|cUXoNM(c8HdG6 z=#koW2D1;~FhAa2)*D{qxq}M4ZShYYagdnaJc{-d0I~A3#%|Xnm_B{>lHpgbYdOZH zVXExbqmxvVh9dMfI{P+=Mn20fOP>4F1kr_sAJ&!X)^5wEXL$1ol#q>1`3@jz zimv*~f2HWcwc1zSP;zz39dx=GVMzcv-{@TiLgkOTiqyRK9iLI$L%v7v@>hQ?EUP!Q z$R{oTJ`Jr`I1DYw1iETm0E{`XXb8_OWjzH>hmgJf;wS&}vXKF@UN9h;-;nzs$Q4kG 
zzFi$xB)z%(Sw~3@c=R0#9{DvcSl@f_S0n5GxCC(|(UH{gP|6?O0cZ)(8YA$E` zjC`yo+A+mu`@ponD+0bwsXx3xZ1GX!#ImgxZqhk7ou{`aS=13Q*+O^T95H}0Z#3-f z;TLxL*t=WS>|Jrjsyt*CTdojek>Xck%kwiopS`XkU!`R6eXAHEWkjmJE^I`)tH8C>ELK%VmNjXsf4=J&1I$p0pwPNI{${mF3m z29BcS;uR6$c?H$$Z+SKx|Gv<%b6o3-Tzvl+e)HjOA+}Zbb=XqSf6oMxy?<)ub6a8y5*gc^k%v30(IU)J~NEi zHGSXeiDfeT3{o9`Fk^DX#xNPVCTZw8>~Pq{orwQiCM0e5S0|I_dzn0oW zilh~`yz*TBR(iBNzXn*9r$&MHk*-$^h*z`ik5ry;J_C!m&K=mLuvGN=cF;pBK{<$k zUjn=N{n_a^J^;gn=D$lBmge?zPU))fhu-DU=HB&ZeOj6WMw2F-w7=c&muY5HU@6zEiljE%lzxy(yXruuzJm#=RYpDg8k@K@w z0Wdl8h}81Z`O+opP``kS50R+u1B3tVU;(qS0$`WVm0F%vNQJ-mN&Y#ulkaeOt*wWB z zes4Xwx~j!i_0K}+@#>Vy>6hiXlhRfI&#s)&7ECIkK;tBQk-i0Bgm(E>T>Em{8oS*B z9hsSz+S7#jxnd-Z%?~MCH8H=Gbk-+PATS%r52<~_v2M#K-LeTs?2Oz=&R18B<;Tol ziDs-ZfPMP*>GE*HZo90HyHPqe9+p{NkC~5>fXLDY3?5{Hb008Vk#SFX(i3GCOPw~j zRy=9q;-&m|QFM`$<>1#&92!ll8k;hpK#mW)acnD>0p1GlXgj$G@)-042wNu~7tX2* zFM3I&3fj3n5X@JEARF1xzAoZ15k1(TPeEF8eafsDv(RP)!0ZM1LBdhV2)p6 z7q`Z9q1>(3Dz@?f^S3P76&f@$E(Ms;fQpMrbDBu%HxyjM@?L$F8QjHwoLmu2(f=QM z$F)LcYHHbC<$X!(wId*0ogv?5d3#d-!_g=H;wa4R*5WF1OE{7;6F@Ez2q7i^K7-jj z?0fE7f#do9Kpc0IblQ~|tv?)%R$r)dsI{ck#Y)#D0iF@4rm=LF)~Kt})U>d9AY0eB zqdXZNNMbBljR9x!SPRY@$l@3UYTx?To+>a^c+y;WG;h|Lv~Dy(=o4ddv8%FtAoqtC zwC<~n4cciH>#31;Yp$$~V>i^^YN;G{37Xf`yzc0`{{F@q@Y2HgB5&E%*Kk56lCPQV zz%7Vzm^7a(nUK2cR#WeCPO4-c4EIN8P~`PU%apoR0LhpxB_4!)WxTYs--|P$oY^fo zjwt~@nQIb(C&Tiji#b!`fc|> znB?Tl8aW*(x|Th0OYMHY&egh`o)!M^z`;O4)GY;CX_o8}yCKez7?r--G&A|@JU?kb z82V14`A+Es0+aLm4aI%tX;0H}VLY}L5Rbu(D=#IlI-^b;RO|`pWZ9I?n8U;9st_yqWjoJX4j$@cYY z1c0n;cvJ3f;hMt+!sEBS_GJcU)Y-YGme;T%-GBNG@=VKLFRHBno+>On@;#Y5Kd3OM z_V7rnAySta7^JT8v#QZP4e5eQ(Hl&VEcHt%vMGm*v2t0`)%QX5eHb958*mbyHs{{K z`S>O<=rCSi1_LDp0kFRDJpj@^fZaJfY3Ke&)wI{LhS*dTwLV`A@#r}Hb-dJl{O84@ zh^f!EDXq{7gAG*R9nes+(W2ZNzH`z}{#H&c^A3acblT2fc}HD-&uhibqf3>nRCmZ* ziM#A^TO@g_J`?d1lO?obUXhb)VNW;P^yY7hlrL{%!gSL8;u@vF8kXy~aa6&4B1V-} zxD}_FZy0qrz(5jCf|$EJ;{IqXAoWo|N5C_Z@l)~Sz=*=jwHinQwXfG}nDImObva|m zt*3M!sIF6w1%&s%ncBxMz~OWzy|X2t=G?8#uME!~oK={LtAtVLSr+hNjcc<_$=!=eaqOV_j0aySEo&13 zq-B#7iwnp{ae6F8(&03gv$G`TF-Ul3iUBR%-LAz)=|oF zHgPT_N{{h2^*^UKoY69Z|5`-0S`LwogyA-k%;{H}>Thg<_6;_^ZVBu%&~kpA2Eo-Nbw6m2fFw4M}sv1-qcL~u_5_stG~9(70TP!PVV=5 z>GxfZE&Uqzt{wV+>4#J;P2?-=B*N|wEVl5hx$rF6`oa&C3^XT!c|U!eK-`yd7g1Y@ zxAYwS`(j>>ZPk%jv8{A_b@Ywsu#wvDFc}`i@*0<1WAOmdjtEunZa-$ysTO>!eIK>xY+Xr#nf(!`ru ziN<^f`=~mhgrfqmJp+suR7f?|b{$+N@e-s5`zU$#mQ69{Z-Yw8tjhkqi^Yqxg>zT5 z5O)B*Q*S!^5A@(n1K|fUQcF-+L+1%h+=zwkDcsxc+&9nleR7QED;72c4^PDlS{}xm zMkS1=FdB2c`+XqHCTQmh)n$Ot$g`#s;w;gc({lj}}{!dNf2NzKRZ58s~*i{A%I*Z#JX z4L1=9DO_(Pzl=yQ!%F^Dn-{9++vM}RT>05IFxHmW!F8d>ZY(4`x>M@|>N*iS5G zrS|P1b;gjRr_imaOL8>Qo3ky-vMfAR>vcGna>$y zL`w?ePrbRv8EVX~CM8hzWum!$#oM`(v1o|N75aLn!I@eIv1;zB!rWc;xND{J!`&44dYBdx!oOAmns0ShCixvG&sth9%+BX3ElM!+#!-%a0|K%DfR(r~ zX+s3kIu{rgcE{cbIBaE;kW)9lw>W|Tn06J`%N{bjb2$3FMFhEUEzDZ`wfX=2T0sBA zG!Mcx8Ojm_QTfAL{O9zvt;*GvEv>uf0Ni&JzC$+l^-n0npbT^UBY4 zk-rPfG9BoOfR}<_*!wq<+bQoj8y%3q4Cv_OZgR0@^Kxqs4Q`|-!hW22fLK4sk_c49va2FvO_L_-jE;dNpjeym zCn9dhn~6B7w)7m)Zxxw#R-q-*)Be(ogsa(ua*oafo>xp?tG4>R+G-S#qHa;mTX8_% z^E@nK^TO>!Lh{ah${T?XV}i}kj34o6@Ygk=M73Uv{d+i(Tv^&>LO~DkXXhnGM@B!W zO(>$QqX`HW)yatj&X89#D(L8kswlO*WR!P>Tk-b$I>%`-&6btUpQUjOkXdt_jfARq zmI`Lb5-9LdEHw6&9HaS`?63xA;|iy3)SGbB_8-*{+oZ!g70LK8JcZN3mqcSmLdb)1 zq+f}*#NkWlfTI>z7XUiI6aWicKHNV%1aJ_?qm3Om><8p-*lIt?X#T98ipCkMQx8T( zCuFbU!N-VP41I^*uXzRU6{YxfMe$@;sM+LA-WTUZ+)<8?pG(d>76S;HD}K_UigAH} z?qn(>cgtJsuj9GnLmv*h?`=;FfO|uk`k9bJSiQs+M5_eJUE*8={*HwXH2zLKcd7wXYB4wW?!;g2y-N6L4WTZ=CiKX! 
zJGjh)aGUB295bM;Bm~>nvHX_lozT`hAC(hT7M%_Gwd4s(YL9ca%%%nl$y2!+%H@2d z5{?91MUO8%t%kFdyPNRyZQu{2&-)B6dP`H)M`0y9clG{^7oPw5dcK_n31g2tJFl71 zQy-<&*6&s)L4M#S{6z7?=QR?7I+xUf zHx8(mO*S>s5k;=}DqD-0!WTP%r87ZJGG_+n$0_6t?N!=W?gT~7=Z`!Bg0(l&9n1hX zZ^PN!oK}an_3J*!#B0yI@(ddC>vHK9SZCpN%TniGxX)KhQ&l2|0OKg|J8{NUy2MV@ z^DW8h@xp(|Yp4P&mzEwsGbN6RfM=B2vcrtpYLJU4Z*0Z`-na}fEOtHm7RUzjSd_$j zRW9UZ9|1>Xj{;^I7gzW;=cQYMU?2hplORw44+^Mk$eajP*T4KQUIn8iLl%N2?172G2SCj>|*yM<6YCg(rK?zsEOO{c0;E&^ThV zg`g68v~#p@>aAS(zpRjf!IolTLO{QGyZuZOEBz;%Pj+QIJVn#SlLlci)5Q5;8|tUd zE|8=&w(l)Ps^3^`pN{slvNL_toz0$TNzqM&FE~Jd<2Df?x1K)ZVTN%aoJX<`15hWX z)hxejTQ&8w#-C|?VU5ppD45D&$a!K+O#%;A@8DQtRkhIDi@uW5J~PsreMU3N%)%xg z|0u83_dcF9v7epLY1bFD|1GQZBYPt0+3k;4*r{qNb{cINa-Ao-OAB{I_Q=s;JEg9p-D@xXH+byfP_9gUC-M0CH#s>ohM9%EV zF>R=06F|Jv17vD|-RE%)h0g&}e@Em6>*^^#(IWX2dj^*84qozltzgRy(75dE9p=)l z)o#=^bkhQNi<8Y=&0G>-Y?<%v}G-t?mVhWhF{Z?n}@zD}Vj%`Y3BY z*z_6b3i~9}2>g9iaVu~zEp(Lif_UH!K;W{(qNdQk6|eP))xrmhsw@Vx++l{Vhd9U- zS8WCJjYON>N{W)Z7Q}I{@ZI(C)&TOpL<@(Z%zgHieDW@NVkH>gVB%KM`fvqcnjToC z_K(+AVm6tgdPI>x4a}XDt}!r+CNl>OruVN`&sx*NbBnphFgh4?lBk z@$w+tc{3zwv-^Sm;1*l~Jx z*6l8XBpsigpMaNxn?p~ARYEQ*thLFcaUV0&j_ystI2c@p#^WsBgQt_Jll8lKz(*Hn zq=w5(Wu>l7kzgkSwcxjO&c%etD7Cfh9CV~QI#%x!f;&z7mMRGH*)yf#h4BZ$n_Uuj z4)5qP$ zlFq-?+}xDbuif5mID8OgZDa=6@%>gt{MdQ?`TX?sNAOF#sWtx%$29Nxosv1Js4oZJ zcq?(7^u~2VF7NX}gK)?lfN{TYx@p>-MhZFyT%tgQ`u{))sQioVTM>tU|M)5+?SM2! z%5s{aB2eX7vb=X3j3>H8bsflmo$6hf2l_w%g*Ko5rn$#_6o8WPu#W#>KC}^9j@vES zTRQ(>!6ar~eFbb)Jp~*fc>}!H0#HDA9ZijcnI5h@P=p`yzdGdNO2^I2$N&$kV=^LU zd=OoS1~l=rI#vb> zA0<&xsoWq{z_b$pb#OG1mdVC!9t2l8Y~b_O(`&kimCrDVOod_Z^T#Oc7vMUuAM!mcTS4 zxI@TeK&jDnd-l@7{p-N%C0C$|*&5V|Z(!Q}JW8|g!9Zd$N8&0NOlyJ4uvEpx>? zBcB_9-qe4Il4{4(757F=~x|A*>nHNS9EN{Fet^6VJpj8X|4oD87VJvSjnE1#1M}pXg z%)Go)Yq8d?qm-7VS3JdKEkFOZZ9ImkF`fJNI)U(0#1<3|;vR!Av<`f=-FmTlWbRJ8wDzGyIM-W#I{^3aah<1KL{9LI^5`ZWS#8SC75C-w#rbod zrSJ~!epty$#~qNR(}ekzbfwA4qq}NeYB|L zz%f@r@nv*8)Qnz}X$%VK2wMH+b6fdYnXdtG)@v0~|4AKohZUhqxS9_#4XYgddTV56 zaFfNXp#3)CNz8j0XAXIC65*Rsp0vrL>cAJKNiI+vY|Ra0JLjk87{>z#r?tK0gMbA2 zrITQJQax2lM>XhROMWVJcABltHgdSdvg9PliW#*J*tQqWcNWxoZ8^Z{Py%Itr*R=X z`1BLzWG;O68m|Tju+<6|pAtRJh}M6vL@cFV{7t<$xHzp2XbPbmm@Z2wh6h)<0;K}D z7c9pB5yAVe^2zoNfDGFMMrH>y?ehQSBSKuq6w~I}mb64i5XG^PTVwic4gkzeIY1Y}PtG<%S>Ls`#*~I%0_Xyd8vmevCBZ=q zx&&o&6UPGo@q*u_))y;)THa<=9dY=wwN%Do03y4>x}~oyK*1T3C{lf2uDMjge0ZYHSrR&IFu=%yJi4pw1hm zqSC${c)C`Jv58J#*XmTbcHqxQH3(v8$=12NQ&M?PUE&7*9XE~X4}UL|i7Ncv?vvMW zTA>p6;6v4n9ot}Na)J2Iisv(b_h4{lDymOMi>dh>EDu6f>xjjfn=g`a6F~*oVp6 zf?_gopRPpe?szwUx=B+v2P|pUes$v5SRSp-2b1SIsZqAy`ntakVUR;J8z-Arb5nP-deJxbpjoS1e*5PrPJnd~3)_5LW>GbJs#(NHq%vE}Jb}p}aSirSx3yx8{f4BcMU%L_e6$?qxXb zH(@>2H<0^0msBz6Nz{L9G+unhv=7}}TEtWrabJU-nOQu0!Lni;VPjPT&aHgU)U>py zf8@ylkJmYv94xf7vqiOi}j7;iYO`@v#jDuRCmFfR9oui!2m9xbLDXB zU^-+$ueA}_s~zsn?fG)`KhpzJQ(0T)cU6=o$7H{mn;Z$!bOPtW(Z+cH|BODl{^WUl z$k{(>P$*?YhBWRBfb@Kg$^sW(>7NTqUSj_8{&^M!u;zsVo63`2Noj?GQEPy}_p%W0 ziY4H|KG0PZclh7Gdnn3keV+eOK!6;Hn461G zU&Bc+RfgjY85hDi#j<={!#X-b4glVz=Q@6rqA2cj3xM9|$VlsvIeXAttqJ(XX-mp= zFnHp%ViFpc;a~>(u1h-Jyj#AQAL64GO}DmN5(tndT0|&*CUt)AL#olh0%1et?CbgH zLQv?buYAD-K&Ab;6A)!)G$Ij~hG-Y?vlT5qG}msn5(9euEC7eJVtcT({#%k1An36{ zaoQfcfqp?#Qu^JpOmZhs#g&hh#`QjnoqL|Lf2pQgArLwq{v)qe7uAtzO{o;ItKJG5 ztD#h$DwT)xeO~+T^T`qBTS89sa;vuNRC}SoRv-)9wIr5l-bgFv5#NbmSYZ~PY`JMY z^-Q}e!;5NIHpQLU8QXH&ZD;_tK~8NQAS}+dI;7A3Q)&LEpQy^l$}+*kqN|W&xH1`p zwwQelm8E55u}SPVqDb7!f^lBYiK=r)%9U~cxb=+yk7U1$w&`X9AUt_I6#t=*0sg)> zAiRHQIhS6tHKK`PL&wjk3yB+0*H2pcOzqf`K+8NXQReh3 z&4;>&eSdCAIrfLeaF9^(zgZ|)l8)^vto)0gRXTM)dndS==&$^K_Ga2xrsn5NW1RTb zw7Jh1Av{8LmSLoQ=IuO{*8o-e#P<2FZR1Ri%jMNz{Na3N(B9g~NS@5tpFHc+{j;M5 
z{Kff+$t~nm|248fYGJUeepgNMXmsqS8tm=({K}P+5j~NWFGz9wCmq%~8+t7+dJP0Q zIO_x04V>WENrS`|?m#))F+;XT)k*l*12rdlroQm2Pwagv@<_(&zq zhr^*FE?HZ8IGrml8w7hGFEbpP&kT>%C9{g^+P^84RTo(v`z(Gn84jt8{hk*6O&vD<5deqTHP0<wk zmJ8Ux0ry19j~PDN&}++cbHIz~>syBspn@rXT!LwMzvRdtZ&}>9**cRg6#3uxH0NA@ zxtAN>&G|ZRu~NH-1G6s2r-@&~59=1Rq2gEABYpAp4%z+zM**1YKIX1+*3}N8Cuet5 z%l)oK0clA)^3LDDeZP8tDv9p}2<<5Fr%bzP6mKc3#v?roQBoatYyULVJack9z``wK zFuoqt*#mVjjX_ptZSrp5-uYqucEXzS?xLNZVX;}$I?h`4v9Hl;47#+8PD4E~hd)8N zUA7SbZIM$jH;XR&92N05RgEbEn6wMOP91yW0~dGn)Z(~k+-|QIR5FZKBtu6iY`Q)P zCBm4M*o}LldTC1bkZ@qBrg zB_mUfS-|OP&!^NFyc2Wodha5Q{Bi|ZG%AZT=8JI{M>FZw7mam4uz3886tm6Y>8Viz zZL7%G<>r7rt6>(CZ105IEbyMNp6$<%BU`=HAk38*UQg|DMR9!NmV-M^fk=G6hDnGz zCz8n*OC1V1!my7^C71lIYYhnD@RFllfaTs|_VGJY&xF9N*akI-u9Ne|7rrh}g+EGP zpL}6IyRGVZku(44d5Mc=guwZeMOHP5p4H&@S#oh&GK*mz@3K9n!u+bE-C^!Nvvezo-9g)fUdxr6U^|5Zx$_avR$)LKnxZKRQ&C7aQ+T^J_HS93{3u5KaXxm&&+MbF?EV%)m@6_ zVT9Z`A^f;Vb|n z6LxVPmayTitgI5&Qcz&@&#N8Szg2BMF`p0TxdQb(bL&FBLl#*v0Gvi=?fMDgT!7{r|GdrFpRf#`1IRQvRv6o_XixQ{D%>w{4^dF4Ses2Cz|_^rf?> zAIxupD31)(C$BB2IO@YP5mN{%k@hRgvP6^pwD$Qhzl+~4W|`#n3uW57eNKFI+fDe-)w9B1VC zXv1F>*m1;Qu)`0&N`hiL$KAQr0o{Q$u<-EOwQG>tDxF(Y-Q@PLh2WFHDV5_LVB9C6 zPRIboD>S^_>edE^aadQ`I+-^(`$MVb!8|}Ug$Ae1028Z~IMgevod9H$K~+Q^n$N z03HDB%XQ>0HdM~J&xbGe$zC5wrT6-Sn~!7BbHreU(%;^A#qfW=flUkKYi-!>sGQEO z2jZ?~I#4VQg*st3tR6Al$7KdtfjjT8I!>E=V5dy*Q@$f%9RK<5Aj3q~`m6no-+~^E zz+Y$g*{)+RGlI|qtKNRU5O#u&k`_AUIBHe>!D}UoFN`&6%(V?~$9;L7;`ptVCt8b) zyh?bNY^4VJfLqJH+#ZzOu%RR=Nyn_(B$9|;$h}e{$6w5w zmB!%w!VpHA6_cKk=$%gW>FjTyNUcuCmUv03Q$PQo%sg>m8o*8x_eHRB{y8UQ%;k2e zGgq8BVAcXw&~Ilt9pZx{Mekd#4dRH=53bk)EF@U*g=JU%}xc1^AJM{oJ>7ON8-Lap)g3TJ}q< zT`B+N_JN%IbH%HvcS>P>k-8A2T*7oO4bQ{WHs7JX1kRDWX(g z#?Y%KOuN!OWp0%LY3P$dNubP74QCiP(bJR245!D$N}oq}J}Eg)s7< zDsW-riu0-DM1IL>0(r{uT9K2p0)}g-)|y8bgejxQiPrw-ne5$wFRX8_ z{$m}X#A_SbezL2OrG+yAE^|}=Ys0+=hydnr1(?G7HKH&mvsUYx<8}KV28K5tfymS$ zSc0OyebUFH2jT$05PkeNRQdf^-dJ3HZD#(Mtp15oX|{$y+_zv;;}WnTK`tcF5xS)) zED1p=0?GKzUhxdJ$7lx zD`Ah`idFIis;Z8NhT58_*dDW9ik{#RS>C;i&r+dzqL@nF?w43E~G_ zzR!=*tB-gjU4gE98U+QL3TC)0Q@3oA|C=qv7%JZLml5Yn7bn$sCwodW!o@hV6Fwvc zfGj<96Ewjf*DUN%n{ER5-*6c?lEzKI#bVKe8v&BIA??5bUzv`$SS(|3H#C0wjG2Iyt?<&l~A?p^y1PAc* zqwaDq1SOrJaCnV71};R^oR`kvw^YYrChM*HiWW4+5mYHl9mHCJuJpNNz}lB4B=k{h z!oPpu1iI3rTl~&RK8i8Nm*t)N7+t;Ta^>{gPz-fJ;geVR_^5Y?tY|0E2YG(!^T-W5 z(cUyfWG7d4c4R}KdGr8}%XPKUpyz89kNot~uT<@C{Mo!`4QOF|f7>#nL9n&flWupe zV2`i;_gI{91!9!Og|MksaFG^G9q%>$Kz4gwsNS->S@m#j)z(oE@KRtPKwoZ*^^ym| ztV|UO(A=neN}5hNgAkq%=r#{K{Mnjche}P<_SoEpyae@u4HKaBn)^RH6|KK!CH7aB zCEV5>|6y_Q=P$cGAhJdrY#N(QvD5hMm+wn3Ez8zZ=)ly5JVUKW?R1*PweyD2D8g^l zHEzH|1b*7Wt~w-F!Y=gVh2vsj77!o8;pSbOu_*9bkeGvX-PWqPg&vfY-{x`O_1Wv= zVBCzVVvp>~Ha9oCO+(v7zuf(2$Tqc#Tk-wVVwKZjTn6P>YDe(Wr7VQDXglHojpIJu z69%U>L*P&9#UOTVxSQV&zHN`spqQJq3Pd!Ob>*P&GLJJAtcT=^Tb;8I`=v^re;VmS zSvY@)x?_!aGzE|lx*o`!_uQM#xmWu_H-ayF$Go9iD1RpP0FxltF*@6Rna_MQ+xADhqF4H^vX*oZP ze1>3aou?gVThXi z+lFh1hI$WY;>Z(S$`%#sZ{CM=h?8>W1Vlf+y zh~G2gwKT&7xsSZ66@cq7+o`}}G#>40?OKjb;=%P3?MLI;(F5qTH1@01&s7;C;iHFt z=-H?S`$#(M{SJWX{C{6)n+}z2|79>AwK4#I9j+D8*}AT#C0CkFsIPQ&6YXm%ylcrC z91ySwX4&?}w?$l>A8$14E~iOSW6Yojy~@vAX`CDZ(Vn6jORpIVYq6X0KZ#G%fg)Fo zwYJO)3Jc*qRG*`skC?jxig*DjsNl{`0)h88@IJkUib)7PIuDU-$~Wkion4s25wrY~ zxhN8S8GKpLYrOP^K)@wwxy+Fg>W)nr&p=e+xKc^xc^tT1$op{l0+!COh2aO{S4M|* z;p%tce;yP9g&p&kql7xI8eamz;{1>*p{Je!M~k-vA}o=o60=$0sR#!hxMbkje*^|O(f8Pu%zTon%HeLZ1(RRqBi z7YdNipblm-mfkjM9H^!9^La|h`Q*Y>RCjzWVmZ=hVzhrCyV!Zt0p-+i~Z2^0whUzy`D zn>AOUA+;^~BJF~}SKzxiQ5!$22ySwf=B!mfwqH*2pso`v#EB+vFCTVsN_P`_h zc>vYF&i*_ltc7ZWcwCFGb1kn30WDXOfjDf?Fq-8U)^gSgVrTt0SyE(cmW42JJT&d= 
zU||OG#rL>_clLDaz;+DK;txfT=@5&|V%%Ne*gu#LHDrujmR;lJJ_K0aL_Yz$YeKwT z^Cz_gxCk<|PLo0Io;2kX6Xeaj@#JR&=$nh*PlJj{&ZZ1+8rmZEw;inqHIa#CVwyS)Sd;&HSl6>OUWgi$gIi>|V;GB_LDlQ$1nT-kJ zck<4HmHc%~n{4fqG}eCoNGEy^MNvC3MWL0>zc5YIizbS_^S&E5S8{Q+{5GhF3YymX zSA({mii(p{GcqNTg_jvBLGv+61CsPepE`4V{T0 z6enqhX8Hzh)iTzsqkc}ssYY}6J1zz7Ih=&E*f~n={(HLd_jDGqiT{VU%@u@b!7}`i zYd`-paB<7p0)P$-P?93kvQ#omQ1yQ=w7`Shw9`*FHpZpGAL5gFLqkJda7a=PI=fqT zCCCaSw8~}A_Y3wIa`-!?dk%<@!s;Rb?WeX8$;2n5CB&U9JD8t>tZvJO9G^gGk0IY6_9UxR^Em49m(`ZOtyZiK+fl^DZ7 zX#+~wyEgJE68Z|H^x1N#7t~O)_f8|&0J^;O3SBC<_NGmK^=j}|qJ#IRfn&jK2ka=L~98)-23It@< z@vY)Bq2vh{Llz01E@$?1=bWsqgCVM;6D7$;a~V-58|FX%|A6j~3=1c?v)Gj+3@}*e z-8Uy4`Sf8t=7W(^6DF$pm{jj=cS>k~gIm|w{RUDy_ zH0ptQ50atv<|(Z%(Bym64# znjgRmaKqlPONwGZ_PW#4ZypOWu&b>jPeT|j%985mpI~(;MD0daq)%qM!H;-o?0WsU zgBd~}(~|kiL)YeR>EPVH@UPw&uszy z@)1edp5^$U>-W@7R<0}S*}=Kj)z+}jD$L^gwH7yCMmF;ARUtUg{?f4VyFZ$}wC3Y3 zsqssH7lsh!9n?V^7@pBtIvSr7Z_`ItfqOzLV~#C+zx>vhwM#}1t|%+$KZ%v1wZBbO zBh>PiHsf0-EiO;0x`;`W%Hr)j+z`$u@yre6_*BC+D^7^_!*~C;^I{1y$AUsD*CHCO zJTn??6k0+gR9J#PU~yH(dd3dGS|Pv3m%SgXb_;J+oi)5V*9r*Dz)`;od;?`Z_dFCE zZpqhwDareZU5$*yhzxuG;I_hthXvf53UbF%#lnDHjsoLH;)9fKzzgNAkHt~;ng~-4 zIpR~sMgReHi)_!tLLi!B{y3POdfZeZ2?HOP#TF&>D(|6K>GEDMprlL^(mWgUy!FF< zs-X2?a|C2j&!k@R| zsZpvT->y>gH+6K7@r5amK?CaZzK3vFaVIZHF{Qgrq(DMqWd)^tBjBf_cDKYzKH7=B zR53+P&d3Qteo34+gE{ zC(W;>fv3hS-|ktTewn#Av<~$YDClvau61k)W;D zo6ukHp4o}@UixUA|7av~;Ic!b+`RuSs=ACf)lYRr`;(l+w6E{DqlAjJ$n|%A+YZ=| zK8YthwB^6ywLacg$y72PO(5NDe9Z<8(!)|5TpnXHs61!aGu7m9EZX>L0-LS@kXY zmq6xZyfow?4Ko!y^xcXpf56u!lM@Z~RVyRO8^+dbNz3f^LdC(^uhC`wAF8$r&jz_8 zj)txC3%K#Teo=z%HecCBYs8-mLH+<#EW7bYSX2LaT|!(Z{&3jOD$}^wu(SJ|=T|qF zpAqke+7COK3#h(pb+&!}QvL3<;EQ5*`z&Cr?;i&2Hb_zTjIOZ|$->yyIzv{cAKZsf zlKHR%Q@gIbN@1o0kcm9B9boujq3~Y+DB!3&O$8wIWk#0>Lm)xZz1cu>uD=EF&N*kl zlS&F|&uDq@9ry=)lXGY?+>#LeIF?DBSy%2J9Dp|GSNi~P*a|HQkh(ko$oPKAuw*SQ z{s)KiL_f2s$;{~XL+<|0B=ssRAQ83X7;B4;5V)2k^%Nk!;Rv@mp9Z_6+BrQ9<5skK z?G!?m)|xB)gc5PFY7o(V85Aqa6r<8f;F61F4OKPbmKPsQ_(shY>S1YzAjW0ky%Q zo-$0`Emc_K!uN-qN^C5pUiDMe#<)psbgQ$e%bv>ZOTOok1~Eo2M0+QN+>8H@qw|iZ zdjI42u}9(@sZPi^Nyv;c65^2Tj+J8*#j$5b93>9PDA}Ww-9h%~7@1|2ajYE4cFc}F zf1mrifA_eLhr{=KKA-pd^?E+(Anpp0{O&4NIq9|?P1koGx4t@-i4}HBUNO1;bij1#G*mRqkoW9go~Lrg^XJTvraSXTyY*;~f2(@CeoT8_XZ#AAa*OahER-0< z_#af?#C&~NDrnOK&QMiF8mo zMBOD?WYv9B8sn=!RiKOtMP;{#f2noC6BxT7!v=TC!yq(iX$h%mR1@x$w zVKsCO)IwNl8cfZ8{hA*Typ0e)v$va=U&rWrGY5T|5KrCAKAAd^kY7OkC>P}qA1R%x zBmxb#S3Q`il#%+Aiwlhn{eo-h5RzrY1`zahb{z!!$4LHremByK{1-%SBo72CX95{| z0GysSCaWO4HeC>*z!Nw7$nC{($iZ^Ag#RHhI~5Y&poAP&fq9nbyF_4X-P`cU{`WIC zyYDgW#_wrCv6pRaSr3-}nfl*;>X5lm?0QI}_MYhQFC8z808w`$*lcdBO@?sh6HScg zxLofdv*S8D^A{!`QV31xUb8ljcoY-?7?3H^?!`b_j@s#Im)< zvtP+6D15@xnF!#|C)m`~?3)8Yu40m>znV^KY<99wR-0yEu_sGA5!_~we;#n{69Fi3eWeSqO!vkp^@t=`n zZ7rKx9RViL3&YL7&67zq4B2L>0Y9aSG@USnca`Gh)n@9CGmU*~(o?`EEfBfzJm4<^ z8)rq4g#4KSqr}d*!^sad7t{=}~q}i`)%HKkxH?_X4x>+Ch?YKJ|ozEWe|B*;i<(13|Z2#Yl~j-rz$*VUwHr zs${o3+stBS_WCsrmN)Q}1+XI~J*S$)^ZooRlEz{g0O( zvKu$My>G65Mu^gM(nTgnLT{*E8`Ok2cKvWHgk z?P{i%EX^+3&2EU+-rt*^nF}1#Z2$p&k%ieYD%r5oI^K`F5jdUyp1Vj;nzJC+Pw{4? 
ze&_#`sF(i{$J+Ymqx%hN?97r3iyDq!D(KnG-q@8|tH%(&9A7;C9bly{`oRa}Sn%f| za!Sg(laCyW*i#q*VqnuNgq6yioLfnDuo^iZ7HBHqD#OVCT2ULAh~A6%b)w_^6>{PTVjtlzo?6)X!ug?ck`?Mb`$O;XR%RrIOT%w~d^?s*9l1pb1uJEqvroEZxAKM*W< z!~Nrvy*V!UfwWdW%i*-XoFXUbi&5)y98DiWVZi#*S1i2t_6n?^T1zv*tfl!WzBpfk ze!hDft^KTJ!+$9ki{rD=a^k*Mf42_Vwy2)_+VNJuG&|x;YMnwt@V!}%Q&eWjLY}1| z$m)N;IEae&Ua|9#c6Y9-{_S^o2j-g)V8;EBcwd!Kejx+3j!!E8jJcD2`cMB)_UR9| z?x!+z6FSXnr&P(Su6Lhv30&d)ZpO73_q1+U24Za6-VIo!4oT@%DYU`E?=}YCJHc1gHmKU&| zjIf@;%9iTO6>d8gp;punyuZJU=CjjzQ16pK6ah)hjJ{(d=XWJi2M~@R%mnI10C&D; zhMjwCl7N?*=Z3#hUYi@tlp-htAeY}w`Ezhkw+9Mb8`1KB&H#uPlV|AN{~ga~8o2h3 zq~NQnz#PL!ECQ-3IqYCbWfM9ojVg2vQBqdE9H-w;*$2{u#y$Gr`UcN916ic9!c&^F zZIIwFwMwZERA04@>!(kA^LOk`_lSo3Ji}HUe)B9^qgj=`s1poL>M4~DlG}?}P0yYO z6^rcQxZIcL`0FQQ!`-EU;jWmWpTUm{aLBeW6*$sZl(FCqMEC|D>^%$)Z54Fz%6^q& zdo$%M$A@%ZT6)J@M$)SUrl0g>Qm}qU-+pp=CF$1X*IMVy%;ScZT0>7m??tPDFnHl^ z1cbkBq!a`?r}PH}5cz|`9gBBcXXKu3zbysEA4ThUCg`2L*eLn76Zmx4udSNs~BWD!u!>*OK?f7zwI1ltb&vR7Y03-2kx3NUPu zyV)corWj3%VuPu`r>CKz4D@)WbDJj~O4)bj7^0#yKjF!NT<_k;;lK9q+49_goo;N- zgpAb)4p#QJE;2>x76ACbE@WKa9jFupu&|6;R}zqvy0`|STO#x z<8$!lJWQ(Nlh@Au8Od0XU?37e+1{^!F0BjVU6qXi3C)2t0nTEf-i zikgP?E{>0l-R{6u>9n&gF?|5@0{e4)V+Y6Vw4n%Ze)d9i;X>`>? z@}>aP7{>n>{>JHM8T2oanO%ONCaE5MSJF;H+LhCePk@U5chBKn_2d9`K7<>-MjUUl zbe|7&;!E$M4l+vwLe5#X;qaELqUTOy3h#iuSSO?q? zQbF1O?R*9KLd+olbNfg8FimxPocv?=2kI|zb~;_ZwX#d+d=vqEkuq1lr*WBEi(ssw zZ)GI;eB$%^Ek*&ANf+qEzjHN}7*$%s z4Y`tL4pnXx9F-Pf%k3TgYg4h&XkE$b5rXY_DbF%v^_Mz&RSOAz`TlRQ)It%_F+*xB zZk8xM8G`Hdt-An+{Bf8%VcPNOIhZ=~rW76YrhxG~5|rxu{ob=@o4>ufSC+W_cxd|I z>5WT&Jx29;L!bt|GqqEzVD3FiPu%v9t0YD}F(Xd81ybf+dFU0|n`Kt=$a6WDRUgsd zgaf&=H8YdRi)GHV^FN)_N7{S9c*T&Ilf$bVq9cs>^HmgaCl?71yeR4OR0a-q3@9SX zlvXiQ&pf_Rfi#Hxl!(Git^3~&2Q+EBrYT_aNFG413f2$h5~&#nW%gpv!oZhq1&HMV zhlrvQjZKUc?G9~ue*VH>fUSOs6a;(hhL|PKjhllNLk>2WP93QwwX)M-s|ixH2Z|CD zU-7DX;(g;DBjmHX5@!T3U0<@lvsmoE^vrB%RT^>ZRY$$2sRmi`367XR5p_Qn9klv4 z9W1#Yuk4;2SD@}!zljQ#HhlW<(f)34b&wPrm6#@dUm5{t!DMFTR41BF5(s| zs5F}3Mw6v0heLFwR{aL$&guj=W?+P0H!xB)yZx5(!oQ9`_At=@0yVBe{qXbSDiFq@ zWq+}Mw7;*BgKcYr=^PFI3c}-~nXgd&jjyQ5(gb}UnOIAKdh6^=Dy$m@O5+`{{px$CsrjGQYg70- z8Q$>GvsTVFYg1d&=0;~*+?MY){RoG%R1&uXl}1dcMCn;=`KVbl3^l$9enU#oUa+1> zk549!|FAIm&vr)sS!FJ*LNVBe1D9CVpypY*qs1d%tNh(s2-IFca5CpMQ+&M^SSYwl zbZ1aB`8D5GC8Fk!-AbLSb_D(Et)TyTRr0<&Y~c)?w>k7=JPy2km-t)_gkTbHS}r*x zcjx+#t?tM_{xv^F3&GleNm*()n#!fIFVMR#AJ0v%*Aqq)3AZO-?nzO@+^|k6A9Ap| zI8s6sL~s?gIfTW!P*YQ8vFU*uxfKZHRRP&^Oa12J(odrL!^W$G_$i|sR%M&s0rE{JUP{&_rig=; zivC8;rDL}B(vdoI-*b=2gsS;2CFv-B=k+kTmrz_7pNFPA|A%O>7c|ruvh6Bi#^7O~ z{>s)+4GKzYWkK5;KLg$IK*#gnJhr=BQCa!i17U}n^tCnChDmTyJK1OjCX#Ec)6*WZ zF*H)kjtgL<214>wpeFgX#Me?=OISk?F{PSV&RK9Sf9Th5XS}Rgkl7h2sf40ow24>{ zL}jb_;kz|XobO9$pqB80t$=DXWnYnHaQZ~N{i%s-)yY`)(J$Vkxh4hcQqx_zbbb{^ z#s1tRuhI2miFe*S`AiTwNvbq7u7$-edVWly#QG|8nDH3zQ%7p7uUN>d%aQyMqw0Kr z38BE;a6`&)(ci1EsGM%>~hDgJ->hB?xKv3N+M?jTYJ#Pl19 z=%9|e&02M0k(!SW0{ zJ4!1`EuN%ZESN*hPmAn%!T-&zhO1LJS3ty3l7%Kk45ZoD|JGf5n0D)d&0`4u_AUf4e8xBz|O_j-9u0N&&s!RM360<&E|&>8Eq6Y z_3&h0tG@iVMWFFGkh@)!lkXQ8eBh(NWB(($8>~K3sM)uOS(t$1gYDL0Q>k22wq4NX z!XPcW75cR?lPt~?F+J(nB*tDw{u|OT2Id~{U`?#=>e&=m;T`VF!=@F1lD4yaCov-9 z9N*cQjpboAtxM3*gTblMRllicyOa6)*(D`E zRDVFXw`StqxU*Ptw|cuB3*{>mKT|-i2aR57%8HhcNVwe1h`@E?t++&qxZ|jz+)9!f z1ip24?P`IjxnfKTI+d+83tr4O6E?Zvv z=r5m(5v(&DDZ2RWHLc=wzrtivqth7^-v;;+^a07*H7_ncwu^U zyiDK{a*vR{MH4fcJ7Ut{BN!_c0mnVz?woX+LQh>_cNEWHUqVb)v#COF%AUSc4O{f^ z_u#g~aT_vAK~#!;o0P%F?qz7f&|Et)=;x^}3lr-GZYgxMD@ph^_u5WFVPbucDN?`= z=3tU9NYARLe(_WW#%}mGyxFZw8yKcXP)QELxua*dm~C1m*LFd|!lDm9%+D6OE3tZN zeLDCy`*xnjEW1^H(b{_7Kl7h@`8psC>oG^=AV#9%tf-Rl}(H{ez<|Bj;Db%8I|&cY`2L 
zttT14@|xBTny7_d7@R8xmp8I7zG}&c*@pb;K<63F>XLpBVE^lUA}LlwmHFtDJRq=` z4t7rEX`PPNPm)b9qpNXQLIq&Gbg-ba~``h8A-NO(@fd z=*<#~YgW{$CXY5LS*$oFl&BgN`Z_Fq9EqagSeZB$)Lg(!`^k=NYqM&9-WCwzsj9ft0tcIaQS_ z;b(jH_1L#vts&ciCs&{Q4$}Al;Ss3MP6GNDS5Ee7PqsG4ZlQ;+hi|Tdn93@ul%cpc z`MYCY>G!p#eCrryk7?-0(NQmD__av_O8{br9M6rst(SE_%p!u(R~Telw7 zdz1k&Lb=V8u_od)Qv97|R*ugL1W5Nf^}!Q{sY85hS_5XZFRdBD-@q+pR$;3TlJk-& zWs55nq<@?zf8u(>9~!We*PhODXE*UfeukM3mH{qv zEqtRRqNPjy^>Fgt=_VygP%guH<y?Xr#$V2b~y2TJnuQ`du79VS}ttz z#fKF?V@`8dqytEz>;2kfh-IX+>j>QZJyAv52YK?hV@P~5j^AMD8c8lh;&_`>m2?0w zxMDeQB!6tj$4aM(#Yy+-2M1jA2^t9%!yKK^SeF{AauQ0sB;%bj$HfjSRQ!{6ADOZOj=1RAzFz)p z}2gM0=${12e^hg~J+our46)`^|2BH0CBItEF~Kr@nxd)hT;Ny#9l zKJp`4ZHSa0TS@d1FBX?&lDSNeyd)u!Z#bspPeZtDPxK(r)Gun#HAtc` z>T~+Y&_y`J*AQ2kmNJ3J9ACU{CjOQwFpFw_+mQ7+6bCG`)DfSTx@j#ID3HpbntQB z?7YU%v6#{`Z{-MX5n_W9W*L-j_r;VF9vMgNKOT!=?@5*oZWd65cWS~fo-F8F`i-qr zOOgP9gp8o|_JVjFSnSE<$PUW7mk0(d$HP(PQEblyYg~VMPJiRhgUyNGc8F8|(adO<;?y&c7lcp-^cz4o& zO!Wv5!c4s$kI51(;mE8dKV?u62iX+4zd@hh7?NJaybXHguOwiC%u*tpPn#;;B=e&| z4~`@CJl>A#;@av*l+F0hEl#i ze9qqdyL3#3UgS84qoz}*KmDs;z zYVUXdS?(Fes~?m+ zxYCpUL6B@c>0 zh}^=<22ygqhsJgX-z6)3uBn`<+omtd2~ zswpRTNeLA}`l`*upoyv+oXzx-F~Qr%2&{t;d-pca_lJI@l8Y9Lp+?Dra<+P&i*5(={vDPE4kZX7vg~QJ&64skO$OVT zE+;ShtaT?$fi0TnKRK?;?^raQE_;t7(HLr*p7SL*S=h+wGF3jD;zomeB&%R z2N9nFA=)C>%m#v72D7lj^`E;G+NIzuP`-w@A3yM!v@CODFH4owmv_)Vh^)oR=Sul8 zyT<#7DXVJ0HH3tO9$a{)S&1X%{Y*I3hmQ1L%6k<26zd^TP0eTKvzC%J99Zo2&ZX_k zz;P5BEcAjH6ZL83$~P9+!!;lM3W-P6b+O|qPWqb!XztfW!|8ydH#K7L=;_5OtO)dFW-Sl<88Q-W$yLDg ze92FBedo}t^Vqq0F6`teU_h2)45`~_@7&NS{@DYUd&uLOm&Mi*aU#qxw+q$-#s3br z`O&klkGJ|aBKEANcO1`Y+`n|`QkK&zij$=|yNT`O_LFaj{~x*fVmiyq%hJ~yf=#!j ztXq*Tac={Ng+^uPULu@r#$*i#>Scw$+1qsnpP^q2%%xON+wG42KE#m#lQDch^aLcT z_UvfdWZ}%$4~Zai$05rp*Pvpct$ILa^=W*d5-=nSN@XW9Uyb`5M?%hCF`bhY-a}HC zd^!gQ7LT+mfYD#?uot-L>9d1IsQ=%J6(e6Kg)|Z#*4lY5wSqhFHwR}D9<$iiSLy+j z?ghTyNds->6HmM@BMFg1 z%BgZFYI4XqQi&-MGG|7F!Ynz=`Bcd%hsfCwG3PnX`B2Hk7#Uj;l2|N_mUH;MKHs1J zT$jrn_I@3n_kBO^;;l)(^eh|Cn$~H0eR}1yYv(NPwkOk&1KcORl%!{t5?YsEhv&*T z%amkW%~!J*#Fr~-@;ldf#p#lp9Ln-p<_LNBGWCn@a=%_h>#JQh-q`J-QT0VN zsniPAL~>A=Z&x4)Gp_P;H7d8?xAUwl737b=Ii5^aeI;2ctQn0(iSk5_#1|fTp16A< z)__^ck)e2)@LDbU+uuvI?>Qjv%MUlE*R)*_(T|hkd<+FNCkZNXX{$eS&$^*Gv-rg- z09=Ulr#DFNE)UbG;En`3aMr6@blf>^%>G9G)TL-;_?^97$u1B##^t1wRilQ zwPNu&54$&qV(DDV?M1A#UiN(DyqtYozY9`8xT3cYkrz~xXq^hux7@TZo&yK_CI>1b zw^@y3?=t%GWOtfMnlnTd%N{_%JG?PJCiN`F;vJ|d$5UMT4rRfO6PDnHGmOA@8d`B01UY1w)*10L1v?SeZC3nJiAuod?2}6Xh48Wv(yjxoOJnw6 zb)L%T7LCa-8fJTQ{utFx_C-{(u|k#^EXElaJD{bVVcWhv*R`)*+uNE4E3FiG%ySnp zQ5rlr9SyVkXkZHub2122DfE zGSfHpSVlV{;S&8PoH_~U8&naz`WF<{Q!YxQ{fSLXcTZcJW?{eLz2D*~w{Re=9OHb6 zu!ucN?F2R3n`wY~DnoOFb5jkx|7{()|(!9<>dM zV&Zc9EGbW;LDQ#yTwxsW8JP4{Gh=zk8izIe7=nJnVcJOUqvT;g8wj@Zq^ ztq^=VF(Y;ywjXR`Pmd2~SR2C&uodYr7=UJpBC9J()x%%JF{q!Y!U9!>nyCIUjc6PE(38RFFPjqce0r2{}d3I?Ag1 zp<$^n$Cj;Hji4np)~d`e!d{!9w;M71SSco*fAuSA5&lPv5oCiKM@ zQ5T03&T|$)j+9{Ark&+ooN2nJYPMvS-BD_B@zn?H$e~f5YeA7G_H%|ep0SEz?8ngL z6oZva+;`p85M~v=Jk@D8`?B~`p$E6`wpH$&;zw`VBKuI+|K7pCBgN{t=#Uzv&)_du zm>|R|@+?kT2p7_vD#@xNC?DHKsm{1qhG>1yE->a|$;TmLl45#5i`-V!TCwi0@>Gh> z;ouwwDqrLZs4{lk-v@N`J;wewJ#$ZK_z{i0q5Y`ks@=ca^Wq18YqN-FA+PWtmbtO< z=x1Qz*Ya=gOk!hUSBV{ z2iJ!>@k+zLo#9@@1`8^1K3 zA7>#tgqUU?hgtt{aEFyuzjGXS|D;SYsAfj^B3^m3@}DMxdJfZhua|6UE`A-#khhVb zW_+^wbU=sTe6FkGN%R9%wO> zuK>pENjgHllo8vEQr)@5Sd`9cMg?@7R=KB;#h07c`%dHnsDh6z8qvHCPy zycX{~K8s1}Ch(7>AQS3dN-7L>e$r=oz0z)7>!>!ou)lvTtGJVy?m`Mq8nB_p-Zk3$ zx4>+W6XlhcLL%(X(J+!c3jE5|j~{;KyJ2OV8+YOTPDvHi^OT}y)?S2rZt`>UJ9N>S zn##W`yUA^pO*7e{W(G=wa=vGcDq=5M}lAfO8#>TqB4 
z>VE{z@svrKXi#JCA9FD!Jx5JRXF>~aMIQo+Ol)@AF$28ir!?>CxxvboWTQl}jUjY_ipB3qoYk>wKw3;M#+%$0r`R;O ztL8H896X_*F`U(WP+I5#=etnx$!mD8%pUOD)Do0;frt9 zNNpY#7s$w7d=G(V^S{)O1E>6Ym}`9NkLw2uikrN=_*F*4!4X=QvTO^t%eci&*BCzC zSzZY>8i$3SER9Gm8@b+W9QbchsOMi_?Nfj#F^$|MZi-(AfzF^N(*8zZ&)#C5cNp1S zywV2k@=>&`@kXoTsGouzpW(oGQ%qgvAZxV0{_a`+)V@=|l_Y-VOrdY3PYoVo*QB6Y z#3NZZKEJd-pEBDOd63_gKto!3szoUgY|M{xT*#l^`PSuK!z5@*y;-4}hCwHq4*fef zJT>{?%cSHSe6Ssu%nJIMTBe%P--@eb*K;+&j1Ac#shaN-(Tj#iM6tYKh>PB5=+ryG z+J(ojkSGgFha~LUg>}@`>o3H^8_j>VZ%$sY+c{?SrJ>BN5nI|V{_!(hQ}x|TutEKR z7bd50-So`euvVPeUnlk#ZaLxi=jh&~EMjHwB(#=3$O;Zg)~C%eq_QRLCsBSlaOLrt z$+EL_8pAtK*E^gewenpODIL~|_d3CmBkYi{9e{B+O{_IEL`ttdaiFWvU5!7&Z?sZ8?keU zjPNyM!;D!c)n{bfmf?8Kkr7;Q(Yk%{#j>b+X^E0J%vC<_rBaNDQH9b?%b$GC2%!nT z<%b_Nsm(>GE1>@2M;h#P`uFOI--hy0#CADm@k!Q~C2|h5=G&*gQ&M91Add^M#knNA ziQ+n(8La7337r>a(ddUm+fw1OQgM&(EQNvZF*T}c?3dK`{oB8EteXR}CLvq*J&vxs zO0c0g-a7A#@KdP#b@xpRj^ZiL>Xcveo~bPMEF`@YV>GPenm-Jc)Va=$AFs#U;ENx9 z$#RzvUEn13`U_c$*Ff`xdUUIFp;B4);|@h{`4oj$&CJtg#AcF#IXGAjo?zH}vg;8f zF0NTC-Fi#W(e}E=KM!4*u<|ScBu>wAC5wm~ZOm zj>{adJb1$PmwemfqIjJ}`?IA)^EKb#9czkb z4(qzkJwCxFPlx*smVQ7>M&mp(UWo11_`?&iJ1BO=faol`YH5912cs0kS_*?t^;_nfj zx9Q5J#30*6rLvFG&t@L-$0stoS9WmXVN;@v0b#c9$foRL#{AEm%YJJ1%8ID6GnK(m zZATtW1s#0vd(n|6giM&fY)9jJoP?C`183HsI$&)ObMaSTV93L+q_#{u!$%!BidM3V zf|(~k>VFpBXlViI-Yfh$iyrZ7OG{gS{S$qr8DRD1g|*ItEnBP%O1obJsy&IyoDvfG zCg)+RGuAjbb9nf_`;`&2E$o}!;SYA?=F?LDjq<|QeQjwe;@2}QGTk^O{ZZCc*sDey z?5T!ONbwuqy+(@N^YNdy_dMAlE<=}oWe4Y$p>6v?_23(Tpd26MSpB=@-B_u+|@Dm|m5{;xlA|AmOlgM zt$N-%?jS{9?bSqjJ*LgG40Y9HF%&o13U8|}&A3eH$T1t4V3`F_iiKU<%8r#`fbfcDo6>d0vsY6r6TDSUnDXaC3OJL%N zl1DpG-12Mi8XB?i<@TG%q`*h_KWg4n5K@y-R9Za6DRDW)x~(%)s`5wIc@X=Sg5L(# zj?#2rD?%RAq+cc^I!%&QszbVXAM5%=X2FR;h!8$z`%EIu4R^!n1J+k?*&yF#}sS9atVVpp}|CQUrqBwp|Bul}Y7 z-upc}Lf2Q3IjSP?Blpai>sRPzAA`wF-GWx|Si5<%mruCklw1eWDtVs!DLj44{jLQ? z_K~kD%vYNcRX)S7UoW+sd-I2E7P0NeulyGE)zg5BV*J8nh32DdbK|>nk15^gVusQ5 z{@FaAC*KU?>PZ(37Cn2z*gd@L?tEtVsjK+0R(AX6ge?KTC`@%LSPVAwsVQ|ohrJ(| zNJt;XwL#G*;J@+ys`+UVh~GIGb$fFdApkKnfU34yzZFoC@2T{&FD#ocQOs!Yv{3$Kxsk16^M4B zB3aczf##9x*crqh(Ce$6ulSJ7#*dXnk09DO#RGKh@J13bNTiKJ0Gm$m0KzX#P4ECnwZrBTO`tULD|Vh!>`RyO_xGE zFFjAIGElu-EZ6rlCWc!;3SJ@m1aiaXzE!fHdv4@b-~qs<&51j9Ml5xS_pkkD@BwS? 
zy2fm0#2%LO$@BU4Gbv@Uioh4R36gCQAwkup%UXqOad)PRO$`ETT=x_KIOAZe=ihZ; zn0@)S*tz0QnD!%?P(CNxuNT0Tp%F-9n7;MOSAvP2;ekAVp;5HL%7Gze@x9-FtfMPo z)}E?TIVY=h@(CA5E{|2!rK6I-NzV)9M|yZJFYsnUPYyRVJ-m; zB@RJQ%9=#yL(^vPFV}bKR6Y5wrre4)caJw2>u_>7Oj#R2t)l6LGwFGwC|lNVlUjy>s0&>>qKK32{H0Sj2}zXz5PE~lF^v4%f?DK z=PD1~nfDsdpAb1$oPUvSlcScNb7C3H95t#ah7=KHQZOV@Nt4E7R-=zI8BNmbS;S30RCE(lb#krl1sis$)#-nI-`P?=(7UX^ndaBY+|vwmV^Ml&B;$+w|WG z@Jav&I)5BBDErGA-RZe-kTaXzTlX%!9$t_I47JXolTU(ZdCkazrht=h}mRU+&bgs+~wRgrUeA({(@Eek442CC3>j6Two%yZ_jaIbqE>)^dbyaq1yD zrXBz|3{z!G#}Oh=85nQ9`3@Ow)Mti0tBD7TvvC5>UPCv=fLchggDIR~E24Ld#*ml( zZWFWMQg!i}BvTrA@#Z1?_>kzEQoCH^v^q6|$yZlLyld;6{@t!)S*>NwMk{ZsYTh~egYqJx-Mb<7L~Xi+4>#jwo-R0OS`83+<@UMrVGqTEGk;Rs4_ zyv{?3?Y_AQH_ZOj^}QCNTW3$tEf_L(=ieJaMqupT6S;+S*+$3Tx*Y5k=?&P{6)zXW zpJKfd%nNit?jmz3%+IfmVlqGUey2&EC}4j7XB@X#cv#To0+TXwefrMga9%e9gC8FK zZ#hPG03={N;|kBxfTqW9QkEi^SADYG{k}kK@pg>G=Fq-`9;~ju%>XxJ4i7+Oy{z~m zjypUsG?4i?^5E}d|Cz3cy&ZsHy{@eZOzIm#AUjnXxi>XG-@EuN@ekNy|ILZ-ukLML z|F;QlT@a@lu`=N7+}hf@l>FvTFpn(}BM`)3NrFraf(L%4=OF4e1YVA}amk@yPKJ!4 z!*4^et==1?Hn$`wA>D9%@@|{XwbLVo*CG&$&}hOD@y;V9b^?C+S^BSx_y5p(!k8~= zJDQWiYY$xww@6Y#krT~N@H3v_e_)u7(6AE_!-_0oj>ywMf}P9D(LhHlkJU)!woKiZ0{V}V_{)hCeg5)$sc*j?(K;|`}H)+elc4)z#&9FcqU zdmyytVp757S3|J5ZzflwWOc&7^RPOq9i_+4qS2>ZZ1|D`&94t@9cq;Hzvhl~ON8p* z+6>v$hf0^WKKf%v?^4vH%JUg|Ru~$eE5S5wIL2^uT&u-q7kP*w>q<&;1-q`$SwCyN zpgJK>nkhvo$Ry#l*38b@E7-m>;r^CLkDbf&j#T2la&G=cmwT46t->gVR+kq zz&{7|Mh(SP8fU>pI0kGE{!C0!y}Macef3qbP49v^>rtVqgvoDucjqKiqbxqNoqTm~Z8@TEsNV}% zJLg4wfual4tpF=bRyDa5JOq96HlpWVrZlay`l!kX`7?T@j-E}vcd*m%cAElU0Nd>X znU*3As8`78&puiQoHJcvYhdUsa~{-@gm3k{vEg z>mD|^0xzuWK<$cH_jI73bIk{1yvY7QKMQ~C$WzDFo#10h-%}+;`+i>MaDT?g{p}A} zPGQ}cEH_D)xN;A>(r^D|sENw-%2tobW;*N~O)X1seyq7h`7HRfJ)j^9m{vD6GXNRD z;~Mhuz@@(}EgCbscEkl`&jkO9=W(69wT?Xf^ME_gE1~?iw*eUR2*Q?48pemXfrvJA zzN>8kp(bcB;t!OA=6Abp4*f(ybHT;YCPTRO)~lGBAL%d4JtlF;i5}qlLuQ-NUQLF| z6N}Y5f(r;f7D~x+kL71_K*$App7kZ#)(}eC#-h@7oH6nAN>=F%(Q8+)Vlh3t`yI@M zVyz%4sr1&HUDIW0oTDPT78!DY;n$Ak+}SG;-o+ig1~G6KJ()v23^5!)+U<6O`AdkKym6A3PjJ(t*jji?*!6in`9EM$hN9XX#=U`E}C{{E73>)9%iVtXc z{NoxMGo=Dbt~}czHOCU=qtD9Cd03+R>~9(P*|v8d3^VqBgPnOuAPg+|n-kXe)Epk> zlcH}-c(A|Zvl)GP5nCz1^9f9>IO_&9E8Qco^`zHZ-==UA(39Rc`S0+y68ZY)w%3p1 zPx2W+WsUXFZH8OzZQ&xh6vyBBYRvX(Xzma0%0Jay7B^Fe?>b(*Z9Z>9sAK zSd8;5Iv=0pXH5N@#Cu_Tez#{o>)>x8$my-EQ@t@|*0nQQNOJ|=&6F~7MYW)vb;8Kej?^V zZ}%6-FL!+Q3JtpI(`DFZ*=U{hr0s8EmnK6$HC|$K5`%k32H4knP>G1PWgO~R%qJ^( zyxAd$pup!X)u?h@ZB67kU6)jDP~!%sU-RVo_Uc>>r=)lC{8+4;NSS`iPZYI#cX_2s zqjs|SXw66|i3r}f{lt%Qo*&_{#ogJOG=$+SI=1)NORUeJX5K|+Od=|eJr%|!C8@-( zN!yIoGx4zfK5@Gcd3%$0vwxmk3-;QHsw}eh)vuADcI&%a=hw(>WfP%u^O&iXosJHn znO%$ub&@xHmKUFifs>n=$|)|=mG5RP9sqpB4`8|6)=Nm~ccJ1@vxjF1Y6T+?yZD5W z6Un))3d|;w`M~6~n1VpAQ=&0oTR)^ZtU9(gQ?yLxr&r`9pf@SOf!FS(OIB5mrvKUY zH)+!{+MQng(>Fc32`d+Cjrf`G2mLcWTr|teR92)v1!3Ue0)?!`4Lv1n+EJ+xdd+OZ zn`-=zYJMvjP?8iIWq`NP|1tgb>(8Vzy4HgN+V%vo!@0-hB+{#5-T&Ir7kGtI|8B&6 z1hCDmOkcpcLd;k~*qY?jaBJ;`5M#4!5UUWZ7O&h59*vwz1>PL_By}uKEZ7V{! 
zS}7{qkEFSPp=78PoAXlA8V%l1_0>ezhUDc+%r{e_Xm0$4=$8{B?vrf6Fx?1p8 zHf~~GVEdnsUZ$bHmh{?78GK@mYCQHZ5xTsE#l7f`f|d@R5|z?ZgKj@fKZcdo&N`>B zd)yP+cFtbyR*VaxrjanZeYvkS6T@l7cVfpge_972s>x-az$KtXgBS#gxuZCW#wYz( zfYf-f%6l-X%9GY}Eb$UHy7f~|i#eaH)XN*OY}b#AA;JyUw}(z-RXnY^h{&Mr<|{_ii`PvVjEHwIe)b!8IkH}i}y)l%Si*4FlX|K|1myURNZ#4 zO;)<>SGE4jrLD7SeX?u44Z@=+VNYb{?HkWMX`m+ZTUtTr$oQWL_Qy zOggBW+Q<%HK<{s$`X?Q2>~Kg z(39{aO|}bdB(lJ~XZ4}aY06>qz%U&cPE}^M6S7a13i&P&gH;L7f#cpq?*0I@;+19W zgUMb^T5dB+w-HFhWx)kI-HL5d2IOW)aTNE-iVVwjjp4@j@Ra`J5Wc}j)V#i0B3Iry zmMX8}7+_>9Ub+`o{;rksh%FF>_Zx8i7&o8^&=$0=w7qnGF37cLAb9iW0ljZA*m-fy z_eP1GX6Z*f=*i-lNz8)#CL%fJ79YECgQOWtKre3mY2Dlz&l=fEc%p=#yrvOHdpQYs z$gXB$!Z~9192X4GMjL5vwu`}(hI7=e{`Q+)mAO?46^u}bf#%Y=o5aRrbpOes6r561 zjwj#d?p|g<=+#Sx@|}+wF>p_LH_IS4k-%vtzpUTIEI9lQKE2rkfEcbtGq+3PuOmc` zRyyUkDVH&WU)_7&{nfQeOx)N1&GFXGc1!Qcfd0)O`p2!jz3C# zdVJ=o`sKqZsNS!!BadS@~@_|z>IVWAzM zM(!+-$x|)&dLo!u@tAKKyAMdV(KRYTjX>}s_M6`GO#-j>zZ?Sb-d=INRizx!Q|;oM zE!T)NE$#V5EmV9&bEIyNdgQ>!;2?U0tyS&qU~XH&1^?Tg`Mmk)=1*G!RaGV~>NKuv z2dOpFJK`^>%@shxeY48ui5ZLhe_R@SGD-_pX7K<*3T)sAu z`N9GzkN^5B;0mq1h`Q7dW+$|4b7&{FoaqE};jZyfq6DDCaHXGtLMxwq0`T?IrFG=dyF>GorK}cHqL-J-ZTVg0IwEP1}aI34F1mllRU-AWuksz{NkvwS#k6^gtLm39}dHdm$kP70FqCQ~0R6 zzH{(hXFe91`dUHvt+pRS8xxO1{X%!Y_Yr$nPOBUcd;a}*^^dGHTs@QwP0oTY_+Kle zNH4-&5@XV0EYPSb*BuOT+kaOd*Bt;-#y=c7$YptKnGx!^brdZ{BL#h`JQpE=bZ7`t&MI4ap)dQs9BfNFRy*G$uKbE*#vheNC9AXCXlMNO(gZ@w_(kPNE*BX_#=0>3J)B-r103f#3e9E4oP zuLdT5vYiUZSCh$tE3*o!`8dk!qhtZT3IE_!*NW&{0{V$BpoApQOe93 z#zD!kGfG_8@wT`nTJgj3`LUQgW_v~0WRW)=|K!Zw;tZ;ZKd#X~s#CI~JLL{$)q zZ=BRo%&JefF#@rAvEQ6OyBkXqqhRCgG~KgrkqFBMgSf}xHcta<9lLEQ>$Q=2s*}ci zun&BA)X}G~Xts=;ble$T>PGlf3yR?&0dZ!NOs5&Yg6PUG`HIkjIp4M3H6mRP+w6@6 z1$lvi_ad2qLEJTcntgqQMR#s&YjL5EPDvYR179 zo#6@`T2kO>6}vH z={vuttr=|Jm|5TjJb*zM89v_)>ZX7p8-Po8sNq!3Z)J-2W)47xC6?E)ln9OG6Z{^b zRY-OF^w~;padG28&n(@y?@E6N{?;YZ%0@WHwbDOSM#Rtd-@{WH(2V&t;XB8lQh%lF zMR*Splv9)lttIJ+KCGkaNq!21zgw$(F5HrDw_W37oZUMzKPIeRHug!9iUKqZ>Sp-$ zrlj+#nl!FrV@E^VS0Rb`sJE6GrvrpzupXIi$xz?Q+x0)CLK-xRJaX%18DImafu0jA zU2EM;o~WD*Hj3rbzdPR93_>~mrqn4~utCjaa!HXfK~&!rFp&WcLgAu-dXe)3Dc>0! zz4D8QF}ZGa?(yf905ICe;G5~84_Df8G%&10j0aQj@{sbwb(}YhtT~LQ!0j7g5j;4M z@}-!Ohf>S!pworrsokf;KXTO$aZNid;^^Jmi*$fO5zObFmn$kMla*`X8FH*8@)s_`UI zRVjq;7xzurvO6W|Y;omj?z(b+6)(85x##xZx%t9_m08Em*3i8j`Kwo6>hQ&BK6@LJ zp?j|J_LsZD5JN{J1!x~QsF1Qe(z zQ_Xr)8`Gn_j+{czS(Mnq-=#jE4A@-n@K&TTQy4w9@I+}TWlG4b2z~hEUv5%cxA;!8 zAL)n?2FVAp;jo0btB>SCqV6!m4i<T`MFNiWCZ!^^>SE%I+xAMPq_ z1?RKqeD(ZD!Ahq2D0qFjNV#^Xl}*s;-`C4aE%m6L2vO-`*~>XOwLJ+VvL9=nKH4-9 zU`rqV=f%y%b;&|0B+{(M4Rjov0-{bF9d2~;TvTPc(X*yfe|w&>a}Y$Qd}*feS!%n&(HCrtRD)~)o0DYzU48u_ zcly!lOsKa}g@ch~G-6QHr3T$I8{4b|!>w>Yu|F}4kW*!;vX3rx1^@1N6nWXe(jNgrDfRBL>8m_;I}tOfXx5D509kRqe8%9`qz}X^~lYjt_kT5gY7lu zE~ly4$yXq*#J`=Yo#_tWeb6#x46Fj=pd7fYW@&8yEoV>;RgD)Y`sj%NMQq4~^Hnf* z)tj1Dg2uE>kqBfB3oGO$zPFC~ZUF&*(Dr^u_2w5Xa8;jbj!U~Q3%y!cg3p9UQ~N?^ zRcrjL`5)1sA z%B4TRrQx<4l=LON;h5J7`yw!IKj7B4S=2vlu66U6aUID5H)C&_%yC>=Z#?icBzgWL z;<wv< zpBuRX{0j%n$er29y}a4Zu)UC

C;kIRhg~gM!vAOC`Sn4{$JCXdp2~=e39X=Y38A zFV6&940n{oQ-vTtSGIFgs;KMSSN4>CJz_li4~a+;e#ZV|xl&)k<((EiAGZL5JbCx> z_zf4#Mn;IZFG*}3a}*{MG#juo%KPq$uHfsYn@$|>uF$L3*h&!Bb-&pqo0|5MIfdh^ z>u;XT=Dgh3ppZrO*je3Y-gEp4ni{uKynBMP3=v9&rb4QJR}>@Hepl5d;U(Y~FIn8E zv)`+%iH_uO+%o^ygc)^}BBX6c1lpl0RfnaoD#~kTAh=ySZ2R)ayt*nYzcZVq zWFyBtHv_>=)6&IsmSJuAsB}aAC@>-(E*+2PIcf~t7hHalx^ubaH4{=}qdYN*uupvh z4GIp_=goj7pt;!eRk4g&L8%<2^NAv-Rg+Rozt~+dK5ntSm{%LQ_99S8A!U=duqO;W ztG&6t$0tYGX{U?2eL(NguRy(7kh9p|noogZu^#!B9MxZ^XVg%{jQ*uG__1r3X(Sr( zg%ox%htlAxF{&yBabPvgnqLz%SCC%UJ4iX$7e83tofXmB+`PBHP>pYj%Ub#;lv6j_m^73m1BGQo?D?QgmewfgSJEZ%`*czPwj zSSkC*qO|2fb1^t>vxFP)8FPUNN>@RIU zntFoWDNdS|0&;i%HW(4%tzQ;$s~`fp`nvV@!SGwI$%v{n=@xqk$vJW{~WPC8d0q2S^7u04=O?L@AU8S^3X0cFzVD zB^V`^`{455n5fRIZh1|s2bSDbR7)5<*|nVBfp&17-Z64XMBgBvZ(GGg6 zbkUUl^DaXTa-`2dWLL--p#dw<-V~^bpalm|q;j!%Ky2l~Tj>S#T<6y7A-LyzPg+6l zn{==3?vX-kh31&SFxAp((|&MP&F-`9AA-8kzdrhz*KGM~>Az~^{UB|y4D8w=?|9^7 z_3W*5FuHfAz;X^Z)Rb0Wg`mi2hZNYn+uwKUMMWnAIzTTePcI+!KkdH^Pz%e-UdWc| z!=mH-kDq+%lIqbpE(`RaxX-F6Aj`9UdQTYRv(FSGs`y`k1lck**iC?J}l!>gMojS*Np=v@wVBOeveL!;&RH&U7`12 zwJ|AUdPnYpo5G?-I7pQB@)mJcddq@lIzyh-cU0M-DZ{r%KnwEkXWSrxP(u+$I-tco zKMFS_dqN>U!sjs~PN@-pdo868(`ySq4z+>zpXUWbuEz+BctVqDzt$O9j zdP3|3n*HN1%=%2#i90A_=_P9!J-Vpn<09FTW|aRIEHx4dTq=_RDwBI<&Kf;(7M#aFf$aiZ2(bH#~|KkZD`Urohz zd-xwiAfFifl140h0Uq1T)W7D0$R{YovhShj{IO&MxxVboVAiO#hy>O3wuk5pN4{ z5>3>CifvgTB*q6;&}{9+WC}>8n!hcu(W^Xq!U=Lacz?cUpMJ2>vsVkuXc~cm5Jg^% zj&6FcdJyO^Cj`-+s>u+x=X+zxOPvbDEJeW63EL&s>dn8jP;l&08(LpmBKPbOBM-p0 zp{74neflxl>0@~>+uKVjhpxEX>+TL+`%bWBRfbeB_J&c^fB@TDI(J`x%S^Zh;eHJ- z2gaWA>tdD79tClbH^-KPkWJ041}xF?e$M9dl`{Sp70Vmu!F9>cI##M771P|$uytd~ z@qBu0!qR3aQTRM1X!=vUm%69q*MI)kzu;IPCCibL6@?*ymSdWt+5hkz6ycV}@%l_X z+!ygcCztk|WeNuxOl|HXsM)f5BwGk??6*Xpm~08Q$jiTK&>3!>iBQ7cew|p1+D&;Q zlBVagg}g)(VfEs8@=5>lpPl_M~}uT zy;kd(j*4Ncw*U~$uV0s>sU27=z{Z~2-+zIXABR(0m(#x5y!qNEq}ZO9cL?HsSKQnYbw2d}9BzT<3iqQBK7>WziK0^zJN^gcohK6eVijld@0JqF} zH|>1VRGeCLno)(Da7^uN_}<1!-t;2_&3qFflr7fdCYaT_)@(0NTa%k`NVX@~6^liQ z`*1^Wv|96x_tAgSykgNT80Mb)`p*;Mg0EISh)8=%>8^@uRU^+#^BSF?R}=2Byls$+ z9$!DLLPLx*FD1Lw{ObB|vKctLCm&tchSh=S^!wu}#0A!@m2HaPH$ihlC26Mz;+>1D z>c&p|hho~a7U*I16%EI+X}l9wy4jdug&yX>lCpjQc2%DMk0q zr3UBzrz@?3t(n-{8%|nhmhsLngoOqkT^+VwK{EIyN>xYv^08}6b8Ef*DMani*}v1j zr|xx!?k*;K>9c%*!y^D=?%yhC4*TcUdFo#0nfCnc{f+rHoz zkSy{mDCJU#5-yorlu?zW0pSjGYAgkf*>`g3!4p~k{HnT$zeA3T-rY-nDad;kf`4qt zDcF+Ynsl@Vz(4eg(T0dmxueZV^|C^aG-6N%nV3}zYNwu*mASaxlwui`t(uA}1Njl- zrardsD=R4Ib0m*sG}ggIDDegQyqm|v6UHQ}5qaOs23$28TJMJGvjAAf;uvVN|wA%6LoP!-|&wBVKc((a+ zJ#u|X^7zt|JWqfr`6L^Jg4Tyg8L{Uef#-w%=-3hdY%ITgg3W`1#V-`+2xrg@4So#lj-szJUpt?)d!p_T z6X&P?@KPM02{~M3Wj2%uzF2&AT(d>rwNKb+U}y{tj`;*(RN78AH7gK9C@U+otaWp! 
z&A$r;_=t?tneFk=xC=sfcJB|^R&4h7c3h0RtQyw6Mh#%S=PxJ7v}JMS9cfAU2>h|q z4^Bp5UfldrLrgQWDte+9lI+Sg!~b4;q-@?CVePzVCk#7G$fY6@l&7=XoO*IKI&vT72v-)?^GZ{Lw> zjBmX3LkQm-+SZrk(-rl@yE_A9kfzMY?G=rle-kahFtWeD z+v7-ZYdPwg2wE_2j|=zhMNl_q4nv#<@IU0-Y{{bLSAMax4C_j|vb>YO7mKXWPjLx4 zGwQMV>@DQOd$tSjmx<=}or_*5lIm8ix@l85u!A7eEWh4bc8JU^yyuF{W;GY z6Av~QUI+n<32;&%elr+lKDttjXE&-#6X$J^O4!Jd5cu0*HzgRD_k%CG`>W_!{v&UY zH-_JV-aJ>`=FSmf;<0$Y=5Q)dUe|lyBK%|tq6!(@&%tOSEO_hB0+TjRJ_`Y0)vsDf z^JMaI$P6gSI2M~e);VO#dy8%L9j^l~Yc5S0AVzP_wa@ZMUM>ay<)9pJR5PU=m&8l{ zMG5783wO_x>4U{!eMI|#d%JADONDJ}|qX*b_rh@Z5jLtGs*?JDaZIJg)>x191V*QN!m*;FC*HyLR0 zH|KM`{*R+`k7xRQ|M-Y}9*IrF=}KmA*JJj&kh`@XK%>-oHYt&07f0?9QY_VdCB4U^}Z1F^y; zu!R^6=J&7{rRRZx>#CIqVH9kVdV`hC}cHqS0@r8s!VX^RU20 zDA|Rq&V;X%7#l!H%7`!_KiQhv>V>0b8a~Y{7L<9i!V+7r@P4GfopDa2IMJSls#v|O zE3X3lq3-VybT)7Ek4&fIPp;fr%ccrpPeTgpd$fsokqn%^nE}yfU4%{EOjP$W9=bM@t>+^?- zou+Kl-s)Nt*MfGoktStst^ICNwfO;dGwR7zNpI1pN^MQy-@VDck!ejHp5JSh-3j|s zj|Bo~KI4m(B48aeS9tcHZJ_*Y?sLF?%`UZgH%JnVr3t z!B70ec+>Y~Uv)aD1}ZD!)_BjlrdbGt=eeZ5H|^HbM#qS;G{x;=@4w_Yfxm6F8!~vf z>aBk5+-r4uwM(oRlvwB7wCH{Z)mQRo!9IrDNMJYHJ2)ud+m(0C0TD62*@}0uu`&-7 zm5$+ks*_UDHpwF{YtMX^>ct&5id%%LO&Mp-edHc!m=AEsU6OBidzY-4mHY zE;mpCS4QfXU4KL=Lmkp^%ny3h3&4P2pwmgHbWF^9Ie|+|$gxRODUjZ@RYaOVo`i^^ zKM3jAQw2pw)5F9ZWyI-h5Za`B?3M_$+sd{=@xp0N4+f0yS--X@O`hY9c9#I3Y}Mys zOmya%R!Df2XSM;n$^c<)h2-Q8d&LHkvEn=av+|+aXgIWaZ^h&2UuP(eTvhZR;TB4q zahAnKz4M7i7O2tZgYGV(&buE$0NR4o-no6>ztJ9{>r3;p@%#>4%Y*;6f9ch>wA>s0 zv(m9L#|Vx|UZ1ONPvm;X%J-RMS|+Kr%v{|KH^l(~~5)r_PTQTT+mn@6ni7UGZh)q9+xrKWrz@ z{o6}jp8`m&jYw4-sy3p9gE>Bd zq~`c}Sh`qV$)d!{JSD&Uqrl7`zu}uMwCYn^r+ijl7SB22>W_Rc(kvR>MC5-T$DYvE zFHAtk5BiU)eAVcEV~Fswop$|f*%Ww7ya1(7u1h#`T1b)CS~=1dK)C}4Z?<4zXRV^KbPq(6~khcq&IC+j7nq^!fvX3{C1S@5|eFq+N=wt%sQRykZ6eN z?0?rU+CEu?_Pr;xn4Q!o=*b9@0Yq9QLyQGxtbP}}@%k?XH~)k;M785-MEI7a@$<)oL0 zop=;EKaxsyvfjDnQz9un&(dT*QZ<|kVpLA7% zlZaer=b|@U$8iDECh&}y2NP%)j6QByNbk*^)+N0sGOT9-QfpSXrA15;KVO72k1J3v z`dJPFa-qBX-2vA&n#!l#PabUwR1jj!zgh$KPtkO9%&I*!S?Y7=zPap*n2M2_+>^5P9~`bVLlr zFGh;d_kn=C{|s}I&5!E3hCqPilsc~oNw6Wg+`@58+@&|-Y)A9b!HhkTaMv+-t!+?> zDL59;4GP^FXlc5ml^VMLCQh91x=mK6{Z5|&4z~rxe7Cy?bkI=TdVybX(z&U(`$Y}Q z{U(#OD#<@Hd<^&uN(WkE#p%D(^JKh&-z9=6Fmic(d|WLqxmTQ|Tp&C;a#_%}-P(k- z^rCb|i)Dj6+A{;%3SW9kz7|Q}6{u54c(QN20Kn|lV96xYbX^;8t*tZl+!7|#zsPKq zGXneXi#%-rOPO#Uidq~TZ7@Wy$ggC*FBYb#R>BmP&pvSiWq{}q@F{Z1tUGsum01S` zvJKH8t{a~A^T4N!Gd?#~)P81udFetmwJ53F)yUuxp|G!4vv_3mz43cP+0?Qye2{p| zBj?I06_#sUfG^PC5Qf2yq?~}s0G>(lx8bhK(JIuQqIW#crXClj6?4H{4_xHitIB1s zv$4m9=|}aIw3=Nt=iz~HX1I<1Hu?zlJ_a$`Q=0_5BBG2GGas+#Ox^NwT9Aw zlS0IgY8Rb?-chtcxA@#eNNZOWo8>Z+Wzidx@~QT*#5WwG zTjNBx&^_lk`xXpy%@OJ;vloa?sx75ql?M{AXh zF0Mx^V~KHd&_(}S1s~l8UAJ(>->E0uh_hMIP?$r-S-W-Zte59TCAVVswaxN|*NxrY zJ+Qg8{PXu1_;K?hi`7Y8`zqj_9S0kG(yIi`*)c-@N z(*d$fGh>$H5u4aT*|#n5sc|!|8-9)o12suctqi(4wTG7-QZo}h%>$M5~_=KAxUYt?DB;hCC}fWcmsZfUOfJK?wdaM#?b*N3~+$tJ~P* zkz#opYisgy;6nh3N1Huy7=wzDFhEnUr#<;0axbvy{V^7lWFE6lgJ2~LW*Sqlo`N0u zaA(~4sz){S&ZH>erU=ipE5W=~3=;ND9i^Q?r}dwzNj&vt=voe!fY_~Tbe{nzjn zfRUPQNa=5v7JdGv6o8RE;UzK0iKWn{M#*?{r3=#4r_f=fo1K_e9CrD~%ucIqoqO9Y zn6rd8Y8I^6@0hAog(-mPn zM7&w$Ul`L{aYyYiHl~|1#oDNt__7H>XXlX=3w^9kgiX&*&?_(VMGi>VphRnP&llEP z{lATlmO9ef$V~kZ&eQzI4JR~A@NjXZt){(Yj|ukbLdYAnhUe+*e4K(PuBaM9*-o0r zQ`@7o_w6r%vD&zIM_8)8M_Xm|w4kCrAL0R5)ESrELnF^p?SD3G;kQwr^3}D14!^4$ zCb#^C-6{Hx;gE{A`|rs$3cFVH#SqD}C3^aqfrz28$^7hX|4|I9iI=;Dum;LB4@jUk&1 z=rz56vzP0~-AA8-o(~nncr3{dc^avEMzaEnmf75Xf_zXRpYAf2L6sNw((FOp|0pg`~C0A$g4ANqB22%Uw*@u zCN?w|A0215Y!P1(;YGV;QXXqodlGB0uDyQc{0 z)?nw+TOK}$*I&za5yED`*s~pyd~AJoEK@Qn+}Ml&Q^iJ*4S64sakiStbFN{No>0QrAyGxNFTV@K~-;Qc7cOIWx 
zfgoJ&SyM()kck)`!)F>_t1VCo+mdiHW~RxG=T!`c7VHiq-Jm$8Y@k(d*>v+AqhwtS zxXgwwon2|i6if#dF`w&n$&_%s^)SG>nV7{DmSAIczQrrN8^2b61NAsn8%46}uv{vx zM2JdWp>K%rIc|K7l-LM9A*w*?-FY0JDk0j6G@+;!{+f{#PVVtGuf_o!zk8DdizIen{t2)<+3sOjjS`Mw2$4ExOii zfwju}4(==*F4PhCqJFR%W15of{sZ|%>MK7J_cKusnPzuKeO@x@Hdsaa*skw*BM|fw z#|ONE&#ogYec%C^77h+m_f1UZMv5Eq-PoLSn5B>Nen^y3$}u~p@bJr27{2|=b?yK7 z!s`rVo}@4oGpTw}w;$0$=;`m?TO^^(1V4Fq=b71GbR1;fbvk}xFZ+O$dey#7&-Lb) zIx4y+K>hr+{WSpH+?eJ(1CsVwvC7gV< z{##sFwd7tU#ZR!X-Z+1fU#D<$aj{!~atcL;IZ(o@-K;Lt`?#823~ZA8UonIlv9x@u zgEc=)D>JUQ9Pdf1idX;~KrdV0y@US#STRa6;ZDiTt~=qDQDo(lCB^%Atm4 zjhzvS&8v1hwRbGKq!$jE2#lTmOdY{I$seu+T{7zK_O?|D?1Nhxado0GxUDJ#l-%-$ ztNN!u zXemSW44|@b)uAE3TDM5A*8*y?%s@&-l}hsC@3{#Z`~B*dx+nIku`fsjC=HzM`Cuc+{A_E3JNKR0b@V4rFug+jv-J(c9m6x~rF)AA zXhcU~fdEy&k%p}C_5E@F3P7T~PUU*c0P4U0=T>Am%` z6DQ6kEgcR-4k8`E2FzMKLER%TAKE(bLmI}^G}q|A9#BIw0qYD$0f2BsbP}qOW9Wy&%bt57z zJ}c7IWdRB`5zU)&8P04I1^FW6mD*_pt+C|~Nd-NS>q_svsr3I^1a-bCl&?KCALV`h zSYstCWF)&Lg3Eo9i_JU;H3CtL5h-Rl;EIdAW)o~V+aQhv|E{ds@wfL_oG zV(@kcE57%tWFoZL9_gj)(uHPTR6OU!J@+TS6!R+wkuL+#&k8v@NIoB;7V3A+B-x5H zvf2M&Wxl56h*YB}N2#C1+#XE8bhAW~{Y>;J?|UVQ!LZ*_xhiup34}QpjnM72(ay+$ zs^Nhwk&>O6$piAald4H|?r}5AW=A)DWx1|d;%x2(5R@&aMcCY#w}DCBa%b--?{FHI zO+&r9CeuVby-j}xcQ1k+U^inYEeh_=f|kM=m{mCOo}S^ODA0P;3fifarQ~0jY7H_^ zU$}I;!YV=2+x*ioI`PKPR>qazbQjD_R27Di;g$55Xro+!n+9_N$8#p?p@0r>}3WT76yv^0MgZ#k6rF^sn1!{m_G3eVy!AV>2amX3rq1i&qpO#4FAX zbBc7`6&yqhWyZ?E>N_y@aIPmSI5;>XUo4Lq@>Nw#jUfzgN+?SHy6ntoyoBXVQXnCX z(hS8R+}i@3L+Xt5bDd?w(tTee4^~LlEwg>nY0LkfXf!o9$IN^WpL&rs?{EDJIe-y( zjUxYmHYyDJ*b=ISi}rgwwK$pK#xw0z%z_RlCu)p7UaY>+<65l~^Cw6oV!DJsHH30z zh(eiBi!gk*bo+Uv6r|D(hx;h=#{2}D zEQnG`i=`U=)yue!^4;2U8||l5N>@~!{P2mG%?JS71K6AN3lPzZu*-QGom8vUM(3m} z*s3OrtY&`;w1oLIx_n6D9z;1NTA!u@v=Zxo{8H&R6&&dMLMc7ct$8-i=KuQAad)PS zrO=?REThg3JK4~*<(_&2_1(QXEd<0K6D=X~(LNX9>qyuelH%eseJKJfww4Ekpfl&- zcsjb6cuz<@)K<;_5nv02g=TNXRMphffZi|R8B8)yfjZoyU`#>I+dNSdbWE)tsa1w` z0R!mn+4}!IdZ5Vpte0R9C96(PPb5}Qg?z@<=_yXHqT$4V!HPAtxHkDpw-+et`{q?c zGDVFqhqLrWAM+Osxg;D36Os>7hdn7@VsgUI<`m`|dS58{87ZL1fmrOYeHMi_wBc)t zeC0Yj0_Scdx0&<%PaDe~eZt7(UzSD`nE#OZz*JDH;sz0g7I6yluG;kv^Xsrcc($nA zyaYWcgs7L#0}_(-!dWi1CmHFGFqTiLkh}+yqGe(7!x$V9F%x=p6tus5?dls=xaY-) zL3P^Gd>3w5yH&EDv92;-=wUP%l=xsjYXt`!uGd@(IU4uWYE?XD`T6+em^SH!=U8-M zg?J^^I-07Eec_u%q=}_ercX52?Q}QXtLZ-%T^PjItpF^*6i`{-3!RfR`mW{;fG-@` z9-$N~E7M)xQz+}jxy?B{UorG>GK-wbw0T!S%DP6Th4u8yY35~jJG~d9i$2d-W7)DmQJKW zgFQuc9%<($EoJO=tKO^VV*J_O-#5U!Q3P6Tn3yEj-+t39UR4v-i7{Mz7Bljgn$UZi zpH9F(DtC*yV_r=aVC-vjy-XIM`^zHkI@S|?$wIp7z7*s}8gPJ@a;GHpAO{L4E}<1i zpvkp4JrfyvFnPobQQbRmt_TeAKUZMQK8ul(_jS7jK{&yHw)}}EB0$NA(w4S2?(VCV z;ccv1vvs6e1~NXjld4AFSGNLQzt1mRRka;md3qNn7+FRIW1BQpkFLS~kW11ga&oU< zo4~z&1NYJ<_l?+A=H@{Db!)W+UXBBh&R)d}D#5higTwKLn$?JP5SReJ@Y-$ivu}OK z<3E4rBTlw`uWC}rC74l=BR#5nO5JRc>b;i}Hly{)70fujNxyy(Qo<3lY5m1Lrt}|LG<{FV*db zOXhn)lo`>Pz@5$cb)Z58{Y@=S;RYYsh1a2QBSNZc0FEZCRV4jDstufKUKb^~F}Z`3ROs>51=s5{f0^VOh>su3JaTHl{pY@D1^^=RRE zTCqI&bE1jU{j6iPh25zc-_XP9Q!PjTG-JN*(*L9|INzT55<;eA3f2E%y8xSJo{jy9oL|!)>gtSZ+ zBq;lyd;=Ye7yE_2Z|u@C=`(IQ(XhVrGh92I`h&RakRIva7bmy$WVB?YdfA}Rjj|gL zWZ3E6sRb_9Ir0}iY?0LyZIR@ZhP{Sy_pROi-R+H>(+^j5Mm<1!RAi!2M!m}I&DSIbgKGh%Zy z7^|~)YXZ@#y1KZ^JCNGwf-|b(jvFT)8oRaKR zMpXu`MyOMYD=oYlo3ZtvM44>$5%IH*hdWoG69!AK0Q(+*NzRxDk@nYZ^6fm_gm1^O z%TEEI7440vauI?x8;rcdTZ=!xA@>9G2WSKNB=Rnb0&rXKJ%b_6>*SPpuuljy%6iLI z_W)H64%6(G>s7nVX%Z!_%sh;e=L1qaAAd3}dRv(G=;>Z|<(|0vkTn4G|z%GbK7bIPLT(0RA)QerOf$fu2i-x&(% zzA`Y2`L`EtThzR9FD7GF)4P_=cIU}^65yl$L1MvX(gjOAwt}oy5WOgg#upzJPK80$ zWb@_k4LEn3zZ#A{`~sN@ddmu_<@oFL&hb7JdUpF@VC87Za&kM4^FR8F>vu}o976V= zgzO?CnE~9HcZ-$6 
z9*L(y|6o@931!XR7{(xUe2B`r9N<-ZGFy(Xuvfo9`sU~du3%Z}=|{oW{9(uS;JeWDKksi3a_5As{E@WS)PM;QDq|IjS<@!=Ek| zkg?m|Ui{f!#IzF>rG0&UFet)P8BH4*58bm!GH8ja?=3f8p^ zRnOwBl|?~hSvv2@ICvW<8x3+dB&2@JkvPjY_HR6-AtQfr+dIf;hqBYQ+CpkPXtMLC9c_{XD9mWOiy~EE=3fI%;G>=L z{CdgRFKWiAn;(AZF4ujk!{hS@#wRA2rx?AAouF#!@zoWPm2hA)PVl~^kpDfLgegcF z=BWF`3dlMGretx_LK9~;5KN$5b#51CvULj3szD_d`-3Fp$mLxaHn#`(`iZQ= zMe>j#-ARGsG>aZY_~)_5)ii?pw;44mI<0zfZ$jv<>nnP$*KP6++SR&HXi$O$OYpI~ zmI@lkvm;kyKdFl%^kO|^#YcO@wwos(*PK0XBU8nr8q-#JYewE~udVW!T(~w+5hChbZ+IG8&y301M~gmJkVw{yIR^Rtv|)a_ z*@foxEp@a9ZqDg~(qov@<1+Q0WY|^N*oEdszt%_LVyt%eE5hb5omx$x*(iyJKP z9(?!vS8P0YzbWWF>ay`v9IB6HxD?vOnRmu|>UAXIj={b>#~Z+0`4c(L`Db`?e17{` zlr2XT50&S{wrikSt20{;7Y`>7+6L+~Qr2Iwv3RDgD@s6ad4>3E#6DCJZJpm%4BFe> zUu@g=7I=?~!*wece1CG-|6(V)WxF_p5!cye4>-pKDE+9Lh+ow% zA}gyIxX2k7Yv=ja$ADcR*4QjE?@}LHySEj3v??{~BGg#L1;0!>?Tl@_z!gl1YAuI!SJT-tV~lIgtFHziV_ zUvlhiKU1X@=53G`*GFqZC|6chf=7MM;ktp16ZlQ?K516xduwo4QPEb*H^d(Xuh0S5 zPJWcQ3Z~vFY20uCtAMVQ_X+)5X;NkR8fi57ZSHv&H3C{W@Hn*+q9F}zkR8@F<15JV z^X!SGeEQ~CJnPLTicrOI~kxdf@tZ1)o}auZVG0=Ij^ z-*|Gry?3*ivHi^yEHPuU*g4Jm9-ouH+=|{@t%!Ouo5KQ4sQL)ytnk#UOYrN`o2mW_ zXNcbukbL&pVBgaCzf-sC=fOpQd9WXey z+MDrM>MEA3!oL+Izw0$&+GOU0*s=heHkb5cbNs2>kfN@<5 zq#|lF6_lEcD7f64P^ak4a09$}+4T9v#Xo;MV|o)f0iu}TCA(pC2B73N8YchB9sW&= z+)E3fkTib^l&22lel8Xj)TO#wi3i8QhuO0k61}XVDxS6l@x->p=&P?bg5UOHk2+GH zP7fV=D7lZ1mwuV5|NTd!=|OOtoKPI+`yksEdttKN>n!6)ewk+@SMm>8H#2H~5xTeX(Vun#`#%O(@w!^j(fFzVUyE9SRbO6`!+XGIF7S_9{|*S#r&9)z z+N2#%jf`aRqN^{9FF(_ar8v_@A_%yb*z_D_qSVgj=A56}Y&<-=eQn)IU`McAKwVqd5PE4-*<@>x7c7lU_e<3Y^No zrE*R*mh#?oH->&8Op?EgS=+df=Nsok8$RbhT2^#)-|sqx2E9c2_^V%0i4KTFHi90C z2W9uCdqg%*7k?dpIj@9V-RlxpWZdrK1>?>fbu5?#UR26Dg~7nerkLAsT&|i&r;I^Q zyPTv1q!YyWr9`$<-nWQ62rzFJ`IYQ>TpJE6Of|@*KB-bxlE3HuH;gJ58ToQ%?t1=Wapi_i3li)4wDkQb+_=#Bn738tlRKFiCWJc$?kztJ#o>H$ffY2mqSn=e zot+(;i^kzT|FymOOCB`ptj1j(C?>5QI;4(mQldhK^b}j%4U#vvS`Aty1#LVy`8A%L zQ{d9KEb$E%Att9p*Ul(SzT^+{8fSe5m3<;^+2J*+>Z~U+7o{h9`Qas-B?e{Y&!)I+ zg#yW%6Ou0V!s6GrIqQqOxPpY*q@JI^9Q5q8HzCSxK0({Q!AGA8*5x_XQCiwr`82iyMqS!8Fz%3M3# z24YFm`lhji*|btx_2jZU67k=Edd7n6h6p+Z^2Lbqb8<4ulkgJnUQ^>@9pMlZu+L+7 zhc)QTkLJlor>8Ur+B~bt;IM2{o@tuLXLT_T8GC{A%EK31gxz>Hq>YxW%fUS{(M9t& z9A?@CWQ|IAgX$@<@b~M&$zH-(C+uIfm?A?elSQ2c(TELn^)${EATSRTBc(vh0onhq z0!Vgm2_0o8A9svl`y4i$T9J^zBEY0e@hi2yA|!vyFb2`_cHO;VZ1Sho3=3$X*RGu= zROp*Ul@}DsAJdO{Wu5K|Znmk01k|J05!}7#m`#xz`Z~|?lE1p@7I39yfAVcVLni-90Y~&*7E33=g-5#b<{-cmavI905t+ zs;L1j=#}M$v-GK*MTOD21YTBwxWzR^lM4$z_l2ET|C%(9Apuz7;6N^9i1Yj4Mw*d_ zFq92)v`Cu_-WFi;6#nLMvGsPcIXaMcxXee?R)71deO=-s}F1F2plbUdwo}P@ng`z3#;1PAit1+ zV1JwWGDYl{@U4(HbxAqhk5RWrYlYg96-N|}%nbmm?5jMEP<#=QSNqMmRowZ*lFmm} zIJR(EgVEy;x(~WUWj|nn%3GIb4g<)c`Vb0E#>nbQB~n43yP{W7u2(VU1>cHHFCd_& z8B*8S{<~q~2?x#BI`X==hKOm85luxb9*_~pW5iYZa<1eH&bkl5z9Se~wHwF=7?KU< zm2kA`w0@Gn|HeB3!BGg-)h3UG7_l6G>vZMG;4L~>oPwHpc0&EwW956BPk=T3zj$S( z%r3nCs50QF=u?ml2X=R=OkQW%dF!-;J7_+4B;C>x;di#tt(B~P?BZQUHEkIg2T$dA zR&9N^{eFzxfE-smS}`}}_ig6Avblo1sMjL^oJR6MGtvbT|3VSK;g#?F#eYz$O) zx_3$5y8JgKIi)^ z=ztekC+Mf!42(flrhm`1ui~QTSl+l8v5%ZuV^!t5&I7j{-&i6py&L$&klM_dIa17f z#QW<{Vv6;&%)I%`m*m7VquOxvVY*aXz^)=EF#6re$T{@7cpD`dZ*!)s@x-caxAg7i z-KG8pU*B<3`0*sQ*a6cyn`hV#m+Y`f;#5;48CrVEB z=~*55%Tm!1AQ4V^y)OS5KE5k12#!F=82&>p_CkY^vzJB7<&(=%kap4uw+qT7HAcEm z>UGgpk_O~f9!Xs`gae=LqL;gRmpJz%9>{}${ATlY8OaaeZA@O9?23!M@dW|+Na(!N z&h|8Ay*Il2r0S9N3Jfex{p4>K;gy=)am9lRp*_5nj?0|yR`-b2%T33Tl|<*$2?`YCs>f5e z(Vr7P^;w~1q=W=tQ2Y;^TS9#oLXra}qx54832(jMjy$~JHkzL<(ZBJ-jW%dOy`pAQ zbUmAqYBXt0|Iwo$VVjNbbFP@i+%t)E5+23!<5%Jk`E>fj{sn%BE-4Z$U`71<^Z`}k zQEPc$oBSs?#{B%#iZN-g=^m+v&vH4K`>VC(DM-taubmz7ez_0-su3kp4KF3d(ENzUKfx&e|j6fArTf@%fr 
z5}0iJ0Q?0ood#y%6i_%>^@q^glCns#e6<0bY)4?JM`V5H@a%{7WpUr{0sx`*l8;Gy z=#G5Ua4Zs&+#&8e5NnHf&!W2+^WCRxnRib-}P_)OscK9t(8d<%z!=`f4l_PMB7qGe?} z`9{{C_$?AlKcNH6VUmBAxe6fu#LqJRg=mdJI{Fp^a`DDV~T3X_B1<`&81;k zHH{kvA_lH~jPiEC;udAgO~4l3{I(O+Oj1yz3%MZIm|?Fw|JSX@=k}R`SyFO0kX~(% znS_FRDe^VSFAxCceJMUg4TkF*Pc+htgw4HU9-vc||775i>03`UOuSwxAgqbr4Rz=2^p?2npUurMecG~f>b!$mv&d`6;O=0x3=aAX?b^f6DTq}$xS`JGVC>Rg z^oT}WZm4#h`GAh(CwKlRGg%l$RCB9(k`O-eb$%?y3w%G;g$ z&V;pZn|_qeUpjq`5z25I!b^?x4N>OYfH4=v^`0*TE73J4EYjI$rkss~qjf|kg6)fW z>8*_pK|_@HScesRnNNsI|K|@J51z18ELi3EWP0T(Z+AhNnAsP1qWHDZ7tYV2KeLPG z#>z7@-^vd;=pP5OmHvOEaQh4#`831;=BN|5_BUvEV|tEKaOvXWF860a($40bX1}@~ zr0jvdK#{yyl5`b^pt^9xXxq_dTjcA)p3pw3ASM9;c&HbnNU z^C_DdRs_LD(#sO0Ku<@HJ(v9LxIb(APc)h(e@oe1Z#aez7mRVCJ$+?}s7feNGI7i< z4Xww2epD(|Tqm1+|MU*&okFn-y_<~xDi5b_!uRM znv4ScLzN2Bh{EE-GmK#k=adG!&uTZ0>@q6zeVZhksF1Errme46R)fDtAgMn_>{&h; z^Rq}-iGC0}Go!emG{|Bs4X4&h*HPI~3eJ&$~^`{LcJ+ZD;^9wCdIB_-aUN;nYcUn65W`5@?9(!4BEy{w!zQfb6U zE+!$Yg#xQ!e9@DuM4UE@ei*NdSy&-e5}sGmsgIb;@qt0?d1BQH`Mu9Q(Y8xcYCILJa-2<# z=_UmVq*gBY#nwb6rq5>0F?Tv>Ow*g2o1SXJnE2H6B!Bft(gc8tuFbpA9&;&^kz~QI zJiYgo_1R_9F52XMNWN3~`~~9&D!G$=Zuf=Xo%YZP^HN97mVJU&G;)pe?QhN-41c z3Xm@e(Ki@E@TSKL!KSv{WdBueQL3auN2_K!*^EM8>lioNP4@z_{10y%y#}&jOfbNU2YW~2d7oMd^qqSk1A)g`}&@$8+Nkp5`aIU zRvY2jQ~WXWXm9B;3)MJNf{o$v5B_$US@#S9pN|#Q=mTsEnRGz@01UR_(~s7dx(aAgvSh4EwJ*R~yY##@56IwzZqj}61AgLgPu zGU@6`m`z@a6i*IM;pbKY7yh`6p+i#t)77&kNnQ5R?A*$y5^k_XHe8;vXF>^nEfpC=T<=e(~D z?c{lXmb966b$WQG{fm{9o~vQ%OoWN@G5z zZrJ37T`~v8@NfBI%kKX;I`2TLzyFV4vzw8TgoY7)x;EL#yh>zd-i+*VZCPcN zy(*jQiqs|COXwOIx6rk(bh*k6$++nzbd&7g`TqLPUm5ql?>Vp6^Z9t_mO4)d>^_w_ zljJ*zCc55aE^B4v6R&7-DpNr`Zc^SZJ7 ztYS*n=s&Bs%YvwCYhAK^ivE9-&sRqe9d6DEdu*w|FfR~`%}aSaHMbDo4rb3u*fnT+ zT^*hh`50k}_Yip_mMfNhmz(M&|Esx_4zHNCaoxC(clmbb z`6Z$lXUWw2t}1!0<+~F;eBO>mEC%qm_}){yhBmnq%XSS>uFu6SJmtiIih}n;^BD5j zH?JRJPlC++ZP4DwHwMAi9>j|IEP;i{`FAIBJcZHl3+agkH zf8SC{iYRFKeCOWxOsq44b#TObMe|A<5*3LCt-Yzji2Y$_U` zq(y4Q8X98kg{z_87k+jn9F6UzsYiJlC$hEs8BA|GEi5nFfVCShm%W~VvS(xetclr! 
zU%eYU5#4ia@<9!pPVJGt?@qJ)V6ayQtVaBEmSRlLBlI11w24c<9abCfj8)VcMab`J zzN8bD3_N~^Xl3{L8L8PGk3Y5Wof@2&$YC$Bh$dfVg)H%lv|jwgKZR%yOMM8rqeRPQ zoms!`3xUy;&GMt3%?F{og=;W@iyDNYl0Y|)s|8x)rZ@SLJ6rvzl%2{q;^$USQ)99^ zpQ`;t&;M%c#I;o~DVrH)-_Qd~>A9X5hoa@>WiN9j%u63AKoB_K(5;+mQQ|BMqP=8Hsf)4*&N6!dgW*|lQCv~2Bds1 zbCKs(#O19RGi!(kEs@2mQv1UU98O70^)%$Arad##uiNh=PO@9fKo0X`$7|46SyDpz z0wVR~O^ne={9*{KX76-12*=Jnm|;=O|6HqpI4;lysA*#T`{-T_4eq*lkq*?M<>nRVb-ORbGFEV$&z`tFkdos25p|z8_TD|0lVdp@-Jhclyj?DiZ85PL zd ztK*NwB?29l)cddCL(znAJ5qm=UuS2p`73CxfN};?d?{J z5d(QMz}gleu|WFUpD49OwMi(B14woq9y|on_VzxgNK}2gGlY>q^4x+Iri0Tfl({nh z5!VVjj9t&>Jx3Msls8fiDyE0S7brh^u2Y5aHsV$}Ep$90+&c53d=n)v&wr$&AOQDS zjoP|4X?t;*Un8sFR(xFCRlw}|ERe4?^yAHkEA*CPxVGz9Yax}2K;g6zaRVQ*m1EOw&|CUzZfmn%YrKIpuMs-;~FwYh^XngvL zHcaGl&t?jNOBP@0WauPUzR4D9fi}K6liTA9)b-BODmjQc{kKCM-EE=t=>>nNVP+&j#J+l4|br|TM|3-$9~ zI>1d*3OyyvWdD0BZ6GUC%XZD#Eg2r^%q8F~=9J?=zjlc}M&&)uY+q@*nst6?xA5C( z+J6#G)+&_x?sN@g7G6n8@_C(-Rd-i<&*Uy=y`68>9LM)Cs>3#xofl(r~Vwt-c;z5 zH?2Ekz-j_23L_u>Axs`EE^It}*yE@FR*RO?&C$ngz3e%4USGsALRd2d!>6#w!N;`P z!sXE|;naK29{YrKxjDx9hvS_s0jC|6bL+8FU!5`$mPZj8S0!l?_76aG0bXTp2^-6lw!w@eMdFD4u@V|Rhr*^62F z()#ZHF_!zm@aU(hF{o4qaEKvi{Zk9fMMMfo4nYTMQCQFK>YTq_@cTkI+ z;F5AMp~u;MirkC+TnFCd?U#S6tp#b_s>zpX7H_Cr`iP2oB%T~dXd(ga$_cXKBZ-f# zvym;KbG`k>pH!VQq~!SOcoUJ#!2Ecb5hD}^LPw*enj} z`Crln33GIKXhU=QuejbkQxg8A@7$H(a@P5~&Wq)B-N@mazTG^5RIQp7;={LdS0rq@ z&Z<&@cJfYha(c&7NuJV04ECCLM4_ z3(?^rtGxHpH$dW`qhxfLx68ZZfs~#vmJejkc$9t6q;(-nwl>nFRWUCkn08YbQnS_m zkY|wTA6*^|qqZ!uwvo@I^6&k(F<0!4W(@m&cX~V`A*+c~{Xi61M_**9m>EhZ!KLv_ zV9e}fD*rjjM^JAe&xn;3-GUcnvJ9JadeVaC40n$3PZYB>f;QQRJ3PK{&YKHG8rEY^ z&MCUx@Zael+^N&XnH-79-rciFGn}BWIr;A7(54I0mqyw268(f@w2DVwi+Nvrhu~y(kc-rx?By?aF+nj;a@u3wUlwG8 zTEgRZTPhCHj{b?xB`~Hyu;LT8P$o&=APd?>^5(s3;E?;?g@gC*v)ji`LX@Gn%SH#h z(L3W2^oQd=KSfyMIHz|P?>>`NI5WMqU0gqTZHb{k*uN_PeoJz@Us{Ov8Q7CI+bO79 zCL`Jww8D)pZ)2A95Yq9t8G0e+R{xMs(wv?q(#u(Ru}z~9IT{y|(oZLpdw5*>GN zY>+>5+hxwMtyrz0NK^48bg_^jRIj8XpslPS5Gb;L@BaarOKe2&oOVDP)ml0n7$BX~ z(v^thO_fI%_&^kVJMhX0vhD%-=w3{mL2f7YHZ2Zp_6I`IMd)diyxdDEX^c>k=U{H| z!}+z=K&rn%thg5zdZ{kU?6CUNdlAfJvnaVn6E@CcLhR7{456*`WXy=f?`F`9Pi8J$ZTMmI|3cU&r(QhE1eRlN+@iHoO;$*akUB+YVek|zBu z^o4`F3FEadBOfS$tP=+`M`VHR5xmsi9rGAfu`oO*zu(4+P@F3U z#f))Yj~FVWg))4h%hoNu(!0N-*UKYzC+s}DsI47K%U!HSB2p657oC38OeZNNUEM@oZ>9piZZgi(JgNw&9J(%d2f&(g6*!%tQPXf zRvoS9aCuepU)7<{d4SUg@Krvs-Lq>As+l1-den)$mhvgnRmyx5N(q{o z)A(v;OWc>OS&M|fmqlkn(P@64{v1@(cY_#vy8GX;)Y$-iiuOe&_DjarVffMPWXWsr zL8m8I%dI(T^5k7!H#ogqXtzgiNRPBPbQBL7I{I8rbn?}An)g|5^o$$$`g?{icTOnT zRIY>4Z2h)tMLf)l`_Jta7|FwY)s*ymIJmxKc-8H-JC~TupC*wRK1&A^&`^5=?qkZ)%vil_T4AMP&SO03)`S9A= z>fo|gEm4n2^$YJ=ON4J`^6|N66HCae?Xq=tG87>KH5`YrQL?1xg{cwI7PLH5YU_db zNs)OKGxLt)|M9I-8z!p)Ze=S?f7~vA9#unL-CD+12V^^ksZlr*&l^I0xkQi(H)<+t zok0`Qv8b#pj@k;I5~~ivzk=2cP7-eOMC^w+xLnX`l&Hex13YaPTI%AdG`_ zMgs0j^(GJi6uykGTkkq*x(h6Ai<^2x2PCR`id+mmcVDdg2>!tQ*yJM5prv`t&F3G*Ig*#q6r^sYoumIV##V2sHA&M?+$X!KpiWfy>Sr7t zQ{d$o?d?6c2I4`WSh%S|lu9xtIS%>q$veR-k8|^{6y%5_1D8?O%~OEV7ilu_K>Q;_ z6`Auhr4;<=#YQDL&l+Y^FM^-;GKY`yO?67I0P{)19w>WVQLk(Z9Q)z9qhr3>9* z;xjw{L}oD}hMjVi_AoeCiG8)s{Uu&vNTmLSW15LXo)vR|$n^tGv0U*ay~@L8TaFrY z2t!KvuCvXbJ!Wg=%g`2*>1)!Zt?Rzwq}~9mRr99n(K1hCjawU?0NTO{M^Oa%BIwwT z##0MglIalVKF>w?k0I!{82O8Jc2#e}Ozo_6$ndGn+pqNwU}Hq<^}{i~{jp;9kh z_O?%6EF}^;%yBQ?9Gyr1^CtE{>m$^`3R-Kc?^5nAqVJ4!tX3jc3&L=+Z&jCtd~lXd-+FC_(pAh(w)B_y*OtZ8ol8d0w*(Wvo`dEq zCB96Qg-lu_VP+d^eMwCe$1>}gKXKFu;H)|iRvi0Q(vKHe!9;{v&zCw@eJs(*pX68S zGxqqB=JYTHAt9OMncbi&VQm_SvQv1fpr8Qs*I_V6IO+qF?7N-WNIH7r?H9n<_X?W% z8`-9mI>>&;KpKw>p4a{oyvt3iYB~)XUCSF)IwU_4|JuawXnJRlA1crOLJSojT58n} 
ztn>9MAGJZTvwHnDyBIUAJRPzzyY{OrxaVng(+mD^RSGbE-t8R2kmxoZoKZGy;+>9+ANTA+`fBZ?;x#j3cGop=kh7FTFj0WQx>pJe(HLrZF!K==17ulhCBrasAHvLJ|-@^r+n*HBlmV&n&S!zv= zDRG_z^-TcA(*%qA>Vo#)Z@AA~U{02OsdmOd?-a`gz1l)9=Tm;rJJ{vn5b-DJ#>s9u zDo&3DB*xy}wU2#_G=d=JPvXl(?5=i06Q78$Qv`xL?uOxfm|H?ATGx%2t$!PsY&SIX zi5vd71gMy7rEg{y-sf~@p3a3@*{Y@58PHmu#GV@CY;lla?ULoV2XADII@~t3y7I|O zGwF>W)OYIh>5`(s!s$s>F`D~?vfaYR?b$J~8l9cf{W3o{3zPk`3MlK3MO`a+`V;}Q zbWayhT|q8pSs~#s-StSy>fo?b_e4$c9`ztb<7nexyKTE{FRil`JZ8fiPF-7ZcVsnT zW34}ch|wQ5$1A?}hA8|zU2DvKe0d)3{nhJEoc`U_Eq$hyLdQIXqqrEd%DG#R$sj3a zYt!Yd?kA;=c8@!4NSb5h5&?I9wN#t%OU*rGlQh57RUaiD{>7=Co}Rhl4AXQ6Bn(0k zGB6}UN~LN@>Xa0;yYz9UuI&}(DlMAO=AW?tE)g7%t@PfQ z5-=k#A1ajUrok+P1o6~#JB5GPPz@)X>NXE({R%?H`#K6_A5!q*$!3JBX{{T!e^w^Kqs3-IP3ahDTbz4kVhtf=9W6s07m zdiv$Y#>PsG?%t~C(VsP*`2-7i$C8_ne}gj>AN7vk%S|t6CbDx4|6aOAQjF*z8k@aI zuCY-1k5z?I)nN5P{_F;+TZf=nvbRU|a(q;C0n+)ETe!Qz|7W-L3C&_#K_4!$mF9a- zV|}}}DJ2ZWn`C(Uv*QxOrwUbBdI}K8X*~SJDGrRU*nc!9o2(aG>RQfWB>U2a7Qxj# z`z$(trK(DYcMe2KGHxWB$shnlysscj)|NA! zhX?ohcK)p1xFy)VZAR5V;_$}b_c?4N+xuTXzh2HC0QWrWXlierR$3pV28G&8 zUx-NIeUQ#O(;7}ew$%|oyh;~;e!V`lUS;?4Pkayp>*}p|9t^Hw*94XPFEEKqdaOws ze{hAfSAH)}Ek0+?qF2Xm=Q>IX(frFay=!jxg4b{AqP1-Uv*5%V^PJ-sYppRqgzHo~ z4La$@A2Ljr4F6FHdu4>9v;?rF8jC0VXUO0$JO_d{eMkEhNB@90P{CsSx<^?f*x*+j zkrQ^aDiXR!6&U|QpXvzZx86REGxgIdzqxVFe)#REm=iYj8IwVZNquePEdB#lxo})$ zN8$IXE%XgZx?L7aU%(MriY_T~@wCKM@eeKMZ3RAtKv(FiGY;c5Tp#5_gDmC!P_hRY$iZ?L@dG(O` zyfKHC*#K0vm40h$sE7xjQZy3VLaB|j6+edsdGO_evg(eG?zp|bpiB{W^=!v)jj_j! zMeUr<* z%YxZ@FSBI7Ye4>Cz5W(K*S5&?&3IK5r#S6!n&w1(O>*SsoLHE^J5O(URxY8#TvpnF z;MewAfpsHC9YQscopR`&?B(bVk}@f0-&)GE$(ObgMUbyu(6=%%OT5|;*%4+5N=J3z z)?%?{RyyAP0mSJ+IAlW1Qu#n72f)(~Mn=f?>jZ=TgZKgPOZ0oD>aSFiD1PBukr(B> zs!w@5qC6_}21C0z4|sw1=CoKOOwq2cz)NK|JrXqCirf9xh^9n#o=er&`v~yVppxo? zGt(SrWFqJZ-Z}evEP`CZdJ{s<&8(32Y7vb%=Ek(_H?tia!sWtdP9yG+HV5vi6yEFmhXl~ek###XF&&h@soL+5Z>C*!1tz8 zPn#M}GLHrnNH?4FNY&G5g*pOE6T+{1HD44-Bl!FT+bPq7*9vwf2-sW+sC}gD&onCX5?6xF!MmFh=;U*QAM^m zBtKmGJpfnuF$p?pWROs|7wj62Qk)|?Y}t2uUIWz-K$rx#@FGMPX`2N-MYL6QopCHAn`v z?H!zloEXTDl)4hU$gjHaZTE&-1GzxOyk#Be0{#AAu5CC9_JbCNt+=BO ziuTj)&>_tdKmgR$_ZrqTC&wahxA0CVk7MkZlul2mL6-6c{BFXwK#?nolNpFxz{SCg zT&MZQZn)znW3Z6gikwlSo%Q&bGOEQ{JovJVTGr~voeNQ5D1*Sm_y~WEpSk9P%6mon zFYJQ4j=k_I)vtg5#9waIoy)Hx3$j%3XC0IVkdns~p}Cv!s+=;)0Xb~&?)$Kd7mM#q zW%-|Ls56L28d9x@H)7}%uemr`x^o4nl^w(@FsDr$4iDX*|5s`1gL4`N<@q~ses2k- zmw`fnjkd=tqAXC%2EnRg1m2HMWAiy5wy~=!Ul8`$I{!vw3gXZm1b^w^=6pfzYx5`* zKjL}L;zO5_@&1^{K#{0gP?+Zxs5Ms1)M|&)piR0sn|OP!Xx$_SCmd1JqN0cgfxc8H zX$VK3ijTpU`7itbo6sKpC?<4t-v>H`wJZ4y%a3^@9cw$k#`>t+3*$gt^v_SiH`~L* zlJ}gx;H%92A;hU}e#hud;l7lDzCzlbcArjP+R+M?u&_Rv|DT9p*k?lWj_LAyn(mT8 zYN#C0HX|qoBZv-)Kyi%7YZr^jkF5=4XB)>K z5~C%<2Mfzc7iR^cu8oHu&phY!8D3pn%x?l9*I$Q+V_o|i<}DDFKR^IH8xMMWPE^fgO* zyrO{RXJ-Qd|7lc|5s}aCn;e&0&O83@Nlbf!FN(DHwM&#}RYJX}k`wIYOKqKOzL{Sz z^VbQE zkQ$imET%n%CMTCsW(nQ*8y?x2Ds|(JI{?m9jym>tWrZSAW_{C-Qcr+r`V54+H`L_INy(YMreN*94X0>M zSKa)txQaPs-IO?|1B9mTI?eP_oiIbP+E zC1M{cjhQKVQo4zjIPCAUE=h9H7#d#T%(Erz__*{xtGhO=l$p^f#)-3;}a9!yPh>*qUAZLMP7e{CU+T|01@ymBa*Q9 zqrTCA?BJ0#Ih^hJKl;<*@8b2&26c86q%r(p_ULeNgudRYzU)J2zt7nEHBH$0ymsLC zG!gl^TE{4D3A?#PbjjCmX7JdLN17&f$ z_O2A>d-tW3S>bSGrPED#Yv652kO6tSyy@>om`3?y7_HYJof*uZ8|M<@&XYh&AX;Ru z>v`My9RI{ZNIHm7%K%4#O~YHS(gClh2GGC_5|$J^uaf*J`H88UAgeae*(N%Jekg!wVX!25LJ?)l%(CK1$z zt#|5Xrk9kYc<;&^Ux2^3Aqvz9=a~R8fN5gyEBdk0iN!&#GP8vH;ijoN@&7=Xv7g!V zXFP$fz82IX-V2%+3ukZr>;%0)kvvhI*n2u?TyWWkqP&!jupz00=-p3IXMDU+M7L;o zzdBDawddpmHz5-_&!5_6qswDuZNwDDD%d@e*U?c76&ga0Q#c*gtjM3r8}xT?Ht2ft z+)Qg=JEi)D62@BQG=(cHE&xxM;1w6<8`6AV0`I>fYVg7G2naU}Poi72HuBh6!sImX 
zivrA7E45r{B_tY$U(sgL!D^g#sy>Z@fo=*EZPIG&DT2jD`yZqx%OIAh7 zr5`W1vt>k?AzZd}>r?Jh9`N>py3x{x8kpC);CphGRZRxA-)}%zp&`!5KfcPI?aG$# zO|*`H35`m0?Cf8T*7ARqm8roiBOxYz`$iG3%G3m6`Auf^9YyEf@a`NG{SWqoeuvkSsPu8BZz6 z_g5S$c{}wGsqV~HCU!=T@!lg{tsj8o4xmPFuV3+2TTpb^P`(?p)!4Py$#)-d;c0ybGv!JUrWk z-Kj>bxSmpx4G4nkC(=|NE!MuMbAR9zX7c{NF4|m789iPo426podSU}R1ru3?_{z6$ z`{=(2eFQDVQ0fXhdkalFI%nGgFXxy#Ri!e&Z5A@s(V(p}y;z$OdfrrxyzNce0k7}S z>YxT<&~NC|MFr%Y zyOg(fv{>@%KtOE|A|%Ae#K+!~g#j^1>p8w0vIhE?o9~Pun}aGdE!na4O8316@7~nUj1pPZjm+#;hB`cceH(Y^yxa<5FQYnJDnG7og*`=$o zSS1pIRT4Q#w+58F6$B2?P;Lvn?nw;Qs36rfZjyF*ZS35!;>u1)k|T?kIq>-jTUncj z!knZ4^u3h=V!LTcPIqLcE-3=??C=iYg}CX%BmOCclN^(9OvbF$eiF*!z+Gw^RBUYk z$Fp6bwU*6-&{RtJu%0KkKaErTR~LQH1>fkIJ}6NYE>A0H5iEU(w_7;vkv-R7r)|*^ z+IW*avDXV_UlkcPruE@;N5lI|l|$*tP$E`|UltL-^m1!n501}ll@zPmZV06I1T#s| zY)WZv>7jh>AB^ehHhuL?sdnrhf!{t55+S@*6U)ub6ZDRvUvkpRqmMR5`2Crj9rC6tH@dtvP=S`mNLn4r~NEz)W8 zl{!BF8iK$6K8raTqo$>`0VYp4OXqA5;^LiVBH|8TUHS_S1Vlhka_c0->BUNM3Z3(d zg;WB3rmynU_34P0VRD#in@pQKkSW4fuh2bpLhe^UA}I+s$JV+@_zzdqh?8!~E$`+> z#7y5^vMtc>M_q?GIi`E2SlMw)z4N+)tqbq|q{6EACwOEk(+Ct9hlC)P3Qd6)jAzWd zGrUJX4C9M!E%r*1+=I&nKu@yWE7WbeUq#)AO%nx$shjCDuY;jWyO(K zDUzff1wO62@T3!c0PCxJG(uKU0I$@wpNeA)?3S=`2yNXuTQ;_Zu=?iWG8{_eh4xF4 z$Y;TI{~CT($V92oCIbs4N^$qaYCXl`4_Z(11{;!MP~Q0VLPT4c8DZ6Su-x?2Ga~If zl9Z)N*}H=^RKfr58pSt9oZ$N=@VLeU2jH(~49Km{Ck<)~JAQ6=_N?;`X?c{-2DD)) zQbh7jI#hy9Z&&R_0aGpY=eN8H*QX@f^UZkL^2V;#SiUOp3K+~>9J3(Xd<;wIOkn)Q z=>*p-BBz5BexD|!kL>R;KB$ewxBXY;hg!5QS%?-XaU*ZTs$)=lA9udWC0>^2RBpL= za#wBA+SJ5!IApa?s%p)<{>+rJLNC_#3gqm=Hi{p!%T1+XZ9qja)A>iavfiD@YXx=D zYo|~Q8F^HjPf^+5d!9GBzoJNB{rO7>a^k~m|MNpb-g?b z@@dEaI&68Y(<;t~OkcZvx$%4(e;F#pAgl%YB=#`iYu-vc?YW zB*u%^pZQO#vZOMA&YW1TJEkOXFlQBKzrpL2ALEQ~2u)6qrbx!NwWctj5)0Qpl25A#91#H&Jj7d76ziX5<&9 zTkT%%a0leurbfbc--x=dT$_4%U{{MBCi5}4=jq1XrF0(+sn2?weuu;TFW!+|-v7N1 z>V|1k#l4nlDpzIyquPEl)^=TtaR8!Q#kB&FP ze+4kM*PnH>eyRMy!)86ioZ+9+WgYOQ2Ns%gIyKxj-s!E;3L;Ei^(6Fi4Vt*1&231! z(f2AvSV*4vVnj$J?lt0Q#6Wn^t+hwZ>%vP;(Yc0ynU#n9oNIC+K5hq)Qe5+l{_@Q z=)>&`POY7|E%|{zfA*KqR^5(|5WC0Nalr=j5oTws<)4>ce_9>0vq=Ulr;!*Tk#)d1 z8Rg(!E->%~TYtqz;ZBbRSi}nu*sak!Ior=Z2Sv)tW!?=u9q@`sh@q;*0r+#*w&(Y| z+8kNp&)@9?lGOtLZ7dkd#}MZ4i$X2QxoM&3VyWVv<;Umpl_;Nv70oZ_$+0~exXjXh z|LCONCCqDFl#Ag9$!X?vY41;e%E~(T)`Cti@)!HQn)|7yz8ALfQ{JY)hfOPohZHTt)He+>QtD&F7b(^r=tbrF{DWAL8ox+V zg!9RVK1HD43b%2%-gFU(^jc2}Z(ZNZO0eVch2-)wi`T}Op0TjG{JGW4B*w8w;5n*z z=!w!bG_tcbDF`q{9m8OK8ob{lp>=82Jf%_K8l$iJK#Fu_1xf*JdLUBh4=g+U*w1C+X}C;ZLm%`fZx)_N<+{Kn{Bwo;vuF zY%cGWN4hfGJ-JZd<|4rY#oKfz)JP$}3PLDO`PUSho9&sQCSM;I#Nf43__+D%0xSgl zSp-uWWJ}}8$3)Q)o_?qz;6#^gS!q&g`IRmkFv#oZ#%r)X>(A*?fCxR{e*(d7r@jPZ zER9?|A|2vSO0G{Um!S*Uyw8*hD69VchC`%7BX+Bx`z2N;Tn_KvZBvpbZUW1pJ?noF z!{&+Oe5dZxN$|+*eebAXS<|vqkemw*yQ#VCBrTTvta@#?C+gsEGc@)psqCQ0)Fft; z#Pk(t;S&xTk5u3PJNlP)^r!9U*E*`f!cWP%ds6uXW3#sUhS+5rZd=I!E3VT|l@&}i zd!8b3FHUVxH2G?sjwfb6F#YhYZsfr!#;p)WOQuU8JDlQwm=iS@vtD@4_N->HtP|eW z*laz$eqgUT)Ojg0dgpSosX5;JV9_52W~ihZA)lN)R>)e1V;^JcZtST?=`ZoTO!_yJ zt&P$quTpJH+-TOC9w<4vf$AChC90?|BAs!r3}$jYz-#g$=)V># z48Mgv3^tOV6al#M;d|HpUPs2lCLmp4K<$~uij(*LI<>Vng5M3-I;^=$;zmu1@uIF( z=zy!MojiwJ<|KNg;_we{djTw?)#qak;r9ZiQ*LFBT&%Hr>>+rZyCA>~G= zI`9P!fo?w@&&~f#@e(?=re)=F4uVbR$hJ3nsq~8dfp1r1v&cP%E0X-GYf~td4;V^b z%QYX^;q$OS8aAkn#?ZuMLrl@7pF36AJaXPQy%ER0LlA}5ldwf1ibQGnwSLH;i_Hr; zS&{bq@H*MKj*_An?~YHwynf*9kNPG03mqY70o&Y{MHeLyj@{=p3cf+h4JFd-M zNK{SrQ_b_I0eG%V?s4{9G+rk;uZ5#gr{V!~Di@0f&R^&^=?W60Av-%;h}q=3THuM! 
zq?hyC%{wrN{Nm@9!Q(sU@ZuWl#aD#GS zqrEVxh{W1fOCv41udCyY2}$4CZqP$A6H!hXnel%34c*;I*jXtH4p>FW>blrqq4D6IpA^xjwiV6+_r%RuQ^FY)c*#KbBOssnTW6P_np&F0Viu)2k z(Q!=cX#MbTy?ncRh0LHoo_8$)m-hqx6MB~w7Tf+XLJ-D@PY=B-NCxonlanm<2jn*ljsRm)d zXmC|bdn{TL(@dH9Dq2xtKn~h%px)gTDs&nwS*vz+H__DcIr^rMa4?;)WeUM@M?9>yCsx(N?u8B1el>~&@D|AEg+d!+YoNLSLHdcVmu!h`Gz+6=< zW%GGE^M?(EAzl0hf9`U{cufqdJUo8!7HJgMVt(N_$gm7atu|Ylm}HnbnjuorZAJDE zSRm4{hw1o9^yPGo^;T-&G#)j2<&lR87sd%I{E5s&i|xtC?9T6uy80(tJ1 z@fCZ^)NA9{veX$G3(+E?!xfn;U{WqfEgm%;A0k-4T?uN7T zkiS=W{b)C&w^%h3=F_h-*5ZG3H9Zv0E+kORug6;p{jhxy*%`C<_mEsuqnofj%UCo; zpKiF+-xIz^<<{HXSa{sA!lV0k;4$G~k8spU*rO7psGws8_*yWzZvpC02aXQsk1Ui| z)D!mTU%#y0-K8b$Ob_sg(n?mOo~c6nA-*4C2%m$#0oR5+o}(r8(2;x1y`w5t#{ClV z$*H6##zvCdu1+J6*LG=k5!Z2S$yt{rYzqV5t7p(qVEwfEJ)+n&bS_x^>OC;ASqRZ6H>Ux96|jPL5(sUS{f(qEX)Vy= zJcstlJpUZc+Ck6vlop89_Eu=RzrX%C$B_ew7(b~)jjxZ5x-UMI)y%5tvGD7 z5Y@@><-6jrULHl(z8xl#pUytEW8)vc|L5f+lKM8!Iw8Jo_9rn04Ac?tg%-P!XB1>< zp<3nDF;7i#iM$U9;9qxV$LzYn#=zoc-vH0RVlDk1WUcH3W@Wl>AzHf?NBF#sYf9@2 zMR}_xsXMn5I-OVio?dHk{3Mn;188v0(!UL$&hs+QDJr0Vhr>q-5eEh=Y8Wx7(-KZA zQpF!r)v8Ov>M#XPy!sB8NnmJuw6INZEn3#*`hUu<@Qa4^Zl1Eh8a_~A#SZ0+1Yko+ zTI;76pn9#|)&>JlsQ=N6VVG}S9z`(?JYbc5e@{(*guIv!f(- z7r}ZIVxDpcO@CtQxPl2pwFVkRKc)GhRGPGsElVTd|FehhPQmUe6c4*fgpmARG$IB~ zaZE!(AN4Ly^*%_f>o1N8l}~*6u6}NQo)EVO%)jG|k&&!A!zq~mAa9;3rKGTxhT!z8 z@-l)P=p(r*o@&L&oGTwLgxy$j~>}{qQyAIaLad9V~ z9OgvyBLCtj-b4;upS8O#s{Y6Stj>sHavJ}_S;Nw%g|$U-qYh*sdYv4hO$DRV3J6QM z&~e8~yWFXfASm0D6f%D}X|AP7O>C9sL|{zZbGTIFIzK;;H;aymiHW=V z<;&dMmpMfSeIfdM_3+gHeC%s$K=h)3zchpu(<60I!gMTPw2FgoK7_-q#&B>kCmc>M zY<)%7bKw;|QqaMXovsHz>`6QGN56O!#v4ORL4EJE+%|LW!d+m^xIXW2=c43d&a2*o z*ZrYrA|jm3P7jd7wv|}_UI=k802AJt#m`sTW&ss@qJpL7@3r!}EL2At60h=CG+9KIP3E?aAnhNv2Ch*Q}^aNR`o|?g$ojXI*axA zlVoGQT=9dU`bWxTJdqVFS9cy{8S^cK&UX$xDqTCE!v2Fn(!~yFzXEW^l)tU|*3~%+aA-Zd8?Q(dLf(>(o4>zK zsh^siJy?zAwMj5JZgr*6-kIKglKT&`SSQr+!mUReaBm)y=8iEbUD6g`FMRGt8y2BK2!Ej#&vog?#W+F z;}Ybuj0k+7U5b=;p@5A{Bh^7PJ?5hfg#a2~4ro0mXrKM6gyX80OI>WD$yWxAB=-7) zdyc^Ms{7&IE&~MeD1_{nkCc}1w=rvOsxBV;v3Dg8{l1lio&b)gJEJ5>>$FaxgAI+Itl z6)^_)z;M{CLlrlbt4|xpYmLa?L-qux{jB(TSFB|$NlGt+>ziVB=nu$!<}jFekAke1 z7-zOAi(T%BaXDk-y3FP20OTnIGLK{uB^!%@xj z(I#J5eyfqNXja%)XQ=^3U2?}qpN+1Ul`ztFU|$k!#X0kVx%h9MFiFqc=A5Y_b@v}=C+`q=Y{tE3@ypniFBk4T`2}0c|?~`G!@`Zbs zI%t!$K0U^&F-9Yr(3m)=hs_ebF|4UiF278}LG33O#3d z{EPaMb7*Kt;VFBjF5fd&)|dww1GerQ6^(GmBO-k{cBCKrGLxz##3t=K%a|JWG z@%gO*&iGeuFp+)J`{Mh5-m`M^jVw!PvUSZ%>T4Eay>wyW`u1<2Y$D24m{80^d)BWp zUHo6A7!zYn+BvjFqbLm?PIdg^h@w^QT)Gf8Vc2GfZwKb&6*x zT(p|-L&_s+#A+-wUn60@c>K&Dg%3W!6@R!EnEeab)V06pM7h!R0h?u&yU={(bfiqi zw~eE(+f@ph*K9_$`uZ2!i8Wj{u;`B2NS6m1B4>Z9fbh?mfPfcZGEmmBHh!f?0}9>I zC+jO;0VHw%y(1p+>eZG>1_Fl9nV1n|Ai)PQ9!0)BM&60jE2NoON~7z|gx$CEL8lu_ z@k?%vknh+Ka%;rA+hGV4Ogk9zl|DX)nsn^}MDci;&ejoaEnGIGMa`{SL~q}qpZ0Mz z{O01)hFecud&knt@xl=3I5mts>xah&5oa9xNCw4%togN}5LhXgHg}sL>U5#~lCZbAn z2em}kBRwYFlxX^I3!6cNS>}+x9f3@`wYi(V9@njCl@2fLZ?jLtShI@8QtP>OWPkZ{ z9E58r{~go3OZI{wU;$GPVB4iWGg`|VUXNV51--&7-P*a1Z!dr^MBp;VuwZvUau^vTD!09e1y&p zA-HN%O-7WU2B3~Mt2k674Qo9)Wxgp(<_XEZm3}!ry}B58ovwn1A;Z(OsWE-nez|@c zp<)^E?wP!_pZK@2ojhxK35!WA5?PHTqgF~PL&(y)l6PPTG}IHjDi~T|&L+_9=5Egm zDh4lEV|abuF$nlL+uUPo$*_mLT~s$iq2veu*%J_Jo%0K<{)Bz}{p#2^dR1wUjU6#M z_j8tu4)}aHkFu%KcJmC-gvNHXo?O+P;NeISvTxYu#?-z5X_Z^=e-}>a8RVATLVNj) zWF$F#s=nj@{p^`!cJX*&7^$ZA%jwq?2xuKVJl>D~ao*L2nx&>^xygi?F3=q+@4Ny|kcek`T)Wp~X~t|)Gj=$2`Rno7>Dg)U_~wJ2^$24;$)2l&58Ruu(aIrOX27cQ>Sr81nSt5oW0;5K;3~HIebKLh{V$Jx0>6T+*!ar+288VXO=6azN+6 zag$%cNNU>lln^4~jgen6b}-1e`3@ud9Bxxhs97(`@a_{ooENr9KkR-AXR$kkt#A~1 zcd}eDHzUJVkjPOnCL4&5oN}__;|j17wu8Mq@CC6JnYpYnDlFG*fFIy7Fau_v*lh&{ 
zxsdobnDdtKln@aFY|-xh@MONYw7u`jom?Hp$2O&DW(Jr^1aMo=UAzmT;$Hc%2es4{4q0Cm~wIO4i9YZnb!>N zQ?Zz|59R+yy<#*)ugLodC7#6pZ)&=|^m&NzW3x$=t6z?D)4lzuC!#oBz0vllEi3ee zyx;8Q7(&d!&lY}jN$x_4*pvZzNfC#43@ncf){7sdM8VGQi|8}u`z|gmyq-zx6D`Cx zEAsIvUwn&v1I3Bb>!z?X8aXkFOQ(oli$B{=1NDPvzbuw|;tq~ivI<{p1WemUFcI68fZpLvdF#R2`Q4VX%zT6!TZL5h7RqpTI$Cy-ZvnvVZy)Y+9;KT+|XjR?m zRxZ8${jbX+A82ESw)A^@d$egSDrk3nQe{0FO>lHVcRJZEK3G^ED+K1J&9Lm?+ZTLcu=)m!&?Mk{Z&?W-M<%CMTWXLMIC(~u7t9h-pyv} zg?<*%GtEc@XY6-e4|jGbK%?zg0Nuq8fL?IfGkbnx72q9H(+R4}iYb~TQ95gD3VaZp zr9;ifU+PfiL^z)6>#&3oYiX3mGWEc1U=sI->H)4zr9WMHZD&ZOrIpl&=6KL*T(=Q} zx3bu7amByvi2_G^a`f4@O;3UxJLX1ugd)#810JU$iL|(FUlxc8`rf^Uk~qY zNQ^G=t*o$u8N9oOo+^K9`EGCwgABR{m&8l_S{PJJ;SxVAC`P2F8w>F6*3;k7#8c{e zPUd@J4}Pz#gdR{5PTIgRFLd_Ivht&%on;2;)*jAu(02F=W8EU7RBw%-m(c|BJ z(MV#G;jvdqlPKi!T;eoU(HygZT&Z$KxPwcn69UTOyg$CG=1ChxoY!< z#|59M<1YcmekMi>snPjLL29NMcIvd~P< zW0f*UN%U)xysL!p<*SL>a_x?KpYYF4-&$4MS)}&EZdY@}Gwu^VgjJb6%rLDzNC+PS zJ%`V^UvJ0|6U)a;XL+;iMfQDnZRNuG)r;KIIaRmVo9Y;G1%cAiSIFYKvQoV^R|bbo zKlB+*NsPreKz&qv4!ZcgviObP0870ynTwCO5Om(2ev5~hL!zQL~=A;P@(4r*7-$5M`(mh8D z!Qh2;h3Tg}1-Q^LD^W_m8{SUS@U<$p|1xCs>%NA?Ewr^5%Lr7pHSoMMxe8r$_5*!( zxC0JB-5!;DI%mhn?q{HT0sgB>A?r+Qw$?%mjEU0VE(JXb&bQ<+rNOP; z`O2D!>d9aGYs*KcF{1Qn?8J;c(9pIyxIYnj`*!B2*m^?6%lN3M>e>K;<3Vjq#DfQC z_jR81{r6wRM=Yr5eZKhAKt8nagJRnD%C}OcZ|^R5?m5;hf4yUn!c&m}zcc*uv(XH@ zK|LggK)!CH@sTdSsln(jnsgepw>~XQ{Rw^$m=(lyh*Tm_`4QxT51Qj4bep1!DiP`b zs9boK=PV=qQ`@57P}sA@`y2LkSVO}L={$&Y=YgI}J#yP#87n=QkzjiQZkNZ5uyQ;}3W@|zUXK^>zG(pl?O&D3j;D$bM zQV1-i#kMV3&7NL}j{zPcol#s{Tf07L-{lZI*Se`dnOx)2zc)AMXTt8E;pFgLSV@L* zuK3RBwE%e&^$;o?VNPybQJnlF@klN17Psq=2T z)(rPF1se@Y-J&_S%G)tE>(wU|3dc)O+Z*5PIozeCrJX;=wHAzk4djNFtZG)h$v&5B zSWdFWIsJ%W;ffOdY;S3v6CXjicJx@EVh%Y1$iq(N7)15vG4km z!%SJyGq&lWR`$qION{hlM`c6(I|sFwb1nGRnM^Gc{#Mqh_=NMUhiD5@o>o+stIoqi zrQO#N^3=TTFDikPGlNC$P)JnAf`)IJJ7hqVRNOS-2$N4yS_xXX*#WK?QTxXS+luVz zLeT68A?^#SBE~0R; zD>c>84Q0$nZfoZ(8<Evprq5ERnFf6v9a zqNE5ZR!mo6ly*t?BVe(4W-1|Kr0<~m%|}r>#qan7b5EjPv9i$bhwqfWF$FX z4}{&I0~w5@8NQ@S;j1=`k`>SjXi)vTYD4X8$iLUAb%#Zw#IibA{fdU_I&Y4DYY&R_0M%-XuUJ13GQz?C38HP3T!C{nUG^hr%76#E7j7~;3 zVG`1Y2Geo+yewnYTv4(z&W9Ryq z{}!Wi8W;ZngOZ%+eRY<>P#lrPW_lx4V${P@J$4ouAH=cBBz`Votu!V=MbojV9y_gHO`^ZVZI}2 zDbD4V-OlDDSvK+2&r>@p+0I}QZq%CQq>QaNQ>KI3d~ntyPt%5qJ}-ae^8A?=GOYri zSo^a!R%lI$m|v*;9-5G__tDNR{{xWvbuL*%peQIpu&}3Dy1P(SU!nCYz5Zfm{#CqEoKviyY1FatZ@^a4`4C`cMM=@0wNK0-UL$KtIj)xQiYDJB zW$W5pgX*9SDll8Klw6@^3R6xBYsJXc>fPE4Y7?e*{t#CUu#wf$nLW^>v#Y-^7#N7D z4Sw)o7UNc6zEhGmFy&opU5$_?y%Ds6ZNxb!@fGeJ+J5-Q?=?i6v>n%NI%#TC)IUVL zlzQv+sR<4OE-75fSmzLe5jt=bM*UgZNON59<8I@-#OJYw(@knN=ZG~xA+W2)r52OC ztiQC=ocaGMiLe#MTh)~#V1tQBDK&R74_$m>v6UQ`EbZPaJ=3sZ9Peuad{F4MmCTgp z%6rpSLHPE@L{Z(`0cZf}bBYtKv&UJ!?^BlNZ8ddxGMA6fN*7=`F4`52d6zuz{XHdT zBU{NxGMo3C@_5hh1i~4@AN##(EQ_fQxf`HLpkxr~CGG_CY@lRGZDqqnLFUj`6pzXx9JCh6D zvG!1UM>t$cK5jk(uKZXU0U;uoPk1_*9h0w+9G--oyYXjoPY0N_PY!96#RKT_Ckr@l zAiE=Dl0{}_8k@0A@gsjw+kUWJ=!w0(^y6b|34N(Wm*^@o^E>dB8>~I44kbfZ(mZ`j zvmckqCO7Hx{CpKRD`9M+CG2pGUi;VT*BZ5jT1uX0*S+THimAfG+ZKrz4k{SvWEdr^ z2#~vYtFfu{qFb-&UlXQ#dsBGD-Z7fV-1?n<&n;Q%!~dMsxRn07B5528G3v*7bDp16 z$$v!|Yz_3y#@8ay%Hzt?|5a#!x*1`9z6kGZ%}f8Cuj8~t*GKKx*_LkIsnlPRF`zmx z?Lppx={IaKmiEP`ivu=IINO~M?m1nrdj*cFX*yVoYnHtT1}QurB$>-I9Wv)Q84*Zn zFoB)y)Dt#s7d~-9-Yx{aXq~O6besec9RGT9jV7!3^2s$%xr@|1^#$VPhN?+T+;^Ah z7F6b|+LH%8`wcW(o5d7ZfOmR&{@l_A?|O)cHCf3yPkOU!y!}mX_1jLD&>wz*#2{66 z2JVOjfJ4rxa>Uz$3Fpvq0Kqj_?Wp4hTs>kTVgp=3Dx>qJ4t8Q(D5pKAZD3750b6I7 zMyW#KV(i(pof{h|!GE9CA=gi@|RudE6atX^AoOJKfXNNzKY? 
z%e=osM(nIJtEEzY55=(-I!JA$&-vvZKOw;-q#12tOgzv^g6*8Fs|BuQkc*xhqbCX+ zK^WwWgIn}gr=xt0l?b;De^cQdMFZ==YzJ-EIeWA|`=mxC1{?NmI^XZDI2Dp-)b(Z1 z(&Vhazcm!$PM$bg|N51V4=YdQofKQwM@0u`XTe2l8_)}r5mI+O2_ZIg%-rtMcJi+bn zJ*l~LSE~N(4qfJ?F7E{=?AL)6e!5P8l#*1GMgN%7N)C4CHg z-{jNVs4G3U1@mi&ZGC!?)yO`Iy&ReRCg+vj$$vvh-cHF!P;jnW$)1zK(Q#oMf>aIZ zWSFXXn;v1a;JZBEG;ZV7asRGGsR>=+(=|ZnfY2Rdxwxp_=L;C*ZR97|FT+jff=J$N}Wcivo2VU>z|-?OywV19$_ zs6PJnfBVC|Gw<`pR4nF7TK)S(7JCwO%ssgib6Vj8Awt%s?FQ)50+$Lk_Ah&O6Rd2} zb-!=o7<>QIr*6cmv7hLiPU`%{d44U%6ZFXt1c$g2X~Y>LLf{700wjRK+np_8Xg~dzdXObBt`mR!KTy-h|Bbjd$Hm31>tarcF3LwPqL}ZbtZG6&6)HR}+@99b zajgP9Q>pPk8ub-wf1I2Ki3}|SovnVX1L?_|ZY?vEZNLa->;D?MaSJZ}^C({Ant?w1 z&=~4-wIw|%=T=VL4#EPE5C6O0#xw_&90v91NIN%DTqxXyt${gefs`lPri=<@Iz1g7 zCU7X)md3<f~!@GRA_*bf3F7GyZY`tQxh)2jSof(D&&z(rg}ds)J9V*H^)qarxJK z*WeeGs*X<7RVb4~>`k}1R&KkvC7{mTOhf{ytY^utTi)hL+OmaCH(>aW<(R>?j(wV2 zXY!G}`IY{J8RKv9Me~*NVCOdEcS<)gb${|Z-_s|D$aP^-Bh50DUmb-ysPvDV--v9R zP6w51+HMV+)SVb$_&^m;dwKpI>&ru^fp=3 zm!--U!F`b{@f^Ayt5*GtO)zj+mUSKT+Wu?I&{Hs?FcGQ%_Qj_2t4+ ze=;idJR^lE#56@xdrvB4R`P^amLrM;?2V{$K6S)V)ji06I%mC836{AQ|Fqs7(s zo`3)S9J31I;8QT~4v5by43FozT;@XWWT}_+UtH=V%59e100qQjLEgaTd!(qIxcg&& zAjn3St%j5gCbg^NC}^5l%jzx)PMPoXKjoUsB;F^-?m=B4q#S zLpFHJSMo|p()kXDgTA!sh&-~8ryeBbUVSll&fefGyJfnvm3($N1R%0-SFV-^b;u7! zwO=Cq9^Z7WFgFr@Zju3mIY%MbZn9ljseo|hldeH9ddW@M^v3;vT4cR(|1LSoFtPpn zKNjU4cFFJZ_hT>fW|6wz!mkuc+A`^~5KUk#_404sv5^f_AMSm8>dZ!WtI#yy z)yV6pMJ5pmeZGk;0qNnaA9v#a)-z0ef#hf7L;nouTDO(K^*7-e%LP^y%_mpl%aO_GJO7dpD7ejsYnR@t zY1r7@dO2 zbS&IU2sI)#uZA8KqQgr|VBr08ICGYD$ryRrH8Qo6Od*`$!CH?({4 zo+S-<2mx=e(jiz4Nacl3_hY&`&kk-t>^;1VV zUh+bssSdrPsFkhV>yVXBu&EwpX6R?T%p-Uon){1LiamkDAA@Hd28^#|mBq#Vx|1pw z5<#j9m}<+e`KZkd_w+d8Tj%i);!AwF-Cyf@=2TNl_ZAN@7a97wK8V~)Z&CaU%$dVT z+G&b8VHNZwR?XzQEj4ukU5ovSdlcG1oNe`sO73FRbku8sn9!oW7wW;TJ@FA^$gjkP z90y5&wJWYvhG`bm_z9+rJna0v7%2yNvTTVk>c?hjczf$25hI!~CUP{@g*hqx1rd1S zu!xC4s5fdwCfb(l;8}3@=yYx@l2w;Q&Q|Y6JuXDVP*m(vePz&K2smiKjb58AOob|O zJ!_k>TNM1ro-*P~ZwaopE-uq^e%zmgLC2HJsK3Y{QVLhimD04j^X)}<21mh!9}4y8 zFYXT)C-YEIv6h&yyvz0^_b4`vFPjfu?D#;_6wljK^U+I7GQ)YS+8i?w=pRGfmqRO< zT110bD}aQxUGR`g!So+&O6whu_o3vA&vJKgv-wkeAz@=b?x3&I8&()X(}tD)HwyC+ zGnVJ0Dt#$;b5wX&U@+ilJ$>2%SOaGVXS)f}+w1NB4AJ?O(G<_$o$|Zo&KeC51l9o6 z2*P~OrONc?ngBepso2ZZpQ3&@6=23`G+kC1kDii|Ve z?z{qH&$BQ=e}vTV8j9mv>?s0WEMLC06}IN^qc17L{c$U4n4Kn@9}-`b z7$>ZU_Zt0$Q-BSZ{V`+>uUh^6oJ%y7My#oI{IH~#>CORZsi&FT%28sE*47TPl_@^z ziryw-{`2v^*y=f3{G!XCDM2F;W@0yqLCp*OT%k!wC8?Dzr^f3{;To5q8kIi)m%-@G zcp=SO=o%Q3zpq_C3G&6eSALzle2Eioh$i(vr(cM0LB;ykxfiW!j$Qh!dG>R*Pm)wP zX1MgZjN@sV&dHj^+3D8VB~RXKpA^|C=*yvb&mLxiVB8q$aXJv7rFPb~hcKVFeaGv7 zAU5adgd*r#n*+kkNvtPWt!g_dgOK~C6G9!XjbX30500ZWfF}olH!B+c9E4Vy!jto? zDQbfO=x`^yzE5ssU%(fPB(==f=hS(dX#B|Q*hG~fa7`l~cV+pvQ3}C!-clM(+EafiTbq;q=ZS8#WcYwTHg$U? zP75TEPUDE|ec@Uc3>bA;-v!Qh903CE0wK_a67g_!Yl#!wbKGIZ0Z&!jJ2CVuf})y^ zrOm8U+T#*Kd0^1jJ&&JjAa*R-P3xBH%S&W!GH_YOEMFsOSs5koL_XYT1WAYRPFc7s z`xP&RgX8{22y?Zizok06@vaW1&13?649FaDv)Ddq9N1<7T$;(rQ|tlv#d z9cg*EN7eJxcTqh>P{OCTT>mE6%?HtYk8s|F@Og;K2TrC-i5FtGd1B-a3fg9`ZhuVJ zR>8$d7g#hva#aQ4kQOsAQ&KTwLV!gVSx=$u1gM(-Hw#d?VIEcnlHb#TZ5^Yr9(nW6 zC$t3qxc2Wk5?eMRZ`(9b&@fz3330d4wJQ^T3JPTYGwmZ_WP6|Zo!dw#2}!-aW!CnhE?j#q0vi>lB%{}84o%S9^W` z)ViU_LESc%zU6|l8U1?yN`*T#HRSMk+rMLw)(61(PeqLayPRQOTOg~a@?`EkHf)Jg)aY@me@Sgal4gHym>%z^>jFUU(zHAN`#9Y0WMkj&t6yD-!m`F*hL4W$QNj`0%GZ=?5y5FQ$R8_%4qlS znSJ~S$&NU7U35U>A1uaJ@eaSjzW(*U``O>W5P`{q)L&y6D`JhS8L3>LmCt^_OY6lV>p6y@! 
zU}Rb54dE9XS2XeKoooY7PIs=NuHm!(p|#HE&~fZz5m|bBu8maWI?OO#py7j9XhC2D zuN(+pb~n&+^WMe)9tX443!MDuDB`rF$}*HYCxiLz3I#qh+KlfU*HlQ=e5NYO1Fz6S zzOf7_@?#cP-H5Dmi#ImF1-BUC{--7kdcte(C_weRAfDot6UO%K+4qa-CZc?;mSegC z=39F>dJd|VvaJ)e<2XXrj{s>>sG(0Km8(hhiSF>;w}I74{DY$fP!|~lu8k2gmc*?i z9k#tP4*Lu}TeiQ?B*kTo=A+ACzHWlJsenn;;&7-2lEPYS#~!Lt#Z;E{Ol@@K3@!hp{Z0sec-&O*{8bVWw>3=C8e}E~PI5K3 znWGmtyLkSH^+q;NIKH&FP45^{SNq!(YYj^YMTD`RXk16Xs63Y#B2s{2^q8E2{KRj~ z6%;RZA01F1=ujdSTIE*0b7U9_AJaRjmUb?4F16~U4oAK?g zX?cR&lsZ&f&{9Iw+5W!Nb5dJ_?%W1Vxx}L3|338;_8cU1G&J17t;Z%Igh_= zWe-jVM6^2tL{0GAMi8~L$Ccb4_W)W6Xs4A^^iaK^$TA3Mdbp7G_Fw=k}fkGl>|(iyQq^T3b?LNA^RHp~CKG*5GGni1lX zSyH?uEr|C)GV&sA5Ca$kSHElrbppL=N;x-E>!Wm-!~x{{Q7g!qr8zB=1FJGtCN5&A zH(=skCjvp;U2<3#o(4sZf~n9%g!SIZ{dQ*^J2%(vnD&LuP|<^dHQSz`6Q0ot1)aAl z7z6ApjmK?@*Pn-7s`M8bnp{yby~x0djyGmJFK_DXHQN#;UNYD?v#=9-qf7x3_xuO6 zdH67-#*v<#H@@M!&GR>b{-t3P)lBDcK~dLS9p@y(4KrVu1{c~Qla39PswW5U!rEZH zc|CRrS0RfOu`U_$p(gsf1NDSg!X$>4DDF2O^j-b5^643Ip~kE`72(eD+{iXr35WR) zau`#7N)vy9t*P|KwQ!Pza8Bb=TuOB+`Xy2?#RzFtDm*m1u{Y-j2_>5?V@&=F-@FJ* zwMxN2qC~E3xy-l1aAPZ(%z^VW5jft#A`Srdv=CEc5miCqxB=lp=NPn3z3%nThWBa2 z@BdF!(s=O$F%-7aw>;sXZ`0XE*7QKKX5U&>8V3Md8)iP}>!DXVrA=}8A^*3wTf0t> zQ_i!mTU(cRd(QR}&f37oSo<~GUVHVAS$5-X9fWNkAe-O3snqP%V!{ye-~mM~aKyV~ zzCiE8`0nC$Dw0I{36z^Kve~2-RVU%_3*qB7b(})u3zJW=Lciwb!q>IUFKC!As|2YD z|0-bRIgPv76LoOBb-D?b6#WDn9Unju>dsw-5@_hH*0|8xq+Xh z5i>9*oR>rDUY}ylazIxDZQa(9{jnj=u5KELg-0|lkKF5OiJ|1J*6V?Pc4Gb-5~cE3 z)0boNa+57#2t!xn8J)-q@>$0|1}d~o$Q9QnQt-K zzYGx@%=Hi{cHp^MbgSq`p9a?a8U)A#W|Ih<*CXh2y{_374-j?al_qQYZfKZkN_e)+ z@Rt^p@f1`HJHCcTz%WPvN|}^$0Zt8}Yo0mGT0w%NAtjQR5h#XdFyp4q>%ztay z+2tGMhs`(&*5xBjc^^yd(D739(q1XxY$BL?f&K}tw9@7?b15{{Phh%25TOjSjcIT_ zgBu}h4gdb$`8>Ayi?Ub!wLW*q+nMy##P9tQc{5Hh5GfnvU6#Gwk}e)}t|PR=cWlO% zA$PA~4a`!mk1LC9Ms|#?eBQG&U`lLrfUzu?*%$J-zM7D-gh}U+1-(*3MABfD7`_j6 zg$ief!SN6G?)&0FQ9dLxq_x zYmlGo2`{skL*^DX+y61lnP_VDX2Oh#cr}S{ibW0<=!u@gO(}A)4>O9fdP{;S76GEj z{#t|E13Xu{Km`|2aQ%)Y`(^}l_A_zIY$2_ypbeC|?N*Ff%#T6^H!a`O6*usauy5^})-@S%Q!3-Moz;~wyYaGx zwCKbAUyli`HNyOMi=kGYS8U1YuL8?_UMcIuMxKqio&4@gnj$paC6Mo7k{7rpJ2%0?}^ zj`({q_G+x^RSrK(u)~HS21~B>*qE}Y29>DxIyN9G#H?7uTeBRVrr8<~O%?A&O5t7p7SGs;~GOk2-vzkMyt17qQMW9Bn&zBa67>^%u#>kf9oh~f>+!bc=ivci1FM$UMOm~ZaK`|R*ksh97=X0);pN41BWg}})FZD4tM`RlK&X7ePiQj0|g zA0JB>%Bv5U=Fg7&oDtSCmVmPvf3mt$M^Q4vcOIoB>~EzF^~ImnwJANSTF;tivG+B#^@2Y;P7BQGWTOAhTzax`T%m5+Zv%~q(11J=FZWq zC&4D1$2nF~8AL=q$WpKZgL3E~&!|eyL!Iwf8gX$8=!~|0lI~?zSEVZT*pyv8DS;gg zplqe2@>GkxFBj~bS0C-=zZdysn`md){U{CD5F2mmf$UG@iHP)oW}eTcFdeVcRoczU_+C%qXWvBo<4q0Vq;fI~nM&(JS$(Rn*t zUpLCotAS*87tI(J%o^21<0U*eFdk}Z`FUM?@Bv`BubRxblV0-Pl)EPJf0uMw{RlDD;hMo z8VVTNu=0`F{c=$CL3YrqBf-dauj-RGKb~_}z$L4Yyl9&4{+ZI|;}=P62A_z`i^aN0 zWEev;{XJqqx?T|zi{y>m@Q)Q{9(riGTs<3!i&tfE<+4}q>V~uCtM*EJ|7v3>mr_Ew zC+bx{W8ruBxpD2YBTJ5{oQ)eKk@tB7KNv}lK%aYRgJjki>XjN9#ssp*aW^m4pmj)J zf%Rb|8k-u;5#W!U&}egQY@6TtOoiK^aEbDix|x^-ylxG~A6o^G2d&Q-MZOk|ET8=O z6*f}yJ?wgGdncw{;YqfL!Xh3sY{X8FPYmc@@XbPaSTY~86pTPJ>%k7);=~%@w zRrF)P_^b0l^+2}aq;^`gQ3~Q}{)b-8m1vGJj3J$(J*~5mHxW9FG_SUNrDSB+s)t*R z_DOwpS&tbeX%N16^_$2CjU2a2I^ixypv3IkN;6j_Kn8U=Lz`?%+HDA`^%eDq* zH8`Kxs09DBBJM=1!I(zNNSV9kujVR7z-*&B$A2z0F@xEF`y+`o^AC30I{OI$l!qTw zfhE=&W*Csj|L9@`uV+YN#9Fiz<7Mk-MGv|@R$hjOQr%5tJk{nhfAxXmT{{H-j z>vA#g-S_M8d_JB&&rZ3%t+X^J$-mv~txco2%Gr76_8I*Pu_Ldo`}=7hw6z=fX7tAP z%l{2^^TnhmywoID8EDHQHhY_+^0gkuGi4rEP>M>qb2MbKvVNCevM*$v$Vf`$?CdW|e90m( zKO^~wFD*`=u!e>Ug{Dh9eW>)riH$(l9DUYAL)uJRSxj>T5Rc!GM+~7}+Xn6a^nDNl z!bBX@UwUvcWd^RP$%1u^c8Wu5yZs67-q-y4@b?=bCKez+Fx74H67{`z-gSmWiTfxf zTKF+{CdyF1{^Q+_ZPP&why5C?a{=$f?`6#~Sij&}xg%J4S>1mCkEPe_jAScWm|l%I 
zdc?UFZKXH>!6lK*W?SOhH|{-4p_hxV^ewP)KwyspyE$p^JLc-v=u!N_oUqX#NEFch z3)yR35feKN)Lu-tt*o{h=BXBMFR%1%-qtLl1krO^{Pw#y_9OGC|J$OuW)Z1-5y!F8Mup1h(%Sr1%l+!J!Fj0&`b)Bo-G-bKA_xGJ!pW4SB8uMypV+WG zChgB0I6U4B6PBk>;(MNv(zWdd{NBs-NaX7UxB5W~jWN%m z2>#NXKt;Q|jHo;&7IYb(e0Y|gb`-7MMYQm)o21k`+{oK>fV9l0fK{X+KsltACAr2K zTj@cqr1*DOhl=(-NV|FAV$M!DnUAv%m>ZS_IUgLa{TFu8%}k`;9PVtF$EWR^=h-y3 zlmT^Vo&PvDfAXnU>ujQ&2K5h!uMvOnXOpoDhAlV|5i_se_388ti`IO+dz{x zKHnL6>gC3#!+t_-@a%e);NM8ehsM6Aa5qg%2}w&?OIQ}&SJ)B%liHGvG&4yu0QCze zltSAI+S@)YmABN!HgiQA8RGEAqa)so71x&G)4gvgvc7w0$i!Oxx;$Nn2_r_#&}4y( zoU3)u`c4sirtyn{$QtNBZ>kdO1!+8PNU_t;R*?j1#9mFefLrpZf|BetENK#t26U?- zmO5|BY`NtQ&g7x(dqv%;VT&bG!Mp!T$ZK_e1=|N2coK03(EJ0MaJND(-vru%Xeni* z)?wEEx4`OLpPe*OZYDGc4E+b)SWJU3bu$n-KG{E-y{1NWZvN!24$&HiF-i%StdT2f zzA;<3uQ*Wy?tHQ+Atq7of=GAyHLD9X!tYkX&vr*3xfTi702&B@7jmO|b0R>I{I};{ z#gT8<9>A~uk7)IURP>mr^3?drj&drZ93+3eqcFc|vc{0?R|Gu^Y)0qqSC|d***55R z;mFOGYOKH}BWYyk@8pwP72Y%%2BEVR?NO4Zd@E#jKQ0dFHm*PjP8<-7ge4I{-#t;DQ$znSXls0R1H-_}8(j|l$9#t(|lU`tFf@#696{NUyL$>5!v z^1g+PpnCns+4t?vxrZGc`}zOw3l4q*YnDeHV|8_Sx8za6YP-v(4IEZ6%eaMQTL^C}C%Us_W0@b1c{-?{f`DiO=#oY%Vug5- zfAcqoPA4#HfZCOtvtPp$em=!^n$oZ`v%9E)urO8S)U}#=>0emUy>YUEqR3D&nba_a zyaQ4*D+Co8Q(fI&r`sP^=$agFYo&%*CZsh~*T@_rnXw@%q?ORg=6EuMfs9WW6LgqP z;f%3Rd?^GWBWAvO7xGost;Zu7G4V9bA&0g7zNdeqK&qnaLj>L6@7bGKMiYddMf$i*%K7mk1br=$rxSozR*uo)VUNGSB>u!wJ1$N8O#%9y=ZqGjwx`+%&0 zDm*lPvf^@_85*32jr|(l_J)UU-c0Xhfij741LGDbISs$`zjKPO%k0I*XqFodVeA~2 zJYUKOhyu2-(nw5%=yIt*Le~57>gsq~>3U9y5dY2mGiEq%vN?RI(Pra9CQqaS3`?IT3y%dMpx6M;NKCM$5E`w@L-FPE1retj@QkWujdJt5{0MNoKvy} z-vcSxvYh^^pj0VXXyY$ia>7Cx-|vcm=88jSZF5t*IrCQ7)nBf@Xg?b(P}vnI!pc#~ z<4+|})BsESA9wSstCQ7m-svX+JYq;Ly1shOKSWCmOl&}ZG=H=?KY#d30HW#&qsbWg z9OLHY^UV5>$%o9>x@M)$3EuOj4XXM6T{EgCB0-u&u0;*D?QeVAH)(Bs3qL`^@Z>k? zwYO6~b1jsQwd9~JzqXFqp0Mqe+Zwg)ishDNJ4>U#nXaIEQkyU&Y}!I$z(J ztpoW9!sa>WM!(mR*KqaIoXT-_g+NVi;vj;zNQ_g8>m5R#Zw=P>>Y(rYi0aFTz*w9O z5UIKkx```cck8xj;|MAgqu5;?bCt)UnSSNeX1O;9EOdk0`~qKTr`n|$jd2UM^qge1 zFJ3EpIcWe8!C>cJYuB+v=$$aVaE!A}+F7i^P_`8lIBJ0cOq+}sWt9Ft0Ww&#lMLEV ztPmOn3Z@&Mu)#1{RwcdY${o+9x`?UMU0OHna`-cvucn~~>(oQswd_@#-2(M}dLtTg!d zQ2L|Fcq{JdFh%X*Qrb(Bd*P7aV(r?heq;zcCC!q>`&}}E^wRIv9g)l2Lrk)c`e8~@ zcIcc^^X$7OVxRXSxTV}2rYFHP5YStM6Ou$%j#FBM*~*xC)9-5DWVXUA04?#l($UD# z*46sE_~R$BHRUp@SjKmTw0^wkKZTf?Oe5c?Uyi8l+VXfupdDW#>lv%xz2 z(eiI+!ljhwExK%=(tD(WQ{eWHIJo0!T`Vs=q~;>~76j0NLJY)TC6c#K_P>??{jI0i z(iVayPb_7eL@?i2<8v3X8j(i7_$ev3A0*eGp~^Ai#gd7>{D4=cI==%w*L@9pkDIb9 zY<_ty&@d?HB)=>C<(Ad8JqNb5TfvUcWiYqxAS(5Pvz~Rz9zs8qGK$VC^`3H2I{FVG zq5Se8)~0%i8Kci6d6VSwBD`j;S@z<^YnOoIf9;7AT}8A zHLe`5>gA*!6wd+nBa9JbOtIR<>;Ta|(Y*G^oxf8~2UAW!O@DNFy<)M%`^JWSGMp+e zKM}?SZf%CWP4L~U1i1~$r8WZ1)~uLf<71d<5zD5`UTc6wnm!bUA zFs&Mk=&unc6F=@|ZWtFYB-?(b&W_yC;wR+X=_q4}7d-A}JWVE3&@v2_RPZxO z?X^KdOI~>b98v>Lv51s0&ylx*(Pgw+_I z)JO%Pr>kI9(c;IqACgGZl;l8wUf&;2HkneT;i5x8z(%F*O}*d7XkkhNh%>!UGBZEd zaEs9k>xb$lo1c?=`%76nN0-K$k^tvP7gdCxQ5>)xN7FT9{~R6we-M~FBu~I6mo*6} z`V2A9GtotHlrH3EjK2eWTY08OSm73w+bfo&958fp>z#q<^<*95=f*#T+=~h)>ciV_ z9)-Q`*MoZ`dwS*uj%3G4+e|9X)?zF^N^RGEtJo!xAqtzwkoDJZbZXlg~OQ8R~C6~FmDU3${00clQADV8$9$y!~av!PXDLEQbsRvh> zkz`FSjt67T?Qct6O`lAHl7m!f_a>N2=)G?X7`3jIZOMnq43 zurWMkYw_k6Rx$bT;ru42@>UKBl62MJ^-(UC*Xr})5J6+-&qEka_54ElPBglA^rXmR zL$NQV-qOrbk}oVv2~Ee*GEAX=R7o-tEM5O51ScTef@ESayR?((Q02u67VDA!>ilLJ z=j$(NRRI4WiiOo`8HD70iUo_3&t0csDwQohy2A%R@(+6qom4qIZU)zzxNw2BVsUf5 zBV=6cho0~W2yMT*u1^$;L3nbj4`1+3o=`XbqdHF4d=?Dk|H0Q_KB~?%#&+=6*fsfX zQ_0mhm!5g6PuUrafOEs*SGtSJ*)0 z6pUNISVnm4?jLwIfs-b0WpT4i4Y$S}Jc}dmXryM3AXyB1NacQ*A)Ltf{Re=(bV_$# z)$b8G{$UxiCPL=bD-u-BYsQm;8?p%B-M$B@T+7R?@OG68cDU6#Tjw3arWjW;T3Qe^ 
zyY2`NzT!k+-l*B}%oSfEa-{EO61UJF?uk4rDF5~|^gCb@*kE)N2kzZWo?xTRtC_6b zPpJfY(on5JNO4_FbzKl6rh~LT?{g4(xK2 zi5%a<2%kCUDY4NR@9UO`&?6}`+> z*7-ycXv<;i%#csWwHnOfS zbm}9hKudJ*=30wxN}7w{AmH4T_v$r13zrNsT^{!H8*=jEFq5`GDEKK<7Cu%#$ z)uUPl=VNUNzc_O3@aytk$uKH55jF6(ttmWwEr(V1ntb)Cp((y6Ii6`6E393wI0S|4 zqL&hfZk-Ktxi)mW7&SNw8N?8S>1Zvh``u z_RD5ym%ZY&SbU1w?0S1uJ^?%@pW0g1q6}cEi$9fm4(u`3kf0o0HWP(#Xju8P5< zod*XGy6^3qMy0_4&5*-|vKp8Oi&)&v)Y79EFFPxbSDCbIyCL-(GWiN*W zzshR45{8u((2ujh7~Gix=fa1>@>WVV7)WZ)8R5VMRMSVL6(qPH?9gW|+)Kl_{3iWd z^4C2@+&UeV(3a}piMCd7ddtXL5Pg3r|S zC8Xjt8G%}2se@A7HknQOBDeMeMwnP!WRAX~FVK}NgPeIx ziC=$aEz2MTBD@pcvD$8Jy9Ifp8l0SGkf>h97i;T6=ErWUyu8L2nyG&gNM^<{@Mj89 zjr3X@kuPs|b=h?ImXn}{&nV?vfFPxLz9{{>B{5h(zoLg1rTy-c&8SvtBZ#_->}C%V zG#A0g3-V`W3s>m-0H|(Ttq4^D|1khkL1KZ2(X(E$=_Y@@X`(_zoYu70U7x@NqBB zNMa6t00y1Dd~%F`a|ib)Tc|nnhugOp-j1|TXSZLa3+MP<|A7HZ_S*6puoe0FU8wZZ zw$?o-Z#PxoG-i%?n#cr^8&%&Eml*y<^iN60R;DFE40Wh6uSZG~&cuJ|y;~+CYOzf8 zh^gQckSvufZo)P77TqSAO2x z^ZIsJd&B~7*b=>E#BV^*t$_cRka@y3(d|#52G?ZghaVnEjDj&z8Q=Bu>?JldJIi8# zF_;GEvXu-YWFZasj`-DQVl7GM9_^*Q}S0e#CI09L3 zjo&EvSiIKmKYN_}dd0;r@0-^ndz51TmfUo8nu3@GdZOJvYM73${`kyYdXy8j&v z+PD@}iA>S%)3`N21YTRed&U*uTADf&;|*d3FE&-~Z;7thS5|A4ev31IcFDR)9-@+9 zf8%S>mr2~M_H|E-z3Ms<{6|%jnkmo>$0%RENnf*?v}8;!-`4PQ3sMATP#cibdjo&v z!Cpi=uxMu1o4c^Vmff2{P(jVkuotfdCxY!%bbN1Cd3s ziIyDR?}8@{+)MA=nyao^1M6);Ddl2)ysyJ_l6Od9MS8p;x6ygVh3R(`#N%kAq3bLs zCK9yx?}0s!)QsRZD4Ous39J<3{>H|P&Bw)>1gxd(00GMZcuO%e_mqx_%LU@yo$uDw ztzq#0nYyv*NS6aU`;Y43kpf+J6%-@mk}6eoa4&0-l-ZjBFW2DIlSMGEsJG0y%C>a!+Y@RKrAnwRLb(;$jdD{uqu^6|S9<2sf9*E&8`^e_|KAGVrzQ0gy?IU+wx&>`4$zA#C((jzy{+ zW_)pB3I)Ng2KH`L_keGMYK#DKnpe6@K>8vN7V5@!W{^^>)1h zp5;2*-!7F!%_6Q74)$M@Zr)glthI#e4R!V+>$%Cnqh(Om7^SmOyP0?17~O)8o5S~r zO9@x}=E^hz>{=Z34mMu=!P4U6r5CTmza1^<1=0kD;d`i^=R?i z-M^pg@-T|X|33J~IQn+AEAqexu-GG!!sLz}fHF`W7Ch^wsU?>4A?Nq2 zE3&HEmKb#G>{t>QBfeW1S$t-#bs~3`f9U;KTYQQozG>EfnYYuvei0mCG~Bbxct7K+ zszbf(Gx^l`vzsr5_u z!AMBfugl2kKsZ8wgSc%9D<=eEV2>>u8|cV5eO;_YPK7rXa^2iz#QS9wraaw@gPRv3 z;)Z@-#V?z}43oDHG(`;BVuu=T4-x1!DRQe3n!LKxcRy(i_t_Qz!P6mI=Dq^MzB98?Pa~fjZ(CS4QmMV>xZmbco zfWB)D7abHU#FDh9OhClv`;5C-wF3mZb{R_pGK+%NvEv;I>&V1n5uZO6+Y#w6%5 zBGclEmU?Xe&d9zx?Y|{!uzqps*~7s3W6#MCDn*r@s35o*yh*29<5>wOnMyO*Dp0iv zZP14pSU6*~yn!xVa{9uUeZ1McyUyCq?pM4U-zynC`I^X3EPgz##(Oa&C08KT&)5vx4TD{E5R*?PV8)N=tr zU<_2|0s;@eR&$QEc5Z$ZeoayF{lnpYWdC~PHoywr-yLxRIRL5gGv&Xntfr^JyDm{h z?><31UcbA$=5&O^-vjuz&Ic8zd|UICfyCs>a7+E~LLKjvt+QiD?o2@F1Cd{IKWeHq zi%b#MVl}CJx~lX`!DE~>JM#-=1X@MYoh_5e&>mi=q(3e0gH zUF7wYh-S8)2XKnt8u4<0Qt$m%>&y5d^T`(Y%}p3gF?xm*3c`OKfQMFPxs?uk7{tXF z;?VJ-wjhYE!G4g(_;C45Uj)*Lcrh0<3;5Lz%@mrZkumV))q2oo@VYHBF=yV1Z?G|mVRbi*_rFRB1e4{q+FtM|Hb_0p)Bd*f&% z3*OIa+9GG*ay>3Fa6xip89Y1w89SWeJ1~NhW}wvri(VhZYh)+$ zFmq~*^GW+{m=U6niB0s^@qyAK;>85X)(bXm!}qdsEMom;ZQYFI zgfhOSo!M0|e8d0xuNK2BZ1?Y4{qK&mkY9bbH49P)-5#v>5t0q~GZ3^7<=$?|VVwf5 z&WhEtI$qvmqYW_`Nt|DTaL(o@DZcE#a0X^u4%4`+uO!d##C~anMmMDWDO{j)T_CFg z9q|g0DN$Mns6uQa3w7f+v+gpf4A4%trORkq!%CluUX-{W^NVi()aYDOGdR$cm~#~9nTrEvN8lJ^oETJZ^A^hYh^^Nuw4m4 zup9tAtv;Q9+WmQP?~do=o$#ifdzJZgyJN$@?jHdGEP#UQ5Hdk)v3~uQQ%~gHy1LWR zuN{znAl9W${|fC><(i%Q4cd-VFKHia#>Lxret(#=mg3F?mw9AZgryf$O|HrD7N6_f zyHEGzJh)&CtvD#^b(ksQ$+kQBgP%GHugfOGz|W7&-Efba`ATs@=vvui)=N))3)K+D z%h}R+mU$6DqvRGKRbY3}TR4VqCCuZhlr2hiUTkqLzu(vU$? 
z4ykU^gsaa17qk+IFc#i~3$}7VGSpun3xH5Kl9@!Lbk}umkMdav$HB5|Aq6489qPtF z+Sl7iHd0HiE%|(WXO8oYZvXnSoOE-%aOeKf-uC$f3ZY4%?1j4)&mgqYtgMXSD#2y1 zo?^_EGI6&f0q#J{?%R~zkdfn?sW)Di_ExD-Ez>DMkrlLu6NFTKE_J%6_iOhr0#XtX zm(EyA9eMim8Y80#02ZSg%Pt;!)Q}girV`bzAT^NfC$~YS7|YvbJZx5ymbaE1 zE`Etaldv2$RzO~p1iOp}QqaLL(Q_n%-lHf73-)cLfl9MzW^xLw1^@(@bm9XWwHrh4*DXy?3Od!Fm6|AL6*aJ9L`t}83_{VCNk8dO;rbq<$UTs`1V2ke>tW{ePDrb_~2ks z{O~sL12K$Xf?mqF%Vgp!_!GbyzFK+-UWUxPUepb~moGXc>vx}2t2S4?4uAn-#|^8k z=&ORfJ|F$mr@OD7eZqkN3=q2KkeM>wzeQxDOP_@l!;1iQnJK5eIz^RkVpZRMByMrE z<6)Oou5;5^ZG@GCq-!5bS*;!(9lky2*ZS{m zpH{kaYNP?@9sO&>_xH%&-5a|*6zdJGC$6r^aoM_XFdJC^eb#Ub)+uUweSDCRF_AK% zkp30Oq$wS9)N5CtG@+aC*XN(>SMFF26dD}S;#%yKXRWrr+l4bSB(1cl(N}TR`DfIT zlg%@g<&@ps5bCPn+fI4LWNsM&Ri1OfX!V9$a|%q*55;-q#qFOs#Ac_r@8$gEJDMpT zs!3g@?cA(}GW?UdfG2gcMa~Ydz7{-OMZh%#-iAtuogO4M(s2?d0PU1Mwr|G7Psau& z*W%t0)iRAyzB!9w032SFrzS2t|OT+FF9CBI1X+(sDYLO?jVGV2AsZFYsfk z-~-dKkO1A6suT{jf!7CX!ESNWqvuL71}2jd?})s-!udd7enC4PhKl`qpDn=g<4%vZ zwq+1pMuu6QqP#M=kBsr3+T|UEu`sd%^03DGxmXSi-$RT7avEC$<$+#!NN6O;#r6*i zY7fItM~)&JVx=kwLBDBxFE=w^G76#6u}dYEv~O!mzpeUpSXDw=eMOY@rFgWbaLko9 z+>60}tuOd2SSnHRJ3#S2SAh7t{4=o49*g;CxuCWqXo!)?G{Z7_*{|71*8j1&*GNEJ zAZH4o5i7XWxr6ok?juvV&CSW|$#1V6w_f+QKeP!Ea7{L6t0wgel2rIP(#$ZHvLsJf zWtLo;K&zIP7Kl}oK*K6M?abunf(0s*N5bx&JbIW6eDoprn$oq5ofnQzdXgz{nNMFK z*64*UH(2e`2hB-M9}s^tjWA`Bg+~|HTyJySIcuYSCPVvm{X@OTFcvJlO@I$8VLus| zV201+31D+1r{1T)FH$Xk9%3Gj>dDzvS0O!$P|2HltISdIliHOw0g_PmN_nk+!fPne z&a6NkCaeLEWsm?G-tS%5v;PxVF=1gI#tKskihmlGT<=H$wqq?~26xH76$@0&C~xCQ zio>Y4ou{}cUpz=(NTI-D0AQ51eG)Q?5yIP=sQyoG1+YHY;r}f`O?p&K(h3am70*iI0?yvi^_a`eSzvY~4o?E@s9XLstWSXAo1BbjcHFZl4R*GrKGHX1=8mpuLTwwI|f5F`=iPJBL{Qyi)#R_D45> z+3UFG>BCti6t=&2)tlwoNyXTbt!sA51>zC6FTUG#n6#CAWy%Nx5tXPkglCgo9CRg& zsgf!T$tkZQpwJjOZQ1(m5QZ!5B;@wuK*;G^ETQOJSNGJ=*}HwWbIZdVKWZt#Wk{@+ z&MC)FmJ|d6L1C0HV+!!z4Nhfcg#S8b^ z<&%<-7r(|xXYy=km_y5IR&W@rlNx6%Z$T3V-qUR-*oJJEGTai4O+LHV*PF>W`tYcH zsoA2UF&|pyJeyL%3IDZ=QGhU-g+R0onMQfgom-DEipT)PyUY-`W@w&T*O$aM_tlQ` z$&)Z)CO7LK(#qx1l<)0a5L3p}pww;L9?J@qi#ziT24BU0eNi{1-s%Ku0C&Vb~lfrNj2D5W@H+FDq!1S?TiRVV6WTnkd zt`O~FtT73&`sst7J@p3xfcv{V(l>tfVDw4HX-Dr&BRJ*x)bz}nCuL2T91N$d?dRv; z=YYi#g2Iuaj$9|mnSS}Wg+PFtr(~JcToQh+NK+V*^a%VECeGptN7lQwQCZ5Nqx!$7 zs%C)y@86uq*bCK?EVcC6{GpP1u5#=yUR!o{AS^62RI_m0pwatpIT*l9s)}(9x`^7s z7Cqz8a6K5B6GZ#s*n48OHwl4Y+l0wA5|AbaEoyMUn=Z8(Ka8P=Ke(5VXNn3C?8_7d z5f?J>u-=Q~7N!Z4>R8hjgx~B6L0n91Ff51BMt4o-WCB%;a*Lk@CIvQ@^aPkDK*Jh!E z#*?wV!WI|gN0QNgvutp?pPuAXSHO0H%z2BYe`Fb3QfqUN4?IsD9d+xj$v`l7TWeXE zL`d;V^UAFtNJ+BuU<^^l2!>_nu9K9kosTUhw;ZlH-8@$RDFPVB=j;DY_4V~Nj{d(_ zsTIVe15Y{$93=6tFBkRp94^4eDM^yi@x=VzD>VQ(w>lWbSqCGuH@*p^pTWJngbvUOYz#r8m|>6q8KDoVL8 z)@2s%OEk!m{g|HlDcA_=c$5$y0F0qylVYs1@_?WJXx%z zE4ojjQ>KG7ZVq{9uD!$uelnRXqpWsx``rjo!|h=?PG{DY+@SEd4A~6rdQMtbCWX|D ztscbfiVF3ObA~u4yRY2RE|!L;eGTpnBR$wu5{@dvV&CvzHQ&11u@4xuZX)}OU^(;e z&qdyeP_dWS{(~V{cUKFnr=7!5D0Wn$XdpcId@F!aR)F_E2pa9%eCcp6b;34ic7xuL zlGg7cjOCCa5ikC3juKzwk5@dFK)ol}^*-DEqqoiZQH;~qG7dk#$4E@tLJQsdD zokU19Z;E?dZ*GT)=169r7G`oaZjgoxdFvTCiBR$4-pMPaTx^2@mbd~XwSH)4QC2R* z);CC_(39~fsm#m(i}i8Crn1@GHKl>Mw|o;|Wi7I4gJr-Zcz8BLo%Iqj5>1NZB}iXq zE@d-Qx;r~HrtAKi;tMXCPHH7fiHqWD!t$IPxt5nXJ}?*anRlZ;5wHj}Y{`d0pUEaX zEQ;GklH_B)UlN&pkHwg>i(DMrUbyWwbGK>k@XzA7U(d|OZ+Ip%vR{-(bzSaU&dM`{ z$$UKqs2MC2w^g&SEwc(v&TxIkql|yE@R2XpHI&nbpjVc@-7&YE(Y5UhRV3*%LX&P( zt>DHP+gepQuAJ!x7(MJof^3}V$1lL)Wr{3!Zx$|(>lz{r49X(G>BXU8bqL!E|lBPxS}L8e#!;A#lNZOh-jRYj!qW@N-S)9=s2 z$r!~uJF9H7s()r~u))$KsOWgiw;JRLX31~U)!AK|{60TnXz_>VOY6LgQ3{!cN6Dww z+QE9CI{~USsLdd<1}f$y!l4aaX^tLzR%{=dy9fcD$=fQ|7ncgI6?fw|r+&twfxt_%K)!tZha$ix1$9QcCEiAuZ!oVKaRKgDgK}VTa1FUSK%g`sf`5j^fIWP 
z&X6Lo))y3h0YH(3q6gq!CpM-@GSJ|N{?ya9rg?ck)=h6uWr$>^kO7j3qoq$4z5mSb zCSRX;R%}`4RnE!N9Npoxyf`>kN`b@XMIe8M|0!j zmzkCcTpW~m(qUTN#7*R)Bdub=OwMak7EgJL!33QOD64Z)ODSL=NuN6>F&r$YG}Lck zD)a;?w{^G@4?~C*{sz9w{_)5j8ynUD<}tsst))a`VA@fRHw&!DBtta{pe zdMF_H!qKs?sIX{H*ctwmkbXuiMXeU$<_f8>&CE#oG*ixvy>b=o9Z?y-JLWnD8laCFTiU^w9wC~1P(_YI#Lv-Q@s_Zame zd-`l%z0VbYr1=p)qkq9K*$^pHl;yE7p5l~9ym*2;{z95k1>wC{kp<1hBy>Vw1dnuy zTK2^ZL79z^&Dh!;5TJ|&j`fZ@Yc?Q~Wnn=X7O(!xu&6U55!jaQ$0O_&x+LQBKVxIk zunrgf<6q-y%9gZ(q-Qqn_8d;_?yvuzR040|Z(#e8Nw@i9;WE*u7O?4a`n7XH*Y!-GoC{KyOX1f|0V!WKNIc$5OUg*@Qv1fpEh)jeZ%f^TEP9<^~ zRNyx>A|nEWUq5vks-+TCB+Eepv-EQ`RgRf`=!XDH8b*f<*DJKz@*(5%@WYoyFe()x zPHIvpXgYLj6*q~S#x`WR0Oth;11l5AJDuIpF*|?wqZNhO(2@*4!=XJ(4=7a1Ho6f` ziVcd)y+7LwIG28dH!s|hv@lJkp?k}{AwKgvbl}8%JR;~egj^zAnq7}gSe1eA4q~0l@O{Rz}f)-pL&X2y)~Ia&m#ueO*68y7x((zia=kO zj5X|$A6FVT!B-5h%RqdpZVOfHXdSKYUro9K&W=;rQtSjwKJ_ud{^u(sDY;=xRUyUi z#%w7crp9XiBy5JNy4xBrOHrYg6Pz3!X^fGP1rlm%=KHj|3 zk%;hMAjK!K0 z_(t1hbBv)jm#G{n16`x`F2l8s_3?J>bsw@NO(^<4k>7Syn+2X}Uq%1|;OaM}sAaeR zNWU_xEds+?GoE^<_{qaIi%+MsybC;aiq_{LSfRs~fmyaM*v5{utou%)V$&ocziKjr zV2q&DHV|aWTQPLo)v0n~>~O}27X!~EVBz}gO50z$LTF465#<~ESPW-6OG9XWTJyxK zxDVrq{v6jKV*Q1^IL)H#AHA1|ZOn}JYw`HHD&e<-xyy-zCa4#sZi!SZiwQ}sF6LaWaSPiF4!9C`j2y#0Fcm&PB&3b9rlBOwISUWof3Wu17_gC~uX|U$SzlLxv2w*HE)9(*ozJM*(VP~w z@VyfhbZ_x~@}?~5ZFy6Q9uwp%G3Zl;eKD$MH9f|XnVTA50GEpL%iC$cHQTbL36Ir& z@83#q%i(e5kMX@dgnU|@KRcVBpMP4sdju&0+?TKyVY#Q3l%8C_PK&c?6?=yxq}`wP zZ&@Qg4Vt)IR42{q{VTI}Sf`Hk&JyH#T=Q&3GfWxV>tO7e4`N3SRu&dE?}Woj==rA} zl}{f4!8n7cYoM;KzNn!%(&$L8MNF#l@KJnzcXXxkn6bf|trs;S!a`MX3J|(^R9N{7Z_P`@#zozqiP}(0uUWcapB$yddyW4@`bwBi@2>bK0C5XB z@cB<^y8OzFvjEs=`;E=4WgL? zCqc;A@4_Zx8hZ^@5N|?$IcJM8XWPMbJ1-=g&|J^)NozY}xS2rk7vA(Di#AFQq=B7j zg+w82BFB!F;&f%986mU^=m&iVraGzJy6G8-t@j*E&)0A@e)BtV*^lxIPK#Z@kn^Z3 z#B7f`ia|*z$M7(X$XleBd!oWD<&wiRI}a(Vdjkpm5!lA2FXh-2agrG$b5udwsa}-$ zVtp!11ZTv=jttr?!Yklzpn-A-%d((8A2E4%%tMqIva>qO8kKi){-R99Xb+bsQ&5}& z2{ZcjAr23CRriDL?Jwlt^8bAg^>1_V>ixri0P#-9!tf@Uy+1ANDkEnWc-#=p(mCgj zxxZ}x3YSp1HsdX-Q~}c%KV+q^F)ZUo2tnZIw6%SZd-g$`ljV+r9`pi_Bsb>n${?N% z8kkQVk&Uj0ovRP$JABz}xs#cgT%WgN6L&TXi{i9-e|qfQ4T%``^Bp)Sh;>%Ockl|Ifx^S zvcW67%q(P&N6H)y2!n@vQiCd3usxg;X7|`70~K6jh0m|^zP{{=oBi!D{~J4G|Mp8( zfCL2P-Xw6MZgPG7@bLHVP|f4wclQr~=lOl*tZmc$1}3_1b16$tDomZ;c&q9MAyO&9KXoei=7GJtu9J2zns5u&cb&On6bo$|FzJ8Dn z?6Y4O=lQUIWZ}>D^{YoaJ^M91|3-SWnRg_A%zu0UUIIr)$H>3mq_{`7E%EE^pgyxX z1qbu>fwtxR)3?-XQoMar;pcxmeFkmZ2q#&=cb1p+5c4r!dF44gJq_UQbC^w?{GP?i z3bz=!E+lAflw;t<+!0df9?9?Z=hZ= z(lf^cE8JNAez`a5C-MxLk|v0nm?jqArv%pp7{u*Y zya|z%VdP-W;{{i;$7yKK8AOcWQ|oHF=6Dm~ju36)8r-X6E^>EnEV5^Q?z;u%=vUl> z|KsS~q9iZw!F<&-9eDAt@Ml{t^ZoDY># z&Uek>u7w%|`bB@7_?^kKUspiU zG85iNq^3B+zX1zLRn_b=m?&J8svZY^cmAj=n7YcrpQR%g4!hHUF@P`p%iK4pbcl$Q zTUD*U@LlW`jrvTxMyhI%z;MS$z+wI!cs#7$Nwuq9>b0YH`?ic-N!i;N{}V}e2_>_l z=*Y6R%6;Nf(BHgkYb>hkx>u0kg7cXXoAv( zYZyTE%AcFKO(D=|>2_SH$@A3t~pm$J#=jMM;AhJ$IWwWO_JOvghim zd{)`cO@s>!*T&%WwHcmnPraf9u9h5%l>wQ2IUy_JMSh7_{iwu1rZ@~QXk35?+a&yf z7*Cm$R=$J;7#wTw{e)NmL6=O>4Wpwp2zNVNAqfDmsGDnI;dgp{9rlVIZbB{9*cFzd zf=(W8o;7PF+`XP#M$1m_G?Nq(!yybK4A?HGqM|=m)(IzhR2RQVF%-(4tWbW8c;zXr zVr!Ci0+G^cf(Ada*jkDrEyyga04`f%cXOP3P;8%=f@8^7@oh&9A@I4I1HFZoV!!3v z8Y?r`Xp{u`6sd=UwU-%Q3}g<}34$+^^#&4~WtVtPUe2pZ9(QhJIGM)%SMhU$+objN zjjFpZOJ^UZbz`GywQ3cR?IzUY(5)V6LP`DStI*zEHg+jXF<3##@Tyz}-(J3~8vcD5$Y?<_uj(IgvF z{2 zy$0Yt#lV8Wz?-+F_T0^I139Q(vRpS>&AWhEF}RS~LMULGW1SpJqUGpiC(U}#9*5Wa z!j3umyl(()k5#0 z;0`XYkH3pb@Z&eMn@7_f={@(p7Y$2>2U6tn2i+Lk!0Q%R$3QHt&n&2&-&6;VrP836 zAGB|v6a|Bb?~?xMC1>>+_G{Xq$&j} zGsEBz@V>?{`j`jCv79Hbzsgmx<{);0rZVH$gEkYV%t8> z``uku0QzySq|$$8h!N7Rx=vFxf`E|3N5oaWSA*$(2aVdKte>FUiqtbVZhj0`pcqnS 
ze~Jl;3&~fp=YXSSaCV;)+U6pWwLE;U;S*l(d=p?Ff|OM`92Q@?GBTfrgX0+0b9u#rpK$!_aA%R<2Gj2tlHg$NUz1InwE z1Q;mgVuJmc{Zh}$zyh_1@mAuH8%r}}l--u=bg87~9gy|;n>YjhaFM4=C^&(QZmkT3 zE!&5A7P}-bp5M-E+Q~b=38o3g3oAW9=spal?RigMWcLF!!Ti08<9=AR$2)7h8xo>S zn#UsNs(uc2R>w?cw+cNDGwl>Ua|ZjuJdakcUCU%Zu>xp%P>h}$?`^g4zYcYdJ`TQr zhNDEAHhNx!{Q12;Gt=>7KCo#kp7aZp)4}k@Ix@2e)8;U|8q(`4-F}K2X#@Ra%}gfO z4Bk6)Zs#{I&*`&wV0jZckFhZF91M5HYM4ED%Sq`^jB_L!T)(}|q1A=ThkTz>5J%J5 zl|Z$Kz0v!Yzj$PW8ap2^0nLIx5=Vae;~s<$jiJpvsMX1u;s2iI{I>fsFDo*bpfnH= zGW2y2kEDZvdbj_=1JT)@x0P+*egS_YjCQp;cym3Ew=M#k^OC20?raR%xGuGOKE|}b zX*gxbul$2nf`zb2Y)~AZWbgurY+p^k!O7H4r=)_e)xnoED&apPadTdegh%|fPT1qbahIEi*o|>j=*$SUtCB*?8GIvY2 zkNJV9D#hRLnI2QJDHmiGVkn0;fkzPEqO5V>m$Odf1Dw5io`0uj9Y|#;(XX%w_%y94 zSdxHmorwV6#BkEs0xgHQ?Y$e744JB6LFi`M_g?6|d3It`6pB?9Jnk0vID=1i4G`d39`E!~X40=)`rW?jcrbT035vX8 zaj%=+PpBBYm6MoBZmU$pUA)w2A=m0%)g|9I(c<-6`lcg>Z*G=F!UmPNx!S)HWd6 z1M_!$K|b*^?B3ea+WGB`oi%pT)@5AT$Sr(iYt*|ncQisagc@77MIC)76HU)>(|Vy1J* zzIiOq+wA8n&5o{P|<-*xe>@BbyqTU%C6#7yx_ko<+CJ z+%ZAl$=p#Pmf4w*(8_Qzl-{fjh=hYOl@B8N2>M?#TMDR^8I%p0*1bLP(PCP z^H&@uT29cbKtLbVYEhB>Uqw-VV32)QHci8_>ICEU5 zbr&LHRmQM!*gFz|c?9OQI^e2qQggF_#DeKB97|YBZBzPiBn}fUDHgGM>1YOL{Z3;J zEvesmRE(edE{A5QMRy`zV;?S>KqML%&(HfbZmbxn&dkh!sL~Io)Kllx4%qzIa4~6B zY=MMl)pBT*AJ%aNZ$5vPDvF;9j-&2Hyb6~=iC-1N4rM+LMAj)=($l}at)KjnTLx$x zo+Bfp4kXLYF~87IEV$gMpY1d0ES6N34|Kj!;l@0cNCg_j5csm|^S25?{w6wR*3rlo zIB!R{1r;o>^}j45v`75K90H%1b#KQ8ErQey9JfB9%NgelJ^--L(x$G{BPoLiI01@W zj%AxMGCj8}es06NjveT%2K4ZBITQ}9Jqt9}*3i)xqC1;?pyL(x2Y>+^-3$-z5v$EL zJu?L{NydfHm{z-319i`ajF*<*?L63@ZqX>DD8R^xTlumF@X?JG7XA2&TKdkMzaIzHWGBW^rcf zc0!JT5Qwt#XRq>g4#@n@3tQ*&N`$6p2RWvNXlvB~-z@seHfeDh1VV$(LukgK(`UI! zb^t_)Q^lqL*z`Pilwr4}^CYTyudI+93K^+M*lVV#!kUb15j-Y+oVW$S_j0 z^#T`Qle+e1E8zz)y*5wB>_PBqSZyeKRJWPy`En4*w611MPfu%uRkJAU-__?#?c4u) zC|akJjZ0_{go(FAnaIQN(d6_LaN{k&t~$SFdKZg<%&)u1mKX0v)1UG$WDoZ6bx@V) zQFnqV@%}Xs!MR+n6^@MOO=$Yg#AMbvTQh8lfZT>w>oq|iKJs411y>>C26d^MgC^?m zTOA~Yc4jKo+HJs>5s-B#OBLldM9QfE4a^GdblzC@b)-&LS>tY;O>wH6Vjhg5UzI)q=y ziXqltaYq-JCRXV<5jajnGgTe^eED7CrwaR>yh=K5oNxbepkL8)l4h_-$o^3`09|029Blw+myZ+(@4O#KobzG|V^qr@BL|`QLG+C$^AqHx?wIg~H->VG zHq~w`aay|-_o#|@%~K=_Jjw5jK!aIR{M*vtm&Hm z?w;ZV$kRO@Sa@dV3d{C2eKfRDl1-$!4FJ1hVeTJ`u+8?S4WF>p3DK~0PMRh7AQzuR zDgKed#U)XYRNO1M+yVdQ4~j@6+$$|_Di5r00ph*SBC5;xV@btbc@su(KgX^{9DBpdTYD!XE zTcw-?7fz^B)Z28z0*IfKMozbVoI50XxMt)6GU(_B*U5Vdd*8phwsIej9v-jvAHfny z#Ei@$#7CQe$_s0VVhsK;*8rcGnET2OCyR7xw>#{$C9K?pw}RC;9+Y$NQIP;IICG?- zc!_5*Edue}pQrnD;*D~-ekNxfB)&u@S?u{m)LPyylS)FYr| zzl!T&)hl;ts%mi{+1zkrd}nP?2l+UT3Rxm^e;l;;0=LF>lY$O(k|((Lvsf(ark^o} zO@u(_|A`zzOZF4?wn#<3gMcV*kPtxH6^)**oXvI{()4*5%Ii6uH$esg*Ap*OQ>Jd~ znm5z@K?s<)xIp8Lhy0n{SuWifSKFCmjjgRsiCIlLsdBESw|S&hqhc^Z7dEySz(v+F z`ye3%M0My^qa!23Bl8?GIW*KU`JzdT+?zq(ZSDAY^dAJhA4gMH<0(jGvF?xnze?x1 z#SOdnzWnLX_)ycNNwE) z!JfJjcCnr7v15+ha^)vrCtZfWj7QVW(ax#{tRER$$yNVC9f0H>G+J?#YK-pb(lrA+ zgs~d~1XB?7CL9?_p*cz2NbyhQIN?WbX+zPrqiVeCGDdp;(iPrsNVr8O3X$;WL5Csm zfQ;87vEM{~+K?5isybaaY;Jl|#0pOa0NI22%k$ffBx&5+uZr|G-^mLyuwOpjisAm= z>DRgE`0+^V#@rHEztlrnk9ZZ!y*f{PN|=})D5pI!XrsaiOZ8=)i`?reP&b|5rZ~ahrPOZKXWrXqCBCdnqp4mO z9QLQ~9L9Ytun~826nFDj$Beq*3%)mN_DrbTgjW(NH^&D|l1px*#Zw^$(=&bL%nKAU z$1WDpWluO0BZ8&N1Yg@i!r(6}+A=cOt~-CK2u00ZT^edw06@qzwZgwrzlHj2cbV;d zw=SRwB6!i)bWh=6IFN#>EE;%Qz4OO)n-{izey6Mb#rY|)($gI)W==cc@oD!}JLE$6 zhLppL0x#e_GaflNaCi$~)f*fdbocQg9K%lOwfn`?wi%EbAKuuoZ93M5Zng1>^cy(0 zkg_q8cO1}MZ2Xu%Nw3|a`z-o6EE7dKef$gR)NVm}Icx2b)~`xtulZ?ELVJv1%!!*Z zYjg2PzY3`%AJ@KK$yQ^}#tq5|Oo^b&f*+Qu>ILdFMF|Adx??0CN2>m%5)Trz;%)KQ zi}C0uEgnY~0vzpycj=VKbVJn{j}-B$NrrS{?a2$0kO@)<4N2f5r8;fmFqLIRq4Aps 
z4kTp)vWu)D{rWo&hjlv*Bq$!Qx+L6W8{sYSKH8U6-ID;?6*T~)F26Q_siMb#touTh zr7n!L?&awYK{=4IoS-5-T42#lcN)PjdNDyjvu&3VPNarM01vTn0J ztj}N(n0^?`1G>i3cHZs7nSt;i|FGlkf9jfcHaxO?R_>!d;}5&%*a*JdYh~()KS_~t zW-J}w9(&>SplQ1a3@YcsHp29DgRwHX?d@GG9_#jDe21h4i~ul>v-O;MarZc=mIO!u zTwEo4OBb{YkEy1VLyuC~gje+EIYxINM;l&^YE!W-@$P}%``Pp#! z`ZvZX_iml1W4$Og#X&eZLLcWPPJxAtlC*)C?`b=yau!(T{Nso{H6pAQzLwq^sRQDm zyQKFW!mns$(N;?bQV2{M6x~S|M@?m>w|Q4xPUc~1UEt}Qi=HaTABB<=x?yrUh5xJ`>LC>TWN-*jYOSMmG!+ErF0hxGHn1N|`yBOA`CH zr$VBu2|AtS<9Yc>y^g$(c%oqG(k*XuF-z+A@7GBch=@Z` zns_4}JJaVkdqy|8JEfeIL;MhaI@3uJmP45oEh=ksP@-8fztJsZkO^Vz~qM!FL9<_#$X9izsk}$9yP(^4c{w&0LeimOjs*T;$y6A^tZ|L zPhR-rqf0#T>CtgPb8Jm~l%`4d014 z(Iut?yJ~_}y8D9f&7nzGpMPLk#8$*(1z2{x7(}D_Q|0IznROXU|!)K*sOskFs&wzYoDB^Rir?kAsC{sUz zVq_DQ!m3|O=Yf%%OR4qmuHZySq!}n@Q8BrmEyMsHk~`^$J1{UgjAh@68@toEaO!yS zMYQD0d&i9ts70qTeFk=~I8e4Wu}una3x9osY65%@sy`shxM$Il|2z5>z*)63=Il(C zd41=n2&S!FQ%k`*OL0TeyI^Ll;y(jFuA?XM#c~~~nU|9@O=AitZHU{8oTtONJ4Iou zhqmw=JKMYToT0cPTOY>JM^_z_b3MOSE{}-vWFok=-c6&(N@I99;Rq+WOC9zO455WS zkvS13WM3ySfeEf51V~c>^-2{>chWd+z|(kco-%wl<3QdDZwAeRVY9H&p;=YC6xQuB7K zZR7;v;f2ewK2?L$PU*Q{Tv&QVt9mKFGC7ln7X@JuF;ml7b7zL-$zkRTYfzM z=doP=LCR1u=-0Sz(vzt#?Y_TJHLo~zK`wD$X$=iw057AVnAcIV#B?u4*RQCq$=7P= z*9p)A+6M;^v>Uo>He~WTZR6w5JM!{;aQU1B5-Y1OW#{*4V0HWrE)bqhgTv{Fn06oI zTTu!4vL$236HHWVxSlLbR<FReVDH@D zB2HYcrqJ5@i%b@n?_+@!LNEp=(X3VQtTYo6eyaR=R^{W~5hVkWGL*@QLQCMqTtF^C zXF%3%pS#GjEB~!0nu?)Yi!w?N!mN}wmUXDwS81|j*oVVd)FD|3oVvw5@N7tp?q%;8NuiiOZhD%|;%(^XaPRqzcLDBy{zn(}xs^Oz<1Q8{55-O5`B?X$rQ zNwH(1dydp&2>6RQIMS;FNon06XDY&k#bJ;4zD;x2v=w>|7#bvCQ_c-S9JbN@)C(2C@x&CTDxe+Qj8c8EhP z`L)NjbV#l5Y1G8kCiY5EO}YT6jHGP! zG6{4p8;_2+ zW#Zt?>QY2-4yGrAoiCyuv@CW!)5*f^rw!p*SHlfaZ8~ue155hd;_5Lrw=r}|ZY3Xb>&8aDD=O{W{!>Eo@5FU3Uss`np=5O&OW=}j0bTgQ z);$%v!?dT=^?ZdSReE>1zjq?Fwq)X)vU@QYVUCXM;qGeKorDyoUt^`8{d&3R2-}z* z&os=b=#;nMO-{G=c&9n5Mt)k#R2;}Q!+?EHUpsj9bfF*JtPq1b>~9^NEiB{DaC&Be zJ#_ayPjcX{3X;bW~M9?t zEbgpXG<(#38I_{@b-IqF6?Yww(}&~>;gX4x+uu&25PS;q#RD}t-)A-Vv(yhzEo1k2 zQfz*v3z&5&;|KA1v>GEJJ(xJsuHIZQsMO9&Ui?y@ehYL#wJR>VTlwn5J%(10Yk)`d zJ{SyjqGpY|c4m?WwJOzU9DeO;`sTx|>j(i`MI$Uqzm`b?*|uQ8`{g)IkWxy*V=g2j zzK~r^JG9XM0^FWkZ_L81P;pGU zBY7wqx`wW?QwOvYA_JY5SuP49(C>gL;(EAOoS`A7ZKCYk@W?hhYy${Zb{3Cs1Jrvv z;4^&zLly+awZ0Z%RKUs(SR8I>*)5)>MLd_bv9$_cL47nSP97}!8~IQR?_G7u=( zO~!M7<5!mhHnz<0GgDLF8`!tB(Ko7e*qRWY8w|f-d{Isc6&x0}GnOF=u1HbM526!( zOa-;D96is8HtY7tU0Li+oo?O`eLi=v=_Uj0N{7cE`4Sq3n>JS1+)%Ka0vpo-EZ?Iu z`7J3T^Igc@YzOvTQ&3ms>1Kf+98~JKITEmsQ&&a@E5=jMN*;{!f>Du-u3$gcetKLh#ib|tk~F!|J1g}-cRU#o>Xn6=T{dz zwy%73zZkLA7wlvh3<*mm@4&M}m9D!%ef^crL(GsSaeltXRDRg>7of7V=u&r9ZAQ3d z$M)Xm)Js4&J9Z%%0gT(aK#iQ4hiO-)6o$iF(5GrnPAlD}SoYE<`I*U1VsAW;=$#Tv zL?4kCSh+)_*K7d{q0nnXR)x>}C`G~|?*Qs1-7&I;&7J?Vv3#BPJ9ur4*Dm#1;@XiT zuS=QAU3Y@WdQT4>kD%hc$PJZ|-|t0eWrF1-(@_cORUl`KFhRy^i~wITpj-6q{C?q@ z5n`GVNm_(--Z8~=j3!^dBG2{&aT`W=$3)9Fy^u=7AkhXq!@{)wqg@r4sp|x76N>sxKnXh7vG4h7g2pA?w9QP6X8o~S^Ip|Wr-!EKa$M;QkfPzk0 z!hgL$PqK>$ai^q;Tc}#cS!gmiA3Eeic*$+a4Ipf4^w|z`CTbcmrnd>13FqU)(&86ht10iQ`x69uP;q7lN099@|w9G*~k(BbI@jt|wo9>SA;4-YrG27E(I zIu&aTrbb19*W%cVK2a7!>~DL;{WK-{r*&U7a6hO}IZOOfoEV8q+en?BB2^%XDE3MSooQ zEQJgIdCr2TZ~Q3RsN~+1?L{;q+}Ie6SAD-ne6k_TgFv`W-;0v>pyFpylyA2NArr1M znwsd0Y0HFEu&voNuZ-Pq;*nnRD;s9!V4jkB!-t`jixkJ+K;25u9sK!FNj|U*{0^y1 z8~s}Z_Uh5V(@sjqE%N#R=!_ejDprW#H=#id7VQ(hcOTOzk`K0wpe1D+;V*&bET)NQ zX1GmkIe(70u~bAU9CI;$fX^UG6170-BCrC_JzR9V1QL4I=4lrfAd}=f1owQgXdM+6 z5Iy4H#Xr6Ns-+x=qmAV9FLS}hU7L$j>otR3@n%Tli)QZlUJHw-oUXJS0%J&8E9&B# zrs(ryAR_0jeObF_fbidkBsaardf+bKS@hXnZCamVsZnzfepx5cIeWpFx>ZHF?$j&! 
zAY(`P1xWpq*nb(1Wy1lh*u6Lr7j(SsYJP$7O9616YK{f zEdHh@TS8WxQZo%D&X6y?v&4TyoH%)9`O1UYw#Powch@vBEfobVvD;5J)bnj+@$$A- zF+~xyb)jjMn;nisv*-y;dG6tGXQkUq?11#NXA+MB9wK+^|Jo=KBZ2FLLe9ji}HQo3M(~ZYeb5yD|3TGzHBY~+A!Zk zy4?eS?U&};=cYkxbc20-d&=UM#5F--AFb?zhKh8th&a#M5>WJ~r{@CBh0GrAL`D@? z&W;uCv$p$qMN;d-35Vei#ywRQA zerc-Um|FhXuVKcmn10geW$EC#|5*?f2Gc>2VvulO_gG(Ed8G;Qu)B1-6&_>ed)vVL zi*}`JKyMH4mnph#?uq+e;85BB(WS`L*^{ameksku$}pkj^&vd;I(q`sn9la7gNvc z-cWhPudhdGC8K^)e$<*D2|1IM?0I#-#Qal@RB$0Y+5^PLWomw0w-VY>?t1$x5JW22 zSF10vBn;#x{mq5MYfg%%nlAP(!k;jg>2ECxWpN$rKj#Fxv1q#8matI$YWHE^k#$6l z#Dvb_RWuFiNOKPS7=^xZ(M9zy%SAT}QjTWQZWhnijYwGwt9GS5 zGs~gLNnRE*?ZtXD;wnDrQdMdQ>dtYpQC`LFa5BikfKwm#6gmlWJ+Pd+v8)+?K4cUALhs!9C)sMIe38t!%&0vZXwH}SXt)o9H>Z^uQV}Hj*{Tk0F)OrTFR70 z=AP;N1LBBDiH6np8#aC{&4!ssL_OCb)BR0Afuz6S1ttxDM{tkdJ1LZy7t(_fX>|qa z1WNEdFa>NBq5`rXQdjRxIKPN5Jt}poJ$QL`(Z85^z3RCQUQwR%uBdjmfLKwgfSM45 zij#{L0I8I?zZ378Q~td?e-RkI2>iF^2RM#DhGKGqw31%RYAYC`1~aZO1cmu;wDAWI ztfsW=3QWLDk_8V~U&0?|Ynrh&y0rcz_#Ml`OwL{>Rf|-X!L{<7`~pTUU~dJU9Wk}n zpU#}WyHVvcN^p->v@4d(^}I*MDtDoB)Z2=yGAif9e5E^WT;ILCls- zke|{SUk?tM_3I_E01Izrt`3}`%~)@hp^i#Vy4}~*5^nwK-e)SAmr2?iTl*9Io&J^1s<_R$EjDNjXAHJUIjRZ5bc1d2Hu*^4 z*<`LX_uQY4f+_Oj>93yJe%Gxd-$k}Ik9}k2a({VOJ(M9{*u_uBWy>T>OazXG`Ui%U ze`x)C71*#6Fkl)jtzsI3ZnGy@#p_K%FUtUJWR$m7MiWUI zs!%Y#Qxw`j*k!{>&U9zL$)ej((iPqPtuHa{UTghs@Nc9QQ}n~hcL_=-pMqK6>KG_) z-PWNwWk*mo?5jnrg`*PcwQ`A<&1ZvFx1U3Q&O8tuyQAAK1$ANouG9Prp@@-|67xE~ zX59{7Zkpo4RbNTF0#{Fzp-O`@wI0E|~qA!5b%~C`X zit0j1#lvv%h^&Ww^3$0HvCzld8K|?%Pn!VtC&B@lZ7I!ro3f??n)8^-@+njbBKSjQ zU-Q_V{oUP?pQV(NWDO6!UUebTX>PTe)_EJ=0|Czwnn1**$N7kdzkV|qE14PCi!IPR z#XwA?3wqM6@m=EzzPC>aqqF@KjXb{OT$Xu}A1rq_gkrdPY|C|ST9=3gd3gvBWS zA`E||o&<}(lG{ArwkDTekD zBWZZJuS|RMSdN^~1$keAk{cIn3f=%jeWg9&>4c8j0U=L%JO(8saQpG);{O>T2TNvY zHq3+i(db;7T~QLP^JeqJmy%i2f?o1#Qq#|jN=d`ERMt8m(5H9|;+*77x-$KZ@>#lq zoMXpB|H9JQ_W9V>)L;;xmCKf)J{45ldI~BG;MgD5jU~$am(=SBQlN5u{``>|1n$zz z^`=qoYCCHN;0(GnI8Kx+&zz*A5?6V{g&-mA@Z8@FDV{4qgnLLwT?PA)skx{se7wYygwLaKwy{BI*^{Nvx<*0JWp1!PWbw(w1Qd^6f$$omzpZxEL2-uz}p`W(1CMKS( zhNd5aE%aJU^lR?xyD!Mc--dux0EOUDaalZbB1q`!Vy}Pj&8D9|KC8U9Qtbb7$pLj; zkXO$|cq0BV2h1x}c;dhU0hSD2L=}Wv9MVa(Swju}Z@_cf+cLAdVY&pLrHN3qT*(oS z{Y6_#Q)4qTF?3@wqve%{Del}uwB>p1#aNBN^?d@Ep#AwyoP{As`0 zL8Cd6^OCL^RKz9HVv$Eh_egMtCZ%VcbY4VxWc_FLz25&Zn2X%s+1}#xy#W8L@l2S$ z#~g+~Asg}+)X!ej$a-{DO6P{-9p|diQ3v;^tHx+5;;S_Om8WRy%k0YXcxGFRl$%Jg zj|5J*^OYC{m6ZCwe*$BN0P+u$pdMGJR8#%qZ`{J@0|)Z!`E-%HnN*$bFyU&`@UsS?QWi;7 zxGlH`mH7EB>GEq${(HywtAW5MNQE8@%n{1AWy&azOURkf3HaLDy5NP>Al1Co z*U{DeVBpYVs9f+5j2;l3I868=DS&_!3gDQ5TAg;+_YMbwtDSmBYD&1E(d`m1R!`^D z8#Y!1S;WAm`NJd4v;&?B*+IpB z{{Q#F?ovOecaD=O{Y3$7_PUg{gNTrz(-8bGUb5%zWpN}v+}1blj|K-jO z;iO^3#=RS?#h@AysIz1?zOu;n1sE=?bWO@kORR2OHuUS=m-2FQX}Twbb=T;0w0bsuasRqw?%Z`o5`kfdFZ7g#^M;4b3YJs)nrH;L{L4#p#Q}YbuYJnX{rD`GMC_ze}bYTo?13pfSm%uiT{ zkiGUrxzX--kX88M7Ve0lmOd zB4d7v-tS#UUL6CmfkMw>8#k%kC{8=#LulRojrlE|9*aIT<|}P8^O#d8oN>q|6J|R@ejX_vQ>iN_K0!_ zTULpA+{m8uY1)|2;5Kc6BO4#B%OhxWX=$Bz;1yi2QTJ$i8wjTnh#=?C#RCuw`8J2W z#>k=B09HkJQFjdNE%u$(#xH6i=``~L6*ir-TuQ6E9sPI8X`v= zP-Uf+ES=j}tO--Nkf1<5EWuQ35Ya*H)Tog$?UA3>>3MefAfrbMvWmWhu*TT&^%JQ2 zvI+teirwk3ZT6@su~<~^k^KpOb2|bfAhrOPq?=VYQWUKp{lL>xGMg5wwWo4$@5>7Q z{IS>-{-9RUn@_0-*Jr1!RW4q$Qo8+LC2YF-+5>94Wp53w{L>lw@4}LIrFd6V<2%0s zW{$D3%wkft)AyhwDDy<4SN2~dg$ocm%}WE7P-&RM0~5-~hSa!@NVK{L2Sn-TyjNF- zj~v&%A@4vm9gCj%VW1yn_*<(m%3A1>J4j45)yRJ!pWy};d8CD|Po?m``SQ!U1exoZ z29VUHI1vsn%{JYzKLB7XIhl5T4I{7*x`)hf2Q70XE8F>L9$9U^5OffEXB3Z)8q-jF zVywhCoe>Y^GjS3d84ZD*3sg4@hpjDy);aIm1BZVptppgkpfh6X!gh`IG8_*F#@9eD5Yz|g${^klYa!xL zV92PL(8x&_d2zyNvjgt4=Z=K`RF5;1a=X(d4uQz9)}vKU+|LJ40}pf;%co?r`~7sC 
z4suRskWR>PqW+>pdB56#Vm5N%-tXA26Yo^q0d!!thaiimqmKXkQ9f8soOxY75|ir* zP*00Ny*27)ol;6INh0q)q-D+mu33)oz@8Uw;{s`flC&t&5VJB7FIM*`G1((4ufXm{ zZOPnOv~3)wHFxk81toB77>;IW74O>}F8q%3S`^H(y2v`N=FH>)FD7AmB+6HYmfdz! z;uQ=`H$BDtssU7qUIql8(L+}fsbd@^aP{&OMpx}I`&Shh)dLjFX^^;EPjSsj05A|xe+pY` zKHkeI*WL~B!wPIh1L|{(G19LeJ_Vnl<;*szo)SDt`q9<22E;XnVE%|b1HAh{I3(`k z2Z{0V=}cZyse%CF5kg0n_V6h?_iEoeO*8BbNn5CF658mLzu#L^*opjvK51An=+_;n zSTH#L&;q^qpgZrNn9`WxjSNt`UAPgpQnVH7bHtqqE&#!L=g<9300&`3G1C)^a+H6K z!obUzoYic5=b@n-o%(#dwB!9C+C%RJwy#@Zmt}4NzgskhgUb1KtBS-Z2fn6KelTRA7ES1jY#K5u6^g-%g*9V}es@S^hbW%K6>3{a|YW-AeMfp77 zQ_|W%>~Cky4L2lsm47it*9R8od?qyD|mZ)8F5OYM}tq*Z!?687%g8<^4P|Lsm8+u(1Vz8~<~ZusX@P*rNrI~9sG7VbSF zlNXbGu@ej6Ary&`f4I*P{i{lbREW%de%a4Fq^Y){X6Ea`7)S)$N;B<@F$l=01eu?B zX7l>w!)EB&Akq*$wF+L7+go)Y_FUtXUKzvFI19u*@*o^cB9*Vn=hAUrgb;s!1Gz`O z5qL9Ji-bQI(<#xb#R2M2jG|iZHH-lnkA`iqJ|ARLv zp-+E!_J@E!JS=5>ZSx=Vp$Ewlu2x4Sy@uxk**$k1zlnzZc?S6n0L@wR*cw<$k+X6T zI0aO(r6B5%fs#1B0Fvd<{0k%>T{XslP-_By$yCCY3Rb0(xOpERZ8nqVEybxMReY9I zGHr9{M;pVO4G>8cAQ26!=Pb=ip!*>2E~3TiDWeg>$SIv3`SLrF;-#hfNHO=~FEMIK zREqqm2d^->lPEb)un=G9aowJBZMwhONR;jfp;DK=_DwoH-(#I<8UFpIA*{n7hVVe` z4eh|5F4e5O#P*gnha;R-u`Xlt;*Y3v}+bJT+Veua)7!G)Fe5V$Y2m+a$nzz3S zB*f(+DZ)?;;nRi_{^a&+xw=L?Xsi;s0cKZAx?9JthIA&vu;ik_2!{&HkR?eRoGoMw zz@8=DM7Fdudz{DGE8OA2uU&0y0%<#7lAK)5d*=r@9wg3z5RCXSv8s)&89(W^u?=n` zcjLNDij*6xVzw<=BziLQ6oBIHKBz8ff{=D^zk24mi=XP?SArK_k3;awO8FA?rrY|7 z(9Z$;t8wIw6*C6FU}D#@X-El{F6bUpiJ&Z-*Ek!Q&0GHs{z9}(FtizIBc5r?VIq(%FX=WVcO{G1jSNytpwEgdluJ#bN%6}OXlP+C&j7Zy4AWly(FbPvF zQ-q|fS~=z%e%I&M|9S9uZ1;U#@AvEVeCA^5d%le|0;SPr*Mf}K`_b;-L49Y|$YzSS zn7xvR2#(zU6hSAv)BU0+|1PogYcPzxx~4-$NGOi}S(k_ueFV6GQ0t?4z{7}b${ z&(-$Jfs&L=v5b0u)?n}(WMX!O{|{V9B2JB);AORBcvwVh`lip*-1HM1iYIuL`%8mn zh9X}rUzVPlvirT%5yb(Zqa**KH#sV-m4`^Cg{ZomI^_RxJcsCZK+lH7TB;*f5~TeF zpP3^$pp7I?#wooU9K6xd;oVumego9GyckaF((KQLK*EZ&%m zaIVh?;=RCtpV>sbYEmxwP%rclfDMnWU;txr_iOZCIZ$JeNM$G6`AV4-lCQqIIYP&k z;!f;c_KC%veEPg8l;eqo{KPL<@++t>b@JvE;|Z_A+?ZMWxDJ_tf0eEzfL?i{%@5G*`T{`ewV4S7kDc@rgK=7 zK~C_9h3F{on5%g2C({ceWBfdx$(ZCzlf&tW9)^#Ha9nT{@Ha762rH(RkjW=p20sX( zx&x)N@Tq5<#w2Ne@xXCIx_>$74Wv4z3nGS5CA0&S+d?l9_UU)|d`-SwI`F{Mv^;va z;jobcZ^v3mZ||2jMF~u7{hJ{e4-95uYHY9H+FIli_oTrUNmZ+UeHzWV%9PN*`2z)FZMNt&?ra0ohDm2)q82xJXLdXhkZ0#6+9WXFC9g7_)po<^W(AXET>JYXPb{Kb zzFTH<#i@^U1!y=IEzW+sr3$xic0)RM_LSKrVix zYL0k{NZ#R(J#beQ@$m1f2P_#o-G&>&rZ| zz590|G-)AmFnliz^PIddUMc>@b+rovH>4627_qTUWPGk4^FDE69iN-@+&>RINWq$` z*kb^-=AAl0O@9JuD2I0FAG|xad4v87VtsQVCd1AlUC()cJwEL$k~m$v)&S0lnu+xH zjg6b*HBjNXs2e+emMb^iSrNFlugRh6;g!&;oMf)XGe zs{Pq9w>cc{4bb6tpoD&W&e6iFEM25B;lu5=GmnRs3=ckMeH9+kQu;7^d*@H%WQgky zNQfjtP`rRLJs%V(^WXpazV0oZ*_4%) z^`&boFG85~<+!Tif4p-Euk2#)f;u+N{Coam)?L#Z3-?8s=M=@TC(`Qtalk)Z>ffR* zIIZ17&hGda5;fA$^erqW&2O@Slyo{@`0gbH=J)MG4ik-lk_=b)ARsgB@DxX&e)tC# zd}mBK2|z#h5m3yS@4%wv#25MaTN5Q@y}V#Abw}dFW*|kl#Ds(g^tJKNPvlF*VcprG zTn9^w>cA)>BVPsl-Yx@D-b>2 zBN>?u2(A6z6q5l;4e~#`Kmgn9kb-G)pO&U5j?i4W3&zRxcEbo}O^JDa@$<=@3CmT@ zyfjA7ABIx9ZnnEXWjRaNzHkMU-3c$sPtwEZo)#C=+1Y>oOf=mzJWOXd&+(Lz%P>+5 z4;cmm!_pr61LmiF%I(dm*%SSqLd}ZDm5{H(wfU}o{21QmfYa#j#E205M?%!qfIa&6 z^@~^zSy+ohj-&xfdcW-TljDyHZ!<#P{EKHX+d=Y?H%V24KVJ z8rFuUr29<5ipW2AhK6IJFW+``Ech}SEf+F)a-mb6SDxphc#c?xbGphU$C2mnmU}~1 zuX#Hbn_hgjOLW4JIF1^t9F}88y_OM*yeA6p2GYy60ZIolUJQc2(i0KoS55lv1R_&uy}SF7>VOLgmjvfIFo#Nc*YU4MOH)%5T>fn* zcYXrs4ATSIbo87G=(F7FX0oZaR1v+Eu-2Rk3usMHUP(TjPqN2 zkfndv_PlZiyPhK#oU{$zD+ZE2@+fm_V&tl@7oYd1kb z!Q9AZStMrde>$@(D<@%YXPdxK^k5d3n|JK-ndpuADLa?sfYJ83wBvC~d>}|%#OJck`KTO~_0xhK`)h^pBxg^jA)l$4Lk^ioq z7qUW`(k_iP^an*3v=P8mYb2Oj8{jGwm6+iPM+KIpD#i|mv$PA0b)zN+SxUr-GV3=w zCFXF>`3J8JY>i-{!NK%#w}10pA_{!LD~T60shO&ASBIB(v|TX1 
zK!|#yH*r^8#yypuZi^-e>zl6wxUFDbg`>3*eVcMo$aAK@Hf{JzsmF0@vu7#}fEsxw zP@yD)Oi2lV>*c_bj!S4W>Jk7XOSH@INJ-l+1dNrp=2}U2=%xh0J!H-oboHjCRTBIxXknS4e$mZ57r(bBT6xFOQ&bLNBPDZgt+4+c#s7woTCva ziT&j+*f&2ne*OpS8)@8}uDLQ*^AvHNv)|~M7#0}^iYkTHBWuNh9F{aP0pX>(8+nck zobbSW%*DRK7Z7qjX-z;@zrKC8!162Ldr>O z7G>yV1dPm?tG^;IIrocF*BbGha>xnw72TU}tD+eCtxp<;uLosRBD^9=NFkxRTFwlq z%&hf!Md~HLztMP1i5jetkz`ZSgT4&z4{)P?F>qpTg~{s`+uo?8it}9~NsYP@oP)lf zNa=WF1zZ37vk(@!K760cEgw1iZ~XzFPeq4((Ld>%QfH;aeCsWTa`DQT_J5NvpE?G1`j#r1U(Dp}l~NG@ad$r* zwPB@3G?ei(_P-90m13~N!h1QtS3L9V%IFiHTmOA>kLKpCEOQ$lpN$FI{`XzaeaGH; zd3~t_kjd38VM5j{V67bt_a@2qK9t+#g;VJ6S?SbE9kf9E!o=mRXs#eab3SfDujAi$ zEywbyL^-#e?bVpKqc<(=2(F&h%$mdJv6xQKvOb|!8cFn-*R$@*-r;B!bVRQI`}He5 z;E{K(?5vzk2{2XrYP*j$L{1JZeGZ|K-(TalB_roV61%3-JQq_4xd}qqC^Pxyrmzz! z0B5tm_7WB~LRpudh-e?mj+c-7cta~;_z7JAv`S(2G$+C!G6P3KFvQ z(vAH$L{0}zZ_rruPaaRc?>i>K!9)P>3N9RJs+0_a6baeOb&-%@z@Nx#Q-sqhwLo;qa0tJrWs+n?2-!7}>z zBok7?9Cd-2$3hU8Hn*3WZ+4w2DoT0yA8!MTiA6XYMP1DWak)#+yqy_=B%~S@tBwYL zELKXPJ}rr^&xyCk`q8XeFDeOR4Jl~V!d8Mq`4qRR&bd4TdLt{` zBDtjJ9^+5`_xGo+rpJ#{cR5DTa~mJSX%Moq2)02OWq z9rqH$V*UI(KR-V^-a>%}r5=*m%$7l)mQQXB$*|-MA{M{MR6}|tLobJyhgV~^qosY* zI;9_3dYq|B!|?MMi7}rz&C!UNNl)EDgbs!)RW!A67AP(98Ri$;XlT;ixFwAy|GH@? z?d8yQQ@R8S>N|I6&?_O`5^Dj6dHK<}!~c1z6T5i$BnkBpGXkl!Z@zWOwz3MGlC*7D zh8YyJL~c@%n3a#}dZZ|bixNtE(o#{Awj#Y-ngjiKoF^7*A=@lz7mvvM6Q5Rg#*mC) zezIAt7%QV^l10nkkD@tF<{V1;8_DT5?|rvfy4ZY^O~k{Hb!67&=JcO%5#ds1uG67E;t8^7ogFSbm0|Ev3{PPo%a8iV} zRjeNj#9b4Elk~L|imTOc8Sv`mN1eUpng`9CD&Rk3$|q0R=Rc@boUGmbCh7q?<3U2j^%z5=$vs9Jsl1H7q=!Tea@4dqVs4%h_~z%48Xd8z zU%&iXK55>ea*{86qqK%}{dzCCKQk=Q(&ij~ygo%YTzDzGZDS_S5nSK3+`AF5`NdzI zn=`BuTl53!Ogop|^)`6WaA*$HT6boa!Kd1>%B?MYefhscS!?uOSXY@X^2~498*;K3 z^SkDjL<4IH5hLvigSd_D>BPj%mf~f{5l8cC_5SFNRi@0!e=q0b)=M*<;48z%WqF5v zJpFfT{?X&erC-DS#b5;j&H!lHm89WoiS$l)ZAF30 z*x>T=^5CPq^o2%1R>{*l8;5AAeDMF;0dw;>%5#L*=pWgNVb>80q-E)9hzNA=&x&W8 zTLd7Jw6|@g6TvY_CLsd~cbjQxVSz7#;l%hPfWFUd=}rEN`$=4?Es`YAEa8^fRNl^X z{i-y1^?mT;3fGecO5;ueB<)b{L2rH$-nn1N6m-BU5NdR3T2@#p%3t@g@})Nx*h@hkuT^+>FN|qp zlW-^DuaeW1`GRGa9V^b6XC9W8-QLH8<|K~Kh8jwF>E>yOz13F*EFicC#g}o`V9o&i zfnG`IW1P11AbthL3%4qw#^ZpF`H#a9J70VD6)xZCcwGDtUO!CFq&$Ms5B`xXYcMlM zYuPcS;6>@Cu}PGL&_pLoTxPtjEZ@6Zr#*#0y~~jRI0{KK-_qXE`oapS0nW2d7(>|yM$fq6^4T+`nSP`*y$TMz zN)?w77ig&O;;g+z%1l&T-Gy^VJ_FeYbbJ55j_z%1z*(MB0|K-#Pg0pbo!x)p!L%es z{O-L7^(i)U^!EwEl5k?zpK;o1^p8_s`RNd$EiOLbD53h#Xn((@^0Kxq?4_OQ+^u9Z z!-=5qazVS+x{iBlc9b8k=FnZqr4X&mZP6eDo^bn+Zydrr$&?}aRC)6%c2JD$@J1y= z%|baN4f}Y%8|+Hmr}oh8zh6C%7|S~_Cr1CQG(HZ+Fu@q)tm$CB7xu8z<)Z-L#RR!?=ccLL(~sI^6VQO4>-t21|WK zd8}7(C1AWk$N{dB3c3U$q09(9EJN5gt$QOEnCJ%ISA{Y)AN(r(d7KU&qyFA#mrF>l zFg?*KVYeX|`R$wNw|#B^D~zHM6k5?J{~J`Y^k6NaCP~!J5L+ zXOho!%mUNHkw|ahQr=g|g%v_7RFXvY`&xAcB{SI)3M25Z*?#%+n8Y*^-G9WuS9%ig z4=mfFKA!$9D=bm-LE7$GEzfi4Zlf9hJq($`!4K#REqw?cU*g!>PA^P?d7^v!XE3&l ztt8~vF>yR!+PIsui=LMW+ke+tn<8^J$!5uLd?MN{v&Wf0J0}JXG}+rCDE0Qhk&zMG$8AX{u+RpJkLB>F;HY>f zIoVm5JmUiK03TL>vvog9i^Lvr%(4ZqDHr!;*T=+Bxs{!bfR0l1cS_8v+)f~q?&@)Ix3=b%d zdJIiVxPl5zZSj(#O^F?RpV4kb!eNK(;!pbHYndA0Cn1KTf25$WeOu8xJ6j#wRdUg5 z+lya4ooLdNp!ioHdWQLF4$u+5dTtK&$81d{jwE*VHMZD34oDnM-g^OQhWL-Dn2 zW~*xpJZ@G@c4q#7p*7tBE~I$6wjm>yc$I)fes`(yqs8;_@YZ`j^tPE1YIgJL5HYXs^J%wz~2S*cBs2I#0Ubv*lsgy@-UlFc$s_)TEm?GvG zd4I2B_~hTMG$Po*`IBn!qr>4DL|Iz^7A2tqTZ_nLbK#uRb&6SGIa%w1nHnloaY)p& zEPnT4*vo?C2K;9GL9CN6yz~Ht^XXbwUlPnT7A1kC(;dp|c#LARJCmHG`kvdnWIibq z=S@@Lyofuef~3|n@%}~js6B{LP%M0llk!saF@8e-Iy)&nw)}_XtLHe9<&Y=7i>F-R z+t&D2%j#V5v`i)`iGF}!tt!fg=2kQ!m7U|>OT)lv=|QjE7=2=MxO!uyB_L*t8#54` zljVG<4$vtOkm4}nK=-p}e8!_vc5b{m#>h46%Bl?WcVI_$DCkogt8Yt5X3|i(3foio+=X~#*4gwy@N7e z^P=dM%h}n`3s~q3@{=K?D*D)aGYfufY1x$_^Rm)dF_mp^F 
zM2QzSv#`HXWPH~p=$sW&v4N%E4z#zc9h+N;BA~O0FgN<3?=|v2x^4-{DN9*Kqy$Q* zuPiP?a0!$2l$8mKQfyz>TcL7~h#DzduSubl00OQ2l2(y8CLH9FG+>gn(aQZ8>m?UL zDfRbii})^H^HT6BIQ}J94{+Zqdq206d6)wzk7^+jb){dnchNX$m>{rmSfw`>S@{GF{Ipv|W->%-s> zG=feXvz03eQ{d&zvCJvXF_%yOPYvXE3GQ+4{AsKL#z{EQBq^}X?Z`|0R2J!A*1>X* z{WGEZ1#z(yAQ9B?T$%81$O9r58y4UcWo;&I2Ie}HKV3*!wT2V1=!LEY)QpM*5eX+};<&ux0lr|f=4v4i&~ zK9I}Zik2xMKSMw;C*P6s<>BGw`&sT8xU-QP^>OPr4L^^MFIJDEH@|K4l|PPTflFW4 z>@CCSjkT$nKF~Q}o8_FJ7ZN$!+`h~XFj(oglbUj2;UaV&>ZK8S2`f}zHehb{ z2!}zRkW7hVJ9AF&MUqNTMHO)>%3A%c>VVe_M$@jHizR2HMdBu-H^&<%qwhO`|1*QH zq|VDuZLb{AtkNAzq3?mW)+r1aRpYAa?fb>Dzi3jg)ez2{+HS zQ@pzz-sV=-wEj_Ug|0-irb)!sLq+127s;8{EQC=X^@Ar}+4!%1mfG$(gnT@mEp3M# zL~AI_KLhCtLLc?>6V?~HlPu(>f8NrK z;we~9Lk5uvR(;OpDaiE-F%{LisQvu(6NAM^`1aTZU1s$8)8*lTfq_$PmY{9G2U8G@ zt?`x5TOIUkTckBI^#jO00>s^9_0Vf*dM2BV7VzeX{CK^x~t(|Ne2#p6?d78(U^%|MAH?K^!8| z=$X=fO6oY|_X;GHO%ysxVlPt~tgai`;ok7lXTIY7!~QjE~NRg8RMV`1pa;KII|yIQj98yEwe{@TOc6l0Pm3R$46H(oVny zP*Cgw20n))S!n-WIwC6S56R5RAAnK@%PDWOo_+Y;Dgji%jl%?jnGIKjlk;f>;|*=jz`VFql896>u58_j6d0ixbi7e`n5a1GeMBT+AA4^{hOGc@F+5+?T{4 zHHL_Q;qE}hqad571FVV+3*^@P#&W9_cuC}Rd*J+x#c(RM4WjZPEhbhMk;?EQ#Hr76 z5A4qATOVO}CNCWaX6v+@AuX3d>h3IXNrCcI$+`CT_Cakv5_x!@JE{ie@K(wV+mrzrW^R@8hT}@S5&`nQMc}97vW$82M`Q zHwpo_^L=FnJnmWbYsJg$c*-M;=rf&SE(qRi+90=92Du|ulf~XVN~vx*p>n`8h+dz+ zxwA5uheI*ern*|d<{J-YHq`4W3Z2tarb+!L{HS1&G5K$BC*}Dz0QkFSK8X{9p-j!f zgB;QkoLeG_d>Ed%B&^ZVH%(4nFjV4|eD4#0FM_qGSdQ)pDd_T*=CqQJQLm2Bxy!ki z-j)OkhVa0dKQ+wuKWW(L8yqaJ>R_UPg$Lw#SortY8p+x#)Z~gn8)Q!!WDftZ&Xz1E z59Cb06c9V^Xi)q@{p4dW!C!RtO4QKNa4O%aBuKL)a2qA|(P^ZSi5|*c6!%KHGn`hw=1Xpcf05hn0(s!7F2 zm70KZRy(f7|OQBThTI3w_VtWC_+$CE_>nP=q&L^?3u-1zeaZE zxm9ja=U|p_oYZc=lND{mkX10TnG9?HVuwXBo7t-I5Z^w@ z%V%ENL{#GC^>jX%pFdYbZhrBxpnhU*HPYkCxt~<1*grQccUEr2=xxKFshe-mcMx=k z$&ZzU#P$pbZnWEbe(@>QiTg;; z4>{a*3QHrK6L!f%Sn4-h_x3o+?G>k2oc3qcg(LYhNfIej}N0 zXP{96A>b)=BsHy;&Ri@r?aO|@I=KUrhe*KhBx5Ush>-$pmwQn1!!B)qZpTAMjq@ z0=L@&qh>5nF_K~C$~!Z99qWYk#|18vt@r1!3CrO^@}zg0T{ifIZ!=(3KmAiu#k0)l zN2`D25&Gvj5$e(MdU3^(iuyjZ2Ft#5gRvhHAS~fcbyFTV486M_iv|q7yzT|+CJm30y|9IBe z7=%?nchQyR;5ahkI{CJ6CTZjg%F!8)ywv?uJGyBzNdI?!FqS*)*V82QK$%iOpS``8 zH;4%)H;ddXt@h|;x~@+_$>i4TDj+}X{QJI73=cF5%gz27P|E5As4uq^<1IDK?M2lx z8DNo3RpMa2QNmK4f<#T>bn>mGzxRM6;D!35)xozQyMCyUN2OcedP(dqxppoQ?-pc} zbP4aBzw1eZe^pf#t?uB#vBb?S{S-=}1J>J%N)}Kn7R&NJ5izc9eeNH3rb??}3=F$E zVsD`Si*f?%>RDc~)07M!hNk9POtKZSR8So?QHz z%T;xh&2%=gfqC1c*XbQZgg>-!ig!NUbh9jhUgmH{DZBGQ*INsu9d>hErZYXrnmUiW z@gsthKa5qoWXb6DDQ6p`9?4ijGkzXBVW!j#N%TFH!y-u@OU%`%4z~bJEln(PTAPXh zaGoc0O=^*()1w?9F_KmsK%>rEsC97-c&{{PfRM>%x#b}uM!r-j(5et+l)Vp7ZcIO( zrmgzXKM8hW)k^uqjVdw_q>ETfjY|*HFtQIyl-1F`%6x!=oi3qvU+~#$*O5adDHo)e zrs>1s%iu^CwR&(8PrdjA!tmk?@BLWH(n{>A;WoE(*PcAk)pPB-c5g{4<+ao0_qd(V z>sdIVACO0#%xp!`xbswgp5G!7vBTecMkCIqvZijPK_@@ngH=BRE!}eZ z8>+dOPS!J_r(UmQdBWzJ+kcKR^iL1jB|r5%@XYNxc+r}?nVam`nOe@Hyly=FRAqOD zHturjiIWbNrJl)X$GJoPrqi1(V=ZTQ)|Xi&L)-lT+~wXDx$M5P{+Jjx9`Zk(8*OgO zuKP@P6D;r)6XqvL%eW;mboQ9!UQ^RBw_?gKLu-_`Lbst7Dxq@0X zTH2$70vD+Ef;Be}UMA{d(Rh>8#H|IO$Q=K(;_)dYN-gdUa-HGs>ULc(uiZa`tWb<# z{nQsrBVQriXg%^C@FnVND)FO%l%fMmhF%&9nkkDooaPY-LjbXMw!u-?!KLf8CE)wP z=;Nu?@_1#l?R|4?m^-{6U<6CO%3P^u6v>zf(#ikv3hrJ5+oD6vrbe?+BRSbd%J;5;G)aM_Mr)i~rt-l3n%k0lc))x*j5whU0&M*+Xs(ky z@9>5myGt+8f^&~{myyBJisnC-fdf}|=Di4a|660aq>ib>V!O8UDy0&pKC$vZdDQi~ z)|tr2ovqcF_KwMvUY7O{Q(D2)rq8Lo9*Zsken>a~hGb_YFdd0$}*h) zmdA0ct^`sK6c6*V?`#IamfyaU)+`XxIY#~aR)l$qKq1#=8Wg0qBu2dvg;Q492HOr< z=_UN18ZJM~WzhW(Wo84q3Yybgc^}1G5`^_={{jA*u2Xk&&u z-tx5xjDZPCYSgZ0HOQx^U_Cs>Q z=fS6GMesI831M$yROFIuFAeilw}iVk9>DCZOvbnqwm^zJW``Ri$u9TCc_v8^;Kph~ 
zn6=`Q4QT7>(A*>=>?JG@IVqdSufVh%K=x!~1QV8yNY7Q|7nd6B<(+7c9?>2QZ(AR( ziVltlaeo+8i7cs)gUX;F1n2Qa{ZCamxQv-B?Hf;gzrL;~vS0j)=#WJ~G$(kXh*vNg*S7w`knAHnD?EE#uou zS$uK0yh?6;4T^y)3odq0NA7?4Ub^(S+h@s=34i`bVG42Jj&eb*!N33gR}ZHE-Mr}q$s>pA<8?Aa43#;Re-eg`V!4C!-}(mo4{DjX zY;OY13|NfiaFnU!pj!5ssBkY*SZL_Sxz*8Y>_ys0xrbzKLm*RB;y}+gvKOgj`Xbm! za>w?p^`mm((vy-jOAY9Gc4w%;cS`FL>HBA=epzFfeoAj{slW7a1Lc3uGax7!D2`^Q zJW4Wt0e_}V$s_vhmNpe?E65XpyhKRLUWM4SCZPm41SgqZlOF0zoj|7^d4Li?OhkCy zq6N1A`CUXRYRQ@vT6|Oo9MT?4zfkXA35r<$HX7m1Z;bUP+hf2uI-fmIq`pgN;EpNA zSbn!kIi1ay9mHcmq)LWm07LEF3+&ut|2&4{8@kvoIZhaa>7fuwRc`J7fF^6~&+SJ+ zZ*d%PpaApceJj>RWP_R`0-Wmpj6UTO`kFu7%_(#+s_n`3da3{89A(-rLk!}-$WsFV z%zaqZdWYBxxaqlb3K4=4T4ZtDDJMas^8bC^Q?2_|q>lhgkk|IDcgvVZzD ze{?;8Wp?NYH5ts+XT1B6RSy4qFF?42RrHa+B6muW4S5Zi`qs3Q%ec*dd=SvU64-tv zy%hmB2}<1r(TTZu6No-a@&CYaAR)LaBN_F1a{P>;Az9G1@n>_iG-o{W`l<2dwhsdQ z1Ph|m41(m~dg^%m(sYT|>Z(ZjG8Qh8x%PeKHjy5+yj*_&&x%OLPS@k@wXLqM%FOF2 zENmnAM(Q$kLPA<7bwuHZLH7)!b7p{LzR*>{^sL}e!I`%9)>};N38XUpfO6LKPjRUI zqnkNxWpk_6RPo5^I0Q_|SV*BQVs-Y{uTg`DhzMK0uD7QMDK#)jI|CWU^g?&T?SAat z;=U#ty*s-Y&%ba1d)Ps3GO6KDD;--J{bQ`52DkJn7pef312| z2t|Bf1gnuntoqNV4L?u*=@eUS`5*M_mLWfq&Yozg^dDmoZ46-%N7NkTi=eV}tg3%c z*p<^-#+-C09U8w5zD7bk000WoW&Bt|&leT>GAHr}NChe3k&}7u+PhE@|I0yu-*?K| zZu$^&*0Kh1K{eyYKn0S1=J#zBgN1hi#{%C9rTEX+^>F2q5ONr|OP*Ad5@4t;Z)9QY zL>soA4!av7ZOaVj`Bw3iZ1M&Zt8>&bJ1a+(UqO2A;ruN_hB*B_syoar>6*;3=ngWQ z#um(BG4y@u231ejuUa9;C&t?%*URP-|Ewr-fIbNA{IILmw)Y4^iVC~u>uf#kP;N}1 zPtHxYZX1KCUzm5k?~oBm)JRcLRMS#E_Rq@Qt$fl;h&OQFu?2u0ZQX%7#^_V0f4)%(yu8w<- zD~CD_ynw-BlMyzx+njgQ?K(A`%Ekd4`(Pd03@bXLOa+adO*d=?5pX&qc6NF@In9%j z`w7>_m`)?1kBK%&OC;R^dPDI-0tI#36oB*&L+gyo1m%brD&JdcHB|?m?#iodJ>cQUgX!D~ChUp0Hifw)IkvECl zDOMatxu**9Z;bgOLW+U>2csb1k zVlZw6dhKD0-)1}`Ks>wR9sL^%|3RP#0zh&b54VkF0lwcWt0;1Af2g$a^J4#^yTC6G zq;td7lX8AQenU0n?ar2!y)AbpvKu?m_&dw9VE>(Kxs~|Mh7^V3Xh}9VBdyl6#|8a& zpAm^w%C4Z_z;)n605s8CKsifZ0;L|fS{-v%cJ~2gs!jvq^W1~g77$}YkJrqBg)1o- zQr+WN@(btXTwO8&-Ee>CbJ;INcMXBKB%lX5SxdWk&~6RDG`N1%xV2$ zmyfUYD-sv96{qxV(7sgd#t63aG|8dXlSd(_vh}158s|l0!xX8nxgPSzvAr^C9-69D zanCeN`ZLMt@$vDWw*)MIY%otfh*LfoodPkP9-b|izMDPcAz753xaDD(GO-D=)%+ey zhVj+&SCqTSmcqz8z-jFY%&2FZjy{`ScrhmD`DQfRrS=OS1)-D}yI(-T5|jf(c;jlM zl`Qyql`OrmNF3hl@AvOLD?x`?i<_WR0d`X6w`$se5r&dmJYF#Z7Au6&nWd~D1wIai zctDhcFBV7&4T))-oTH}w6Ey~dF5-mKAq*vJi2fn00I~sRAq{ArAw$w-qM7EZIzr_c z?Bvt7*wSG?8elM>OO~EJ6EEl)#)aPElMg+Mf&V)c;@)mmvR|~OPB+2c-T_M`i@uBS zr7Z#4zfed7;})oGJAs9=5i0&Kg3gAX<5%aSaWL~kUL@mANt5K7pSgj#p7$71(RZ`s zi^yNABNTagaD zu$w>mu1bC+}UnX`XhIxnJWmHBeba|AT$)Z@HYQ z<>~5+@GmhL_P)YN2zq92c^XEF28!isPlb}w%VGR5vAC09)F=nA1w|C1@<_v$<&gc+6A62 zx_LUjp9hF9DQRn0wU_jx4WkTYZR1{yy9t(qHU#4bfDnI=Q}*b=ar>*H>+iHrut?HT5D6G)~3t5G(Ox!{JeZHLjk=ar^Fe{Um!6ZN6S7En6;(HG5WK;GW$f$HS&j%U|o-=vAH^RBG^7wpf)x!4I1E6 zJZy_oBls%}DC4~~H5)){wft!a6Ho9QP|#HNaWaa&!iu8v?Gg{? zDgR6U{qM*#9C|JLQ~;jlO1o->N`Pl%+ea|}?SBHc4$`!EPfNw~(ifOIobGMBuLe&M zB?C$#c>S8X*&6ea2R@fC@g?u*n^+j86sY*#YnTKaJn(4#m%&g?l4 z!cGRiS0A5=5$->sK@?obPIY=%w31CYsY|h0XcggU3CXdq!FjcYOq@b` zk2G}od6P;dypkT}k<$m{bw=G>t`p9A2`XxOOmU8JIujK{1ON0Si`h77G%2RPFC&NR z%dBly6-hh%uTB{L?EdvZpL;~OLiYtM3b*UxIeRQR0F0XwSom%nk15>4`7R=z75gwv z!gmB}@FyHn%ea|R$6g3!T`Nqs>{8&85D*7s>#dh6sf9QME! 
z+V@{NjMuk_RHYq#o;XOQ`x9lTQp+tW|Kcn46)_C_X*5M9s6^T6Q0ZE)$iTLd=U*jT zU8Ry&{8ygoE*PXT{m#8|f0Skp4kgcvxen#8a1S1kOO=beqK{E;rarFw_wjsxyWEDG zxwog)?)zzqB~Eq&=)wnA!2Te>qSQmk zj4*z%Y|1@GFP16(D^09f^7^HtagHjlXRZvk|(HjDpk z%zk2(Fh7lrwT#ck#+gjwX;{t2E06t@k>`$qbtZrrXh%zKh|Ha(q;6K?Nf%k#;H*_` z#T^!?Ns!F2SI!DV<24-1w-0vw^PUs|@ve!<*&rm!=V|;N)|WK@J_``bC$ygl|%`GVnNB- z(6961scf8@wNa9ISEQj#+?t+(e2;n<{eMMdRXRC6uXu^sou(?AqPw{@wLQDN_^Xy+ z!0S=5)P4!VD}OrMWl1=bM0$sLsa*gie-@3HtB1;me}R830UeL9+Y5m404{qs4G*X2 zV#)($_)FPhUeykVK}DF|SLZhQ z`SbM#0&=MD0-6&h%)w3ieGymnPC`8SMu9Yq?%D;B&{;pxoRe9X?67Ec%cPnEK*sf5 zYA4k2a8rj;QSw?g;rUD$$h!HDiFqVjV2#-|?~SqEG%K$9ZWjSjXRfDJlj%fr zENoKS_3U4h$*S;4eSaZ%n*6$-;p03b!HIne-ZKUTA~)HGVik=rZXfcaSSSgv~Y7U(^0r&}=-|)@%DIcIi zUI?{bb|9kU8{=GZvP4o4wrJ?iXNC$}Qo4r|Z9|tq&5Sfi4RGtY z&6E=cZ_jOyohKPh?ad%3xm$62;lbqouiH1P8Mty{s?MoKSyCoGXOz(adLh%>U+QIh z!E0w{K+f{e>0AFJdkPnl=Q(G0<`;*{4{8;}Y%Pz-O+-5dHJA}yJDjUVE3MIvgbZyy zM5mY+h`A%9rdvZsY%B8^Q_%@dC$c>G!f(NxlK7<2%HG?NV*G&ai``v%NqQ%kTW!`&w$rF=ejem>H`tAE-)I!a*02QMdPp?` zz~)W}IAgQ3?!h&dg!APRUj3KHcI>LoE&eIev9$#f_p-9Gab)@j^*Z9Lb}ITD4I~lr z?t{vmbLPi2Gh~kpQSoc_8}Uc)-&~t+Qv*hOC-zX|NO)V=@~`2+!4%IM?cz;En~CEK zH@{qK^cHR+x6d_hKVu8e%cU%dxrE$O zZp|eTX)ZA&X1R>ymV3EO2uUX7QZD->t@apL5>t*X#N0-EPT>3=)=0 z_r+qk5Kk_;u!l9Xb4uKfv?KILFk_(@BN|PiGl)u>tKFO1>a;0}x55ZH15iE*obzqe zmQ|-oMnCKHAWJx^6~BSz9S@)LgYRS{W|9;qj0aMZo|yCOeo;nIz-(hMbhm4NWw7aY zT_ed3(;*SyK}{~CO7O8*IHXNqKUZ!4D=VE37e1SNlXEmf&f4x`y7DJ0`NEe#l2ncA&haKI11U2ec zVr&76MEvTQmr6^U|6|Lw=-s`0-$fE8?<)*bE!VbNT!rEpXoK-L85u<-^YvbNRyK(I zA4=VBHu4st5f|m#TTG=YM?E)Bhx+zH|PA=#aA3@Io6*vJ>b5h}`~ zVk)fWSKkh~%gI+qPnOJI8n2nX|FzT3J?iPa(b1KyrRDCB(7^X~nrEN=mOOzzkXfCl z5&!(Fl854o4x(nE_Lj{+EK-l-jV153SSe!KvAX`*3Yi>!y0i}TxR?`d=UZP;L>Ss& z5hRPoRFMb$Pst*ebWZ5#;`3}sm`ODR0OOa@VbO5tSIkG-(-Nd4j<-Zx?r<9;V~Q2~ z8>Vb&D=zU3AA85ug5zZby)bd2L=;Xs<3Czil`@J6q#vN!b-^sF!UK@*;I@QE{l(Tr zw+80hFHKCggC+@r38Z$a*9>mOflT)^ym7|^>AWxK!w4+Y*MK2~r}?Td0Yj64d=M(! z-ndAsj6t+YP~2$};wR_5Df16=ldBl-iWCOLAX@hpxXHghV$EI=ZIY3vtOpsx0{HRO zFX&FhG=S(z1TO~3=2f~0HQ|n6<|mi$7UP@8R##tSP*t^l?QX9(?SgD%&2Ey#OiUsF z3*{2GIJl$)=vrM|p{LPNSrXn`2?QbdM4-mBrP`H20g2={1yZtH;bFs#UKs#5Rh_As zwWezqIT#&X2Zk=kwKZ3?luXJiOP0G)UFL{8Y=LXTU>>Sok7yX82DRxE~2O?jbEw) zZ9DH%0;cVS3}g0YKJY|DF{@@H`JjP9?XzlUk4BG-j0D*VEvkd-_!aVR9*0CnCYF2Ee~9D|PJ?{E2Yiz^FnNT2cIKdbdQ&@=iScX zmzM3((O{1X6upLs41KYvm%Df9v$dQ3^WZaV21#`}zG}yPPG2Ban6izV#bE#5G~ZFH znp!x8KPR5)sN>+YJoa>3lR}zE!a}^4xa(p>LLXm}6{V=P+a^wk2SPws+JjOYbC4s^ zCglUij6g19>+j#Muv=}UUrL!6Im$Q7v?Oh;6dPFNX211i;G8qx+wj9@y@S?rs|iF+Sqg}A9OgZU{BNt5ugcI# zmVI@THY~Wr!5)c}CM;aAT*03L@}jsS(rDmW1=iL_yK~cDRE%N~fU1==T;*Dy8w>E# z_cc(96!S_((R>m38!JhCgUZau+oUu)+Pjy|cn9t7&I9y}%_^u{I5a*~=II`op1xD( zIqbi_WZo5_tkhEFaYpT!m!A^ZkKEB_$xa=LiC0os7Z5rdOj9I4SMJQ37x456HS!p^mEmF(+U z_e?~pWG6ppc)_1??{v8Rpr>j^j!2whrKQ{#|2kmqvO!QgA9FK3WDM`=}VXEq(F(LN`My+ctFv@}KcXI6h z+DPdB0@#M_?j8_xrPX*OVR2pWAEJ0h@6>(7VWGN~Ml+W$yCky#zoXSegXp)2G~eV- zK2b(-@>`swHfxq2oq{Vj`?L4Q!Ep3gL%CuF>?J9g|+Vdd=h zf`3uvc_J8teZ;b#^f96(BROoi-=$@Qe$axUZ?0;a7ECouw!!$GZxlK1tSs*RT+F@_ zlw_d>KxfCLwev{!<9?4^Y-F#!OTJ?}BO&wpH8`mV&bgum+*duz zJVlfPXw*LkQ_dB`4nk?W;}X*lDju_OSM$_oU0q2fure$JBDW8*!vd6Vxg~qv1DseW zJgpDS6z~#F!j@Rg=Pv`=l6{YJsK%1PEt}Hs5&r6#;0yrjL?pCv4Zm)n?wIsB&L`b& zt5Y3*9Up_`BXKF_ohvJ|$0eqjy&}h=@q?=w-NU&4jhU^j+U4$__jur|IxeJmBmyy9 zGJuKZ4+}FeKf#%rR35N4TYdp&nCspq6<^f@g)@h5H#zn|iXKSlI3zvcG3< z7oot%Aq!7I$9t(}JjJ!$3IOW#{$rtg76{v&wQm()Y42<0C0OI`aSqK2@%T%(_N{m^@XWK5nwrOG2sB@@=b(^SS# zznVHT*{X8NPQm46mNMafN%YHdUx#<+7#Ne0!t&TO#H+g%Y#gRDpbqLj!5bEXk%IG! 
z?Y+-FASz1YtIDYKwIZD3tPkGa>ZgvFIeMl}Yh0NYy>R7K)|E$dZbH6_G=N!72S}>@fouZCrGq zHq!GZWEL39W@4pGETFd>?zCA3ibY-Z-J|hixdnA%ATrmJu>$ybWo!pY5j&uN?phBb zYO?Z=t<*D3CppHp!7xuRifYzh|M~DD@5|R6MkmJ>-+iV{ zC0T5(kMC_~H|>w%9(mU^g!t^P%&Dh7WUaXNS8{v^Ta9fEV?$`2_K2G%-zcy&DVV}a z3vza;jgDWIVdW*y);?2yAWCG+%*+gp?~aZ)JXlqjRs*3IAx?vN_x_5(K~>_x4_m~5@Xo@Xw^~P& zM%@PrZ0QZYSn|K$clFg3i?qHZq!pC=^KukPhr@`F!XLYttD=zjb?`E%#0|twf^JX! z=Ptg-NPX8`fO&MbjagEPXDKo5g5ukcNHTD4v-j(UiW@g)q+J$(!V=7LQtW78#k;lE zCEF$P;3)mPt+cBLV;%tRJVs2rgwaw3Ru6yc5DejM!j%;UVaa*eO3PdmpsIu>iVdni zV}XNW-H_ehzjOR(%g61gL??EHINYibSJeIBQBaU~iVyEyiZ+ZF;ml!xrzxwn27X^S zscJS^{#X-~eoQ9GzPK4ZjEOFSiBsgdPv`BsQw~n)FIn5`;2~TGc7$%eHJfuqrDR2r zc08RB?feqcF{Ub`Szxn%Y#YqJUDNZ37tFHl96XczfVtIJTjTdLuowKXu(-ZG`se4z zOM}N+QhV2%_IClPaMY~v!E*BREyw*;)qX_a;95%|i4T7_gYKz<0I1@;WU%&kB}O!g zqy-3rG}_`|d=!HBNE79ZrO#Bu9IbP9l{=8TDq{Dn- zPgp-XBq?M?UFMY2+S)rKHYmm!?fHg+fjGz*$v#PZ%g1N%QTs!TcwS+p6awYXc6OZi zeCO$0%R$y;_kg9W1XcLdc1C<_%rB}s;QRZQ|OM;k2Fxq!1lwdJB^p0_a7TR3XZ zYoArR_p8n;9^0=#fw6ViOlMlqxnT&P`ydoxxf6*e6s??6gSeTsAK9 zpw|^9*tj7O5GofL*ef2%I2bOKWg(1sOlrNt7WiYl?;P*rCk@4=mY4xd&21P?sw2?r zXd(&Ar7Nno{uhfu7-4w!oVmoR9kGZ#fgd$ln*8C3OEA>n$mOH|E<#=78*qNc5(7Z+4D_lF#JyF&M? z>*}_5Z}qlZ{?~AmK4D=>0cAvywM%#!vghs%%Omg7$1)P(%CFl0JpXw;ma1d!%c!MZVp~7ROuS@uxKsfcMFC4tG zE3V=bS?KzGh7hQ(J_R2OudFMa%`HD+c%PnWvX3?eG`v%q z)rPS5vS!FWV5BetY>ov*0~m~-+HZ;_-3D<&j9YtW^1nJTnP_X%GX~3~fd_4`&soK2 z2+nmfOFt_`jQIh8XP^T~ zo7fvpLR-E!y3?xV^aT zka&~5TgjwemsSbBSyNkvG!6qk-Tq4Pp4+q}BPr!%thBeQg6VPIytLv~+z6Oi(LBjb z4Gp`aiy_%qnIKuxNoIfcaEaH5XZeuLylH#?=z=Mf7ppFO46LY!7J(jp5tyJAi&Ypy zxU$8lfh~6FZr?XH`_oPPGi^^t2TL3OAMx4;g74G-k+IcB>jT#E7+LD1YeP-o)=rV# zT!w0?$e%@GA_IkRj(f7u!cy?6I)&-DH6Tq-#j((m=#U9TC#HZAyS_L0}CMTiJpWj`Bia zYu>zA!a{9GIdWKlw(?Hqp=b(r^V?l;5pH->33B(&&*0qft;WSI|MIcBxx9eWE8q3* zb6l8{RgT|q&?)xABFC+FF$MV^wN>TxyU&Er1Eb@A!RW2O(?)T9wxBy2b`uA}`(ysB zfb*%wx|(vqF%XS!Bx!S~pd3ySAcK9g2I#v~ZC~S% zjl9VCm-0U7d22m-_npjMT&;obn`Bc|xj=lvO{;A74{dF2{TWnpyGpOGX-$d(y*2u* zb?;av?e+FolwRshqSAC@7o>1IwsY0*63=C^cxr!zQp|P2BembWPmxiO%mA#RsQXCO{H1vrCYJ$%_sJ1U zcJS~wY&$2ZV%^3MtVJNe#ZQ@5lI2LV`1i#7Yqr0VFtVT`!vf3gx-(*Kv{dJg8X-d9 z2h$h{CN7qB_N9Nmc%qgVV@d3)oWU2FMA7hiQ}uRJZe8!HypMt{+O67Zx-$7wypA*j z%kP?>RBphypNPx15FruRp|Voi^J({SBTY~K_gAI^FXjWE*Grye{FT6+L%b)XMQtsy z+BE`A-Fu=@nzu*JyR=%&>$Flv8t%q%Z@c5nJ@Dtl50r45z zKus-AZTSzhKg5&P*Z+wv&#oQ9|K1&KX=`&H_dAuET6D{|rUp#Bz!|>jQJ|zU-xC}m ztyw864s^o7z?k==(lf+#(i|V_&SjpWa5glGX=g6wocxZF1yFDrks>3w>k?(uvwC}H(}NaD$52i}BsuY>Hpo`@xDn6n`yO7!KR+lo z53d(h*=K?#e_j$x!So;Lu2*yWZxhhy?Kka?@2~Ig$?tnq*ByA(eFuK^mtM1d#(L?{|4Tt31!P0opSC1YbL`~Xy2q4D?~ zaEgv6YnDT2a=GtKREfUoaMpUd%za48S9AZcnGvAw?!MTqmP zty;w4J%09!{C-f^@V*{gB!YLhy1}eT`YHzUHZCOng8nOtfOXMoTie=f_X}!omdZZ_ z$HLvjfZ z<@YNo^m*H+yd{F;dXQ6h9&#p*Av68?zzg=CC!{BRk(DGY#`w8ydi}=T8;fN80cqdM zfwFFKRSb{n5o~7&AX0f}ozk_$NDdrpxyaLC`3~F)$vSDwnyKH#z6>b|qXG>S02EGw zJ!NP9a?8bUI3rWAV}B?!8>nH309q!RkQV#Bo~lQsl_lUF@j8~wXJv_d4CDCyLqgON z%Owl@m-gEBsUBaLOnSe0k@2tipt)PRzX5h<*uNSkT^h;lU3La-Pl0rAq)Ib(HAP@i z! 
zgos5(8U$UyZoMl`@##^uX9 zdYVza9U^T0s>w|v!02katHd?^RIgX@bw*9eypO)PK>;brI%B}BG02ZKB__+t-rt!4 zPnSw==CG#?ib+Fa4Vl8JXBcUK^Hp_#gXrrn&{BlvVGn~E^_LM9ou}m$$Qkw;8MwleYVy3v?`Ju} z$Y0e=P!v0br#wR;fLx;)?(Tm!DtY}Z?@&j{P&1d776y@3lZ^GjP@g{*k|M;2#Fxar zS$byWT2Ofaf?xQuJ~kpC7789e(=_Qy8KbH1vhbfE#pTyfIRlv82~>d>NSxt76^-$r zQpl#uR2MzF28aaGxsXMUH_o`zC0%j*JAe-%VE`Zp^k^g^A{AiRTd6W%DF!#O?sq zQ1YcN0qmUbhzh2oaXzyTjPk{au>R%y++bg?8sD$;;8V?#P||WgHWqg{p&-5tL%{P~ zUAve5P;Peb=g+;p@aIb$Q<(u{_V)Hcizvx!VG|eJ()II;4TI=cVA?dO7OR2;nj{Qc zhZqC+aMj>2JCvPFMMcHLkl%i8vmNn=f8M?fg)^P6#OHbm;qUh=0alodWaSulE=`#T zf=j7EhN$I&=3|9^&sajzr^O*85=$dwZzD+EgyyyF3B!`e8I8Lm`+tkF_Xixgxo%|< z4C_~2_>!j@64bqdHrmhNc@W;6Ye65Zu4#Gm$jBk`Kb(9}P`Ohkk%U!aewRLDq1(!e zoxkP~$WZ1=MEBV)%5ib!{H3upDZb0kxXyC6uI?ay1=+lN)l=aiNLM z|KWr!b7!JrcQ~zeizN*}ED@`fS8;_PxtZdm`ZCGIRFDlj-NntT4cj$fJE+yzlC}xz zra~_Ew9VCd&EBrp6>jMaRI2YmG%e36Y5$k2@Hz$Z+)ABlCnfgvVlmrX_i9@|RTVJ< zY9N9Hf-D7i##-tE96jR`UlZz{X50S>Xz$H#B%7XlJmJ(icklbcpV~%c`o$G-87^p4 zqa$rAXTE}QY_ehm6xVYZjULgB1nn5tbPH373%MSHL)~shM4(}P{%tPshas1ImZl>8 z?~o-yPpUp?`jS5PP!)||jCq%KGRbsOoBQ6F!dD#vr!Ep?Ic*lwtJWK@Vb31j_I{b- zj1o4D-u=m92Gf*$O)r_-kK5bX86kWTERBTrG6q@;f|iPhB3gCZaIz=z1g;svE!!qr zMs<3;E9V&V-A_;TmXwxKT{lDbn)V%m5p4CTnoWNKBOW@4OQ@)dResv=$k9>BVy@2$$(K%ceB1x$$8F#Oy zr@N-pgOE5{lmj9Y`x(W{XU3wshI|J_sl9fa*XaHVeS|KDH)Hm24x}fa0K3?$4RNnJ zg*DIx6YF=xAcqv^Dqzuap(*2zCk2!=xRQXtx>a#3F{b%l9_VZWSvII!o6J{OL7cK& z;OQmv8#kW&o)jhZX%34R^&mMYBy4&z|07Mv$AtyiuzNjYni|GHd}7hB^k|%!_~bgB1h zA~NMhBG!#Z5h!U=KHvn?m<9YKY*ph6%!381Q3#(;_x5N=x@k)xn9uC|9w@SIbj|Od z4e;8Vyr-6Lm+xKX(x29Ykd4uw$HZ}5GBSn~st6U?rNAs?C{Rg;E&kwng+ZKAK^NaY zk$?;0+eIGiCvTx3qgJhxkT;bK?jE4g$s_67_&h<1y|dmTcXxY{v2G2v*+J#{MsUDp zV-BI14U_M(sEr9idK8kFF8vG)c332jPv*ibAfn4Q9D_Sis$4scJK6(+er^2^PnFYR zF-RoTP?{nxno?BM1>!3uRy?c21&Ykwyc1Vj3l)w<6?`_!0^sPPC?cJ-qEVK^_LBpEAJ z8R#{bG2kZP1P1;o=~#{NOkreRgID0)t(a=G80ai&-0<1x?xm0y#X59(9tg{3RarpRYcQ{cis6&82jNAoZ8PM-`f*AY-$xR^DGT)+-Awnxz}aef(O?h zGaiuh>*{Wx=e9}+HwX)EHme??YQOV?-u0v;HT@XlqL5dy&8yRq{66V3;I6RtD8TPg zL8X2k!1VeZs{{FA2Jki98VDeuFEaj;DyGP84ky*Jj@r%IFlma2DR8`3UqGwzju8`U zOM=%vCnoBTV6j}BycV5&uTIzIIC?8nD@rQQCPeoyZwwTbX4z?$F~siBNH7oyXsu{< z<|><#0fI}{8;cFII8`2x!9Bl%WGRGfW;OZ1lM4HkS0uJXFzZ5_;v#h56q_ zsg8oV@^TE2Rg|7K6VYtiUn-b?P+;8YS2`JN!k?|K>?5o=P%FLZgp?p_xclX!G{D607}>~}Bb>P`;+|7QiUz<+sJ!(#6dOzJFIXN^C&YE2B4N_b1xis$yH*u+Pg#3&b6bHLAv;UY2L(W3GF~~waaw}NZ-FUrTBvg{u*FeY<5 z=P+QAScBc!-@UiJi3C|uQR?TeLoP@9Mc$l56u5efDAWHdnFs9OGKW&$dB9%UUy9cJ z5mLj+=P~Y~Vv3%Q|^2_`bKa=c3O}-TM#4 zcgVI?0B_Sg8(0iRuk^d{R;AuaKzelZAci^H0u$z#NCLhi83IIetDI`ZkL>mw=;Da%}rn7-@5UaIq}8ogj@r8HL0=XlzM-hV$` zA99hd)Hqw)BhcMZ&B15$^%|S_&5r`VsAeMTbpYE>m`s5sf9ZA5*rooT5ae+@tI_$L^bG(!Xt8?4woZ0c-R2VNr!_-Se?|8z&^nF_ww&b8OQHmCa+SA{h zEUw6n{cEe)_DN?tl2zPFMiaQ77xLd$9U2vnp5ocxmw)d&Sl_Y=AdI8G4{}RBPZpe$ z9GqEyeDx%g-rO8v@Jj03lgS@40H^}lXjRPcNI>9rH{kl zpxeU_(-ZH?aIRz=$ylhXUL3A^(%0J`saQMY?$8vXEZ?P2I?y7dV}*OE zf25()1`V~->b}&@A)Xf@Y!DAS41ildcUGotF`G+sIkUs(DFW9Dp7{Q3a9n6c8P@b{ zd3}n$y@Fw&-QXq zh~SiauOh_T%Y3?SdN0H`*aE`^|IGTkvtyS!7PpLz_4O=!YPOtR7rM2(7#bQF(t~_) zSht6ZO~2#fefgk3_OiIiK=n*sz>}lgBq1oS4@#y2g43ku?!NatE2{uW@=M5iw`wR$waEilF4NVkxx2c#iD61 z&Jyncq&OOi7pL-LW8sPp2o26xQ*mR5M8s}Y^qJ_rQD^ErEbP32xDV~yHjo_gtRg0n z`S8E*gjy(HP5y)%m&1AOQ_ShyS|%NBc6h1j>w_s4qhWOZhIK|sQDqMfE{?xnSyg4% z^^Pcjvs%crW;5t2zPUjZVwmSf{L2;mUeP12xwlBKyh)=$9h-sex8rPf6P__(vM?52k= zX9`@?Qx8kfa-{s0K7}U=95^XCu*z<3hHW;)7@Xr|kMs2~j&-$9P2+Q@T+rewYB0)- zWe|+%JAN?+h=!CJc8)M}tgRc-^6B^M01%leR|!gd#gNES&SE?5M)Opc!Tq9wjpI2^zvv&(UFC%kv9uxAf!U^l0{SH7!o^TYr@{}Zd^?R_%nK3 zl-a+zE8mpW+h6-IaKCGNYkwKFJ5#M0vc6Cqu&&zmP60!KfgT;uxDvmV%p2pka;T&& 
zSP!*4i4RijhX80V12*%E7lzyo&EO3CVJU&hevQi^d%_XhKcg4Dyu8A+reB`?gw*>iLH+Iz{#lrD zKlt@!WnH0l=^~1festeyLE+=ZM@R2JdWz!hOI0W3 zlP-X-S5Ove9V3MoLWuLacg1{tSZ`e9hdd9r9Y02FHVfS)Af_JHes2>?N$>9D4BcL+ z8{b`6B$|T=#ZH%F$ZL{&loj{pNYiGwSLpT)-FI?5(KkuANCN`cd6_#J*>Y&;D7Fz=iFAUs8O60Rn~E-@t^y7{#7&%+wwW4$~d zf~`|UFayCSxR8v82k{jQEVu2e+WS-io#b#yIJHe1K}w2MeHap=g7Nd{7ibvqs8l&2 zWSby>zKd|J8#O4H3Hp1*ypxnPvXJlDR5RhxPa)@!$k-Sl8@H`HC^F|{H=m0G@ZuX{HK(!5APX$~tf$R3v@V}P&B%G7$MR#rGAo~y%$IBl5q%F4C0oeU&tF<32aGwfhyBB~ zg6tz62ZSBP4io6VY46Mn|0MH?w%SQ^<4MO=5?)ji!WB$kkkS?W1A^DLR_9^t6eViw z8&P)792;)l`Z+U1&DW8lUZI@Z*^hl=hEUcXaJ`s0tq|{VT{>z_5u9+MfIN^9Yum$S=CY zm#b4aQJ6U2Q&fUL6oteX3!alVaoF{Ld($Hu+f!kR*z7V(OC)rQnL{lV2 zzr3fPLyGuP>EV|X1xsDBb@B}U3VE761n7*tYYW7%DXoxy0RT4k^AM6~H zY-Ccff}|$KTOsn9Ez70g5#Pna53bov%x;QCD#RC9ejY4cRC57yK@7YH$@>cHBMS5V zii6Ujr>^M3`6Vm`IXS|`wV#^Tr++`3^MS|;NA?BiLy-xrcQPe^6vh3Bf`5)c3O@^okj0c=jJUp_K_=vanxR`xMR;un+tJd%MpV!ER{hN2HyTo0Dh!<6w$ z85|(X?bw`sbJ7I_Op6( zv2!e@e33opG)9uxthOX+z|$8^a&k!Y^{h~_FCSeqAht|%9saqD@#0wX&B zu*rUQh$-x(Ey?l0pk~A)687Gl>N+*mcSIvky&iMWde97hX)N(TYah1GY07 z7*f-g!PbOSZ=u1VC@}YD&=7hB({*0sd;dgxtuVfm7m#gqrx4H@Ob0P^Nwe;eXtxsYIcdi)2Z5ml7-st!%%diJw5cZY(h$wE=#KC9DR-ojVmw+91o8EHN|aJo%f3v!z53+5bmDdH0R0#82L zoKibQdIvBb-VqUOirSL=NnLvA5e!{`DU<(Ru4`a0Su3W*btRX6*=4fA2cq{pNV!ezazSv`Aj%a}y7_c6%+;N6o zfBSxdF)-|Bzo2f$x~|Ia)AoR?M~#{+cG={`dXC44nsqy#dkpBo`jqd9(*Mh3T`}!) zt5qSs&9Y{maj9Rx(Uf8A{M=d{oE9Y?*Cc+i z5h1uAH2m;KE>3hIT^p>&f)*liRjJY3?R2QoWSoy&SS z7b9Wj7!TB8Qc-4&DCa>+>s{#|4)^4e#Yj#aoTU{%w#d_`4*PaK;X{M!K4EXtF9hmn zWOHWj`Khd1yLI!LHHDm|nC2~sG*;!n0nyFaWyy`drA-Xk_k~=ueu{@k%|;yGL-$PG z?p-q>>FIucX&w>NQzIojya2)@1|<~}=xgi~zqe-lnJ}Fk)ImoD#!_y+#~%>~ws0`X z0C?y+?BCkYaQZI`MC&jN?__+hhvsXR;0Wnj732)h4H%It3Bh=oqWou^wn7dnEq^8d7$B zOqL=*H8k}1^=%Od=!u-j;{$Z%=;%RQNB^vkn(%47k1jiZ8TX|}bf3U0D>}e`4CtFz z9&W4*?r&@|vqQsq?4FGNygLkZnxmf5k#AW5ng;eaR)maa-ww!06kCY&q*-LIaAD_qxpfVJIsXqpIe?xVo+O)!)!P zMaqxh#YfIzdLlX|IV}sEL^;M-=?p);&&Y#4 zC>UDs1bVeaWqN00L&>B+nJG@OjD64~H#N_OMqI~KdbDt!A|KZ`VvGKW5}7m>G((_V z%3dX5Pt)=W^ZvrHOJ^Z(Z}L0=r*Sul8bDALd3gD{Pszcx2pg;^k?&kQBfshp1E<>> zfy}Sx{bUfc>?ZWNZ`H^Gsg}-lCMzK|NM)pIoCk|3Sb00@@8Ns- zZI0(&qS(oBEHuezW!Xo)yWc5Jo9Jgh))=(@uWbc(J~ek%%uiaB8FpmZsY zLXrUcZ5bP%xVYtfS3wHP$(2%0{c%*E@WOYEp*J7sd;~(Nd{lI~Q#tZQnSjnH2;*9; zLGpezOXfy&7B#-@Ife+|xgO}9YzmQ04ChVhx^C&%6uLFgY&=Kh{xoR$qLe=U052;~ zpPZNBh1UEj3*%=8ZGqZ9@1?fZr(bLhNZTK>9jG@(+#)Mt00zTXB%)W4w$tQ{7l^?YQ^GCRiqT#75HJ;sC zbxh~QiX_&=aSGWyiW0bZG@2Rl1$-_9;8Q1l9>>ewBn7U{g;bArvkVY`aF>?dHIUSF-&33A|dk)yBI=(f9a=sylrtj!hI`# z=xzt)YoWtU>4+(x<@QN7MGU2I7>($wIwEo3_M}LJN;)~3 zKQOTOE2ZJtY#5Pej!mjj)1bEyR3URQHTzwKXJGEfE3KOhxtOyN(sV11{Osyy>q9Oh zDiCvDZJlSRnUaO?A7H-r{Pz8el6tMIS+XzKtk~v9eD}A<^N@a#TkOKZU}E`UDv=Ww zJV0!cIErW^PrPZ8r3F+)l{oD=so8bbFS?lqP?T|awR zzf%e9Gcm5hS0(ebmd7qJJCkRk2N>No1}N10jPq(wMrWK?Iv^F6-AVr@ayi%7fD7_5 zn2AP+5#C3AEZg!_Ri6E!cXf5MzXJ9ubgmMmP&oUDJt) zMA+g1{^qvpg3Mx8Pt%*r7RoY&f4U%j#h!Q-+CNy+ zeGJ~;t*+_Tbt-XUDOK}co@>eE+2SKem}s1$!)*{mD>80D9FXaC%+d_m5_bB%{qrGk z*n#}rZmHLJ0~`{^3xR&5k_i(!CxlxCQk{RsxzP||35bL1O5I_4(v)NbQEd?)hj`$6 zUz3N#m#fV7n$k>P%6FX$@Keupg5nCQJXA7FK#h1-;Qt!yXH3>m?-dsw1m?LT2@sk^ z&xxIsd~JDJ6pa{Y2S5}wjJM=LAjhjEzv-3i%>Kb)&g+ARiqZh z$6p_mk2F09{E#HiEcY!CQR#dmU@`LFg8!vnup63JIQ%hSBpe2gTS_J|-|yyDmV8X5 z3y5M2w4uvAL5qTTaHv%fQiMs#F@1TE5^F^&%a0d)Q#-Sw2Qoig>F^v?>sM^W`~}kZu(piNw`pL zwe5S!T?O0e{4zWH*rRcefXlh3>1s~9+HVKF#)L*eHuRLR>1RR0Ddk?@m_K!kV~xS< zo;bQ=$nHd@G*okMO%?m5<>$p7{Gd|lc)fOOA<8rTj%~{1GTEmj#1~^Qct66|<&1}< zQL(yfJp1qRY zX*8Azp+Y5&3u9>Pntpzfd`z1=2kcl8w8vnFkQ#w=Ey%;h*8f*1DRF{f%U#ZBhn@d8 zl79_%l1QambR1fIgeEgN%cM>j#C?*a-O-}!UhZ--P7dM7yAKH3zqg;2<17rrAJ%-P 
z_P<1yfT;`e{IKaXSs<5m82-(gOTVWEG{;mB6hB*htlxBaj9(^0$ph;Bl1WxXci6l! zBPNnd0R=S?%ZKLUW}sx=#6N)xn_ZYMW<01Kf!|rwah}0qbf7UcuT?7)P?WTHGJL$! zG4BAWgb15)HbwGbh>0fn^ePGBO?ua~JFVP*75;r7YQhLXW(jm?n328;Sj2hTr(!XK ztH~Lh5Yja`g^@pBVzq)X(a{CWWEGrir=dytSB&%L7>cV9f`U9x{GuW|`57fo5hap) zQ(e;`VMvUi_3&+(eucpb;ufp$s7{UJPfB>`sy2JXh%O_JG;JhX`aYWoiMDo9RLK0Ne zf}0EE5P9_t&KvM5IB1&^6HCQfhR+UMea#<6=zv4aeSoOR7+zZcaA&x7z`Y9?&q)x( zw0ihX)9vVKy`#30oOE4h?tA)~O`R(jnSu7#U;_Y5!{IotdX)`UHlJ~;d?D%xWoP!@ zy&l|r=Lyp^6!X!b1v=}jNCNdmz@Ix!(iuN}1hccVv2k%&%#(6ycq>Xe`S&oR#$2;s z>y5%4BU?=#)#`wF*S*!yy;8@}-4C3};2Sa;(BS*A=3?(tm0v$^$>*tOKKr;$E;r|c zo# z05qD{k1gi$h3>Jk!xOi+2P0Un!kQru>ze>)7Hk%>x*l8@)o#D zu!WgRQTQU-%}vg225MZS4_Ec$s^g$=0BR^&9XpMmTd6zikC|QYKSvUf@88t$?)e`_ z=N`}W|3~o=HJ36{O(m_;*AUGmCZUm-`&>dUA%rBCx#gB7(ny+1%q=##O}XY;!o-+r zA>@`=ETrY@&hP#G^=I{Xlx?5)-mmjI=Xqj9bP_**FU_1hMTk=xxNy$1oYP1Sur6^> za08*kTNPztZ5Tl$#YUskwlqa32A6J`t6+KmMOvvZ%B5rT4b7-8GGx4%L`rq|4R__Y zHt!)`*v#I(Q(tzQpM42dHM#(Mietjn?7K+V%WlL$9QJ;uKwRtxiGK)nCeylnCm|j2 zuN34?+T2NpeTJVS@Wt>atkQfh+U(h{qohawJH)Q$O4tj0I}kAjnA>Q}i)TNj){ z0#yer9vIyYTXOQonAS6n@^!zwLsgyXEX@|`C8Lu@nLYY!_guBw^1 z;h5l$Ge*{Is)trWi}aAzW*!wufs0&rKUedRyhFnFosfI?3TvsP#kx_LuC;c60`y&T zFPr-*%o!H%QdoOR+?6;!%s~iJ#GxyTPUqbKtBDRa2&=(lI(MCyudXRcsnLFr_-SBM z&u7EBXZ&8w3&L%aBh9;WK#%L+U9RjmGxKGI9Sgv&iquc+{@y+>47JT^T4^f7J@jGl z@I&2WA108L=?ywJYYsN$gp@g+n-a!R3H}QC`PXwqEP!_79dw$~alC<4Mf$V0_O+%@ zZ6fMoAB)R+R-Nv|5rpxQmkXo@0eU$=pJGK!iEThh!fq<+VFdXe2Q$%2m>#gLfYM9E zkuxy2g-UGS^D_|h52_&rUt-BZt_pk*ndTolDV47c25{I)`Hz3_dDi!kF}Q-nn$I(( z#)T&%UE)Z*sy{5Tc3A3Doy%AQg&8@*IZ6$!ETzgfY@p<#=kpZ8qp)1XX@`mgq&xG9 zyEPavn(BOVMdIJ?{IwLmj4VkL1fi_8u-uHHO6LW?VDwZ~IddI5UheT!&=2_AWDi+l z(IL#m;8DNU$a~=HyldLyE0D(O+h3vc=)7TeZ&JRf^o-0zXt;Nb-MNYuK?s6wT;D$B zGlVaOYC0tnPY5aEy6-#CEe_PWg`MA<6Tm@fF1WLFSCRKbe_G z`kZOwWoNk6#qx6UkX{L{*FJHTjcF?{{kHdYF4A{&MOP$YfLB$qdT=&=V(a(Z@@cm( zHH}_D_m3asO<}FC1+BR*%x}38Z+o@JeZYpq?R(rNvPRFtDlWfD&eR$C*-3_k*DGyrYL zzW*#;V%iwl*%11)6jW;d=Ud0=3w)fO64y;M&(c>L30*Qo;_e1ktI@v<3sv6NqgYk? 
[GIT binary patch: base85-encoded payload of the added binary image file omitted]
zJlQ;{!@FVI;H?DC6+J-tNgHo2K%LMskUovQLq)(L+Q6^O3hzbbel&Oo=mPnc&V5%Q bzm8AbuVoE9qaMlyfL|Cr6WvPfd(r;^ifYg6 literal 0 HcmV?d00001 diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py new file mode 100644 index 00000000..0175f155 --- /dev/null +++ b/ldm/modules/image_degradation/utils_image.py @@ -0,0 +1,916 @@ +import os +import math +import random +import numpy as np +import torch +import cv2 +from torchvision.utils import make_grid +from datetime import datetime +#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py + + +os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" + + +''' +# -------------------------------------------- +# Kai Zhang (github: https://github.com/cszn) +# 03/Mar/2019 +# -------------------------------------------- +# https://github.com/twhui/SRGAN-pyTorch +# https://github.com/xinntao/BasicSR +# -------------------------------------------- +''' + + +IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif'] + + +def is_image_file(filename): + return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) + + +def get_timestamp(): + return datetime.now().strftime('%y%m%d-%H%M%S') + + +def imshow(x, title=None, cbar=False, figsize=None): + plt.figure(figsize=figsize) + plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray') + if title: + plt.title(title) + if cbar: + plt.colorbar() + plt.show() + + +def surf(Z, cmap='rainbow', figsize=None): + plt.figure(figsize=figsize) + ax3 = plt.axes(projection='3d') + + w, h = Z.shape[:2] + xx = np.arange(0,w,1) + yy = np.arange(0,h,1) + X, Y = np.meshgrid(xx, yy) + ax3.plot_surface(X,Y,Z,cmap=cmap) + #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap) + plt.show() + + +''' +# -------------------------------------------- +# get image pathes +# -------------------------------------------- +''' + + +def get_image_paths(dataroot): + paths = None # return None if dataroot is None + if dataroot is not None: + paths = sorted(_get_paths_from_images(dataroot)) + return paths + + +def _get_paths_from_images(path): + assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) + images = [] + for dirpath, _, fnames in sorted(os.walk(path)): + for fname in sorted(fnames): + if is_image_file(fname): + img_path = os.path.join(dirpath, fname) + images.append(img_path) + assert images, '{:s} has no valid image file'.format(path) + return images + + +''' +# -------------------------------------------- +# split large images into small images +# -------------------------------------------- +''' + + +def patches_from_image(img, p_size=512, p_overlap=64, p_max=800): + w, h = img.shape[:2] + patches = [] + if w > p_max and h > p_max: + w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int)) + h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int)) + w1.append(w-p_size) + h1.append(h-p_size) +# print(w1) +# print(h1) + for i in w1: + for j in h1: + patches.append(img[i:i+p_size, j:j+p_size,:]) + else: + patches.append(img) + + return patches + + +def imssave(imgs, img_path): + """ + imgs: list, N images of size WxHxC + """ + img_name, ext = os.path.splitext(os.path.basename(img_path)) + + for i, img in enumerate(imgs): + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png') + cv2.imwrite(new_path, img) + + +def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, 
p_overlap=96, p_max=1000): + """ + split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size), + and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max) + will be splitted. + Args: + original_dataroot: + taget_dataroot: + p_size: size of small images + p_overlap: patch size in training is a good choice + p_max: images with smaller size than (p_max)x(p_max) keep unchanged. + """ + paths = get_image_paths(original_dataroot) + for img_path in paths: + # img_name, ext = os.path.splitext(os.path.basename(img_path)) + img = imread_uint(img_path, n_channels=n_channels) + patches = patches_from_image(img, p_size, p_overlap, p_max) + imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) + #if original_dataroot == taget_dataroot: + #del img_path + +''' +# -------------------------------------------- +# makedir +# -------------------------------------------- +''' + + +def mkdir(path): + if not os.path.exists(path): + os.makedirs(path) + + +def mkdirs(paths): + if isinstance(paths, str): + mkdir(paths) + else: + for path in paths: + mkdir(path) + + +def mkdir_and_rename(path): + if os.path.exists(path): + new_name = path + '_archived_' + get_timestamp() + print('Path already exists. Rename it to [{:s}]'.format(new_name)) + os.rename(path, new_name) + os.makedirs(path) + + +''' +# -------------------------------------------- +# read image from path +# opencv is fast, but read BGR numpy image +# -------------------------------------------- +''' + + +# -------------------------------------------- +# get uint8 image of size HxWxn_channles (RGB) +# -------------------------------------------- +def imread_uint(path, n_channels=3): + # input: path + # output: HxWx3(RGB or GGG), or HxWx1 (G) + if n_channels == 1: + img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE + img = np.expand_dims(img, axis=2) # HxWx1 + elif n_channels == 3: + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G + if img.ndim == 2: + img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG + else: + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB + return img + + +# -------------------------------------------- +# matlab's imwrite +# -------------------------------------------- +def imsave(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + +def imwrite(img, img_path): + img = np.squeeze(img) + if img.ndim == 3: + img = img[:, :, [2, 1, 0]] + cv2.imwrite(img_path, img) + + + +# -------------------------------------------- +# get single image of size HxWxn_channles (BGR) +# -------------------------------------------- +def read_img(path): + # read image by cv2 + # return: Numpy float32, HWC, BGR, [0,1] + img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE + img = img.astype(np.float32) / 255. + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + # some images have 4 channels + if img.shape[2] > 3: + img = img[:, :, :3] + return img + + +''' +# -------------------------------------------- +# image format conversion +# -------------------------------------------- +# numpy(single) <---> numpy(unit) +# numpy(single) <---> tensor +# numpy(unit) <---> tensor +# -------------------------------------------- +''' + + +# -------------------------------------------- +# numpy(single) [0, 1] <---> numpy(unit) +# -------------------------------------------- + + +def uint2single(img): + + return np.float32(img/255.) 
+ + +def single2uint(img): + + return np.uint8((img.clip(0, 1)*255.).round()) + + +def uint162single(img): + + return np.float32(img/65535.) + + +def single2uint16(img): + + return np.uint16((img.clip(0, 1)*65535.).round()) + + +# -------------------------------------------- +# numpy(unit) (HxWxC or HxW) <---> tensor +# -------------------------------------------- + + +# convert uint to 4-dimensional torch tensor +def uint2tensor4(img): + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0) + + +# convert uint to 3-dimensional torch tensor +def uint2tensor3(img): + if img.ndim == 2: + img = np.expand_dims(img, axis=2) + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.) + + +# convert 2/3/4-dimensional torch tensor to uint +def tensor2uint(img): + img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + return np.uint8((img*255.0).round()) + + +# -------------------------------------------- +# numpy(single) (HxWxC) <---> tensor +# -------------------------------------------- + + +# convert single (HxWxC) to 3-dimensional torch tensor +def single2tensor3(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float() + + +# convert single (HxWxC) to 4-dimensional torch tensor +def single2tensor4(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0) + + +# convert torch tensor to single +def tensor2single(img): + img = img.data.squeeze().float().cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + + return img + +# convert torch tensor to single +def tensor2single3(img): + img = img.data.squeeze().float().cpu().numpy() + if img.ndim == 3: + img = np.transpose(img, (1, 2, 0)) + elif img.ndim == 2: + img = np.expand_dims(img, axis=2) + return img + + +def single2tensor5(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0) + + +def single32tensor5(img): + return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0) + + +def single42tensor4(img): + return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() + + +# from skimage.io import imread, imsave +def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): + ''' + Converts a torch Tensor into an image Numpy array of BGR channel order + Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order + Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) + ''' + tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp + tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] + n_dim = tensor.dim() + if n_dim == 4: + n_img = len(tensor) + img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() + img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR + elif n_dim == 3: + img_np = tensor.numpy() + img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR + elif n_dim == 2: + img_np = tensor.numpy() + else: + raise TypeError( + 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) + if out_type == np.uint8: + img_np = (img_np * 255.0).round() + # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. 
+ return img_np.astype(out_type) + + +''' +# -------------------------------------------- +# Augmentation, flipe and/or rotate +# -------------------------------------------- +# The following two are enough. +# (1) augmet_img: numpy image of WxHxC or WxH +# (2) augment_img_tensor4: tensor image 1xCxWxH +# -------------------------------------------- +''' + + +def augment_img(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + if mode == 0: + return img + elif mode == 1: + return np.flipud(np.rot90(img)) + elif mode == 2: + return np.flipud(img) + elif mode == 3: + return np.rot90(img, k=3) + elif mode == 4: + return np.flipud(np.rot90(img, k=2)) + elif mode == 5: + return np.rot90(img) + elif mode == 6: + return np.rot90(img, k=2) + elif mode == 7: + return np.flipud(np.rot90(img, k=3)) + + +def augment_img_tensor4(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + if mode == 0: + return img + elif mode == 1: + return img.rot90(1, [2, 3]).flip([2]) + elif mode == 2: + return img.flip([2]) + elif mode == 3: + return img.rot90(3, [2, 3]) + elif mode == 4: + return img.rot90(2, [2, 3]).flip([2]) + elif mode == 5: + return img.rot90(1, [2, 3]) + elif mode == 6: + return img.rot90(2, [2, 3]) + elif mode == 7: + return img.rot90(3, [2, 3]).flip([2]) + + +def augment_img_tensor(img, mode=0): + '''Kai Zhang (github: https://github.com/cszn) + ''' + img_size = img.size() + img_np = img.data.cpu().numpy() + if len(img_size) == 3: + img_np = np.transpose(img_np, (1, 2, 0)) + elif len(img_size) == 4: + img_np = np.transpose(img_np, (2, 3, 1, 0)) + img_np = augment_img(img_np, mode=mode) + img_tensor = torch.from_numpy(np.ascontiguousarray(img_np)) + if len(img_size) == 3: + img_tensor = img_tensor.permute(2, 0, 1) + elif len(img_size) == 4: + img_tensor = img_tensor.permute(3, 2, 0, 1) + + return img_tensor.type_as(img) + + +def augment_img_np3(img, mode=0): + if mode == 0: + return img + elif mode == 1: + return img.transpose(1, 0, 2) + elif mode == 2: + return img[::-1, :, :] + elif mode == 3: + img = img[::-1, :, :] + img = img.transpose(1, 0, 2) + return img + elif mode == 4: + return img[:, ::-1, :] + elif mode == 5: + img = img[:, ::-1, :] + img = img.transpose(1, 0, 2) + return img + elif mode == 6: + img = img[:, ::-1, :] + img = img[::-1, :, :] + return img + elif mode == 7: + img = img[:, ::-1, :] + img = img[::-1, :, :] + img = img.transpose(1, 0, 2) + return img + + +def augment_imgs(img_list, hflip=True, rot=True): + # horizontal flip OR rotate + hflip = hflip and random.random() < 0.5 + vflip = rot and random.random() < 0.5 + rot90 = rot and random.random() < 0.5 + + def _augment(img): + if hflip: + img = img[:, ::-1, :] + if vflip: + img = img[::-1, :, :] + if rot90: + img = img.transpose(1, 0, 2) + return img + + return [_augment(img) for img in img_list] + + +''' +# -------------------------------------------- +# modcrop and shave +# -------------------------------------------- +''' + + +def modcrop(img_in, scale): + # img_in: Numpy, HWC or HW + img = np.copy(img_in) + if img.ndim == 2: + H, W = img.shape + H_r, W_r = H % scale, W % scale + img = img[:H - H_r, :W - W_r] + elif img.ndim == 3: + H, W, C = img.shape + H_r, W_r = H % scale, W % scale + img = img[:H - H_r, :W - W_r, :] + else: + raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim)) + return img + + +def shave(img_in, border=0): + # img_in: Numpy, HWC or HW + img = np.copy(img_in) + h, w = img.shape[:2] + img = img[border:h-border, border:w-border] + return img + + +''' +# 
-------------------------------------------- +# image processing process on numpy image +# channel_convert(in_c, tar_type, img_list): +# rgb2ycbcr(img, only_y=True): +# bgr2ycbcr(img, only_y=True): +# ycbcr2rgb(img): +# -------------------------------------------- +''' + + +def rgb2ycbcr(img, only_y=True): + '''same as matlab rgb2ycbcr + only_y: only return Y channel + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + if only_y: + rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0 + else: + rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], + [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. + return rlt.astype(in_img_type) + + +def ycbcr2rgb(img): + '''same as matlab ycbcr2rgb + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], + [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. + return rlt.astype(in_img_type) + + +def bgr2ycbcr(img, only_y=True): + '''bgr version of rgb2ycbcr + only_y: only return Y channel + Input: + uint8, [0, 255] + float, [0, 1] + ''' + in_img_type = img.dtype + img.astype(np.float32) + if in_img_type != np.uint8: + img *= 255. + # convert + if only_y: + rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0 + else: + rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], + [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128] + if in_img_type == np.uint8: + rlt = rlt.round() + else: + rlt /= 255. 
+ return rlt.astype(in_img_type) + + +def channel_convert(in_c, tar_type, img_list): + # conversion among BGR, gray and y + if in_c == 3 and tar_type == 'gray': # BGR to gray + gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list] + return [np.expand_dims(img, axis=2) for img in gray_list] + elif in_c == 3 and tar_type == 'y': # BGR to y + y_list = [bgr2ycbcr(img, only_y=True) for img in img_list] + return [np.expand_dims(img, axis=2) for img in y_list] + elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR + return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list] + else: + return img_list + + +''' +# -------------------------------------------- +# metric, PSNR and SSIM +# -------------------------------------------- +''' + + +# -------------------------------------------- +# PSNR +# -------------------------------------------- +def calculate_psnr(img1, img2, border=0): + # img1 and img2 have range [0, 255] + #img1 = img1.squeeze() + #img2 = img2.squeeze() + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + h, w = img1.shape[:2] + img1 = img1[border:h-border, border:w-border] + img2 = img2[border:h-border, border:w-border] + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + mse = np.mean((img1 - img2)**2) + if mse == 0: + return float('inf') + return 20 * math.log10(255.0 / math.sqrt(mse)) + + +# -------------------------------------------- +# SSIM +# -------------------------------------------- +def calculate_ssim(img1, img2, border=0): + '''calculate SSIM + the same outputs as MATLAB's + img1, img2: [0, 255] + ''' + #img1 = img1.squeeze() + #img2 = img2.squeeze() + if not img1.shape == img2.shape: + raise ValueError('Input images must have the same dimensions.') + h, w = img1.shape[:2] + img1 = img1[border:h-border, border:w-border] + img2 = img2[border:h-border, border:w-border] + + if img1.ndim == 2: + return ssim(img1, img2) + elif img1.ndim == 3: + if img1.shape[2] == 3: + ssims = [] + for i in range(3): + ssims.append(ssim(img1[:,:,i], img2[:,:,i])) + return np.array(ssims).mean() + elif img1.shape[2] == 1: + return ssim(np.squeeze(img1), np.squeeze(img2)) + else: + raise ValueError('Wrong input image dimensions.') + + +def ssim(img1, img2): + C1 = (0.01 * 255)**2 + C2 = (0.03 * 255)**2 + + img1 = img1.astype(np.float64) + img2 = img2.astype(np.float64) + kernel = cv2.getGaussianKernel(11, 1.5) + window = np.outer(kernel, kernel.transpose()) + + mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid + mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] + mu1_sq = mu1**2 + mu2_sq = mu2**2 + mu1_mu2 = mu1 * mu2 + sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq + sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq + sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 + + ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * + (sigma1_sq + sigma2_sq + C2)) + return ssim_map.mean() + + +''' +# -------------------------------------------- +# matlab's bicubic imresize (numpy and torch) [0, 1] +# -------------------------------------------- +''' + + +# matlab 'imresize' function, now only support 'bicubic' +def cubic(x): + absx = torch.abs(x) + absx2 = absx**2 + absx3 = absx**3 + return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \ + (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx)) + + +def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, 
antialiasing): + if (scale < 1) and (antialiasing): + # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width + kernel_width = kernel_width / scale + + # Output-space coordinates + x = torch.linspace(1, out_length, out_length) + + # Input-space coordinates. Calculate the inverse mapping such that 0.5 + # in output space maps to 0.5 in input space, and 0.5+scale in output + # space maps to 1.5 in input space. + u = x / scale + 0.5 * (1 - 1 / scale) + + # What is the left-most pixel that can be involved in the computation? + left = torch.floor(u - kernel_width / 2) + + # What is the maximum number of pixels that can be involved in the + # computation? Note: it's OK to use an extra pixel here; if the + # corresponding weights are all zero, it will be eliminated at the end + # of this function. + P = math.ceil(kernel_width) + 2 + + # The indices of the input pixels involved in computing the k-th output + # pixel are in row k of the indices matrix. + indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( + 1, P).expand(out_length, P) + + # The weights used to compute the k-th output pixel are in row k of the + # weights matrix. + distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices + # apply cubic kernel + if (scale < 1) and (antialiasing): + weights = scale * cubic(distance_to_center * scale) + else: + weights = cubic(distance_to_center) + # Normalize the weights matrix so that each row sums to 1. + weights_sum = torch.sum(weights, 1).view(out_length, 1) + weights = weights / weights_sum.expand(out_length, P) + + # If a column in weights is all zero, get rid of it. only consider the first and last column. + weights_zero_tmp = torch.sum((weights == 0), 0) + if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): + indices = indices.narrow(1, 1, P - 2) + weights = weights.narrow(1, 1, P - 2) + if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): + indices = indices.narrow(1, 0, P - 2) + weights = weights.narrow(1, 0, P - 2) + weights = weights.contiguous() + indices = indices.contiguous() + sym_len_s = -indices.min() + 1 + sym_len_e = indices.max() - in_length + indices = indices + sym_len_s - 1 + return weights, indices, int(sym_len_s), int(sym_len_e) + + +# -------------------------------------------- +# imresize for tensor image [0, 1] +# -------------------------------------------- +def imresize(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: pytorch tensor, CHW or HW [0,1] + # output: CHW or HW [0,1] w/o round + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(0) + in_C, in_H, in_W = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
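+    # The resize is separable: 1-D cubic weights and source indices are computed
+    # per output row/column (the kernel is widened by 1/scale when antialiasing a
+    # downscale), the borders are padded by symmetric reflection, and each
+    # dimension is then resampled with a small matrix-vector product per output pixel.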
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) + img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:, :sym_len_Hs, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[:, -sym_len_He:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(in_C, out_H, in_W) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) + out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :, :sym_len_Ws] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, :, -sym_len_We:] + inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(2, inv_idx) + out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(in_C, out_H, out_W) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + return out_2 + + +# -------------------------------------------- +# imresize for numpy image [0, 1] +# -------------------------------------------- +def imresize_np(img, scale, antialiasing=True): + # Now the scale should be the same for H and W + # input: img: Numpy, HWC or HW [0,1] + # output: HWC or HW [0,1] w/o round + img = torch.from_numpy(img) + need_squeeze = True if img.dim() == 2 else False + if need_squeeze: + img.unsqueeze_(2) + + in_H, in_W, in_C = img.size() + out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) + kernel_width = 4 + kernel = 'cubic' + + # Return the desired dimension order for performing the resize. The + # strategy is to perform the resize first along the dimension with the + # smallest scale factor. + # Now we do not support this. 
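+    # Same separable scheme as imresize above, but on an HWC numpy input converted
+    # to a tensor, so the height/width passes index dimensions 0 and 1 instead of 1 and 2.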
+ + # get weights and indices + weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( + in_H, out_H, scale, kernel, kernel_width, antialiasing) + weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( + in_W, out_W, scale, kernel, kernel_width, antialiasing) + # process H dimension + # symmetric copying + img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) + img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) + + sym_patch = img[:sym_len_Hs, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) + + sym_patch = img[-sym_len_He:, :, :] + inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(0, inv_idx) + img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) + + out_1 = torch.FloatTensor(out_H, in_W, in_C) + kernel_width = weights_H.size(1) + for i in range(out_H): + idx = int(indices_H[i][0]) + for j in range(out_C): + out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) + + # process W dimension + # symmetric copying + out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) + out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) + + sym_patch = out_1[:, :sym_len_Ws, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) + + sym_patch = out_1[:, -sym_len_We:, :] + inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() + sym_patch_inv = sym_patch.index_select(1, inv_idx) + out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) + + out_2 = torch.FloatTensor(out_H, out_W, in_C) + kernel_width = weights_W.size(1) + for i in range(out_W): + idx = int(indices_W[i][0]) + for j in range(out_C): + out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) + if need_squeeze: + out_2.squeeze_() + + return out_2.numpy() + + +if __name__ == '__main__': + print('---') +# img = imread_uint('test.bmp', 3) +# img = uint2single(img) +# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/ldm/modules/losses/__init__.py b/ldm/modules/losses/__init__.py new file mode 100644 index 00000000..876d7c5b --- /dev/null +++ b/ldm/modules/losses/__init__.py @@ -0,0 +1 @@ +from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator \ No newline at end of file diff --git a/ldm/modules/losses/contperceptual.py b/ldm/modules/losses/contperceptual.py new file mode 100644 index 00000000..672c1e32 --- /dev/null +++ b/ldm/modules/losses/contperceptual.py @@ -0,0 +1,111 @@ +import torch +import torch.nn as nn + +from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
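An illustrative aside (not part of the patch): the metric and MATLAB-style bicubic resize helpers from utils_image.py above can be exercised as below; the arrays are synthetic and the 4x factor is arbitrary.

```python
import numpy as np
from ldm.modules.image_degradation import utils_image as util

hr = np.random.rand(64, 64, 3).astype(np.float32)   # reference image in [0, 1], HWC
lr = util.imresize_np(hr, 1 / 4)                    # antialiased bicubic downscale to 16x16
sr = util.imresize_np(lr, 4)                        # plain bicubic upscale back to 64x64
psnr = util.calculate_psnr(util.single2uint(hr), util.single2uint(sr))
ssim = util.calculate_ssim(util.single2uint(hr), util.single2uint(sr))
print(f'PSNR: {psnr:.2f} dB, SSIM: {ssim:.4f}')
```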
+ + +class LPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_loss="hinge"): + + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + self.kl_weight = kl_weight + self.pixel_weight = pixelloss_weight + self.perceptual_loss = LPIPS().eval() + self.perceptual_weight = perceptual_weight + # output log variance + self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm + ).apply(weights_init) + self.discriminator_iter_start = disc_start + self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, inputs, reconstructions, posteriors, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", + weights=None): + rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + + nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar + weighted_nll_loss = nll_loss + if weights is not None: + weighted_nll_loss = weights*nll_loss + weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] + nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + kl_loss = posteriors.kl() + kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + if self.disc_factor > 0.0: + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + else: + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), + "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): 
torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log + diff --git a/ldm/modules/losses/vqperceptual.py b/ldm/modules/losses/vqperceptual.py new file mode 100644 index 00000000..f6998176 --- /dev/null +++ b/ldm/modules/losses/vqperceptual.py @@ -0,0 +1,167 @@ +import torch +from torch import nn +import torch.nn.functional as F +from einops import repeat + +from taming.modules.discriminator.model import NLayerDiscriminator, weights_init +from taming.modules.losses.lpips import LPIPS +from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss + + +def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights): + assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0] + loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3]) + loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3]) + loss_real = (weights * loss_real).sum() / weights.sum() + loss_fake = (weights * loss_fake).sum() / weights.sum() + d_loss = 0.5 * (loss_real + loss_fake) + return d_loss + +def adopt_weight(weight, global_step, threshold=0, value=0.): + if global_step < threshold: + weight = value + return weight + + +def measure_perplexity(predicted_indices, n_embed): + # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py + # eval cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally + encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) + avg_probs = encodings.mean(0) + perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() + cluster_use = torch.sum(avg_probs > 0) + return perplexity, cluster_use + +def l1(x, y): + return torch.abs(x-y) + + +def l2(x, y): + return torch.pow((x-y), 2) + + +class VQLPIPSWithDiscriminator(nn.Module): + def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, + disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, + perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, + disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips", + pixel_loss="l1"): + super().__init__() + assert disc_loss in ["hinge", "vanilla"] + assert perceptual_loss in ["lpips", "clips", "dists"] + assert pixel_loss in ["l1", "l2"] + self.codebook_weight = codebook_weight + self.pixel_weight = pixelloss_weight + if perceptual_loss == "lpips": + print(f"{self.__class__.__name__}: Running with LPIPS.") + self.perceptual_loss = LPIPS().eval() + else: + raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") + self.perceptual_weight = perceptual_weight + + if pixel_loss == "l1": + self.pixel_loss = l1 + else: + self.pixel_loss = l2 + + self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, + n_layers=disc_num_layers, + use_actnorm=use_actnorm, + ndf=disc_ndf + ).apply(weights_init) + self.discriminator_iter_start = disc_start + if disc_loss == "hinge": + self.disc_loss = hinge_d_loss + elif disc_loss == "vanilla": + self.disc_loss = vanilla_d_loss + else: + raise ValueError(f"Unknown GAN loss '{disc_loss}'.") + print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") + self.disc_factor = disc_factor + self.discriminator_weight = disc_weight + self.disc_conditional = disc_conditional + self.n_classes = n_classes + + def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): + if last_layer is not None: + nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] + else: + nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] + g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] + + d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) + d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() + d_weight = d_weight * self.discriminator_weight + return d_weight + + def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, + global_step, last_layer=None, cond=None, split="train", predicted_indices=None): + if not exists(codebook_loss): + codebook_loss = torch.tensor([0.]).to(inputs.device) + #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) + rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) + if self.perceptual_weight > 0: + p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) + rec_loss = rec_loss + self.perceptual_weight * p_loss + else: + p_loss = torch.tensor([0.0]) + + nll_loss = rec_loss + #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] + nll_loss = torch.mean(nll_loss) + + # now the GAN part + if optimizer_idx == 0: + # generator update + if cond is None: + assert not self.disc_conditional + logits_fake = self.discriminator(reconstructions.contiguous()) + else: + assert self.disc_conditional + 
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) + g_loss = -torch.mean(logits_fake) + + try: + d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) + except RuntimeError: + assert not self.training + d_weight = torch.tensor(0.0) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() + + log = {"{}/total_loss".format(split): loss.clone().detach().mean(), + "{}/quant_loss".format(split): codebook_loss.detach().mean(), + "{}/nll_loss".format(split): nll_loss.detach().mean(), + "{}/rec_loss".format(split): rec_loss.detach().mean(), + "{}/p_loss".format(split): p_loss.detach().mean(), + "{}/d_weight".format(split): d_weight.detach(), + "{}/disc_factor".format(split): torch.tensor(disc_factor), + "{}/g_loss".format(split): g_loss.detach().mean(), + } + if predicted_indices is not None: + assert self.n_classes is not None + with torch.no_grad(): + perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) + log[f"{split}/perplexity"] = perplexity + log[f"{split}/cluster_usage"] = cluster_usage + return loss, log + + if optimizer_idx == 1: + # second pass for discriminator update + if cond is None: + logits_real = self.discriminator(inputs.contiguous().detach()) + logits_fake = self.discriminator(reconstructions.contiguous().detach()) + else: + logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) + logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) + + disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) + d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) + + log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), + "{}/logits_real".format(split): logits_real.detach().mean(), + "{}/logits_fake".format(split): logits_fake.detach().mean() + } + return d_loss, log diff --git a/ldm/modules/x_transformer.py b/ldm/modules/x_transformer.py new file mode 100644 index 00000000..5fc15bf9 --- /dev/null +++ b/ldm/modules/x_transformer.py @@ -0,0 +1,641 @@ +"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" +import torch +from torch import nn, einsum +import torch.nn.functional as F +from functools import partial +from inspect import isfunction +from collections import namedtuple +from einops import rearrange, repeat, reduce + +# constants + +DEFAULT_DIM_HEAD = 64 + +Intermediates = namedtuple('Intermediates', [ + 'pre_softmax_attn', + 'post_softmax_attn' +]) + +LayerIntermediates = namedtuple('Intermediates', [ + 'hiddens', + 'attn_intermediates' +]) + + +class AbsolutePositionalEmbedding(nn.Module): + def __init__(self, dim, max_seq_len): + super().__init__() + self.emb = nn.Embedding(max_seq_len, dim) + self.init_() + + def init_(self): + nn.init.normal_(self.emb.weight, std=0.02) + + def forward(self, x): + n = torch.arange(x.shape[1], device=x.device) + return self.emb(n)[None, :, :] + + +class FixedPositionalEmbedding(nn.Module): + def __init__(self, dim): + super().__init__() + inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) + self.register_buffer('inv_freq', inv_freq) + + def forward(self, x, seq_dim=1, offset=0): + t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset + sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) + emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) + return emb[None, :, :] + + +# helpers + +def exists(val): + return val is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def always(val): + def inner(*args, **kwargs): + return val + return inner + + +def not_equals(val): + def inner(x): + return x != val + return inner + + +def equals(val): + def inner(x): + return x == val + return inner + + +def max_neg_value(tensor): + return -torch.finfo(tensor.dtype).max + + +# keyword argument helpers + +def pick_and_pop(keys, d): + values = list(map(lambda key: d.pop(key), keys)) + return dict(zip(keys, values)) + + +def group_dict_by_key(cond, d): + return_val = [dict(), dict()] + for key in d.keys(): + match = bool(cond(key)) + ind = int(not match) + return_val[ind][key] = d[key] + return (*return_val,) + + +def string_begins_with(prefix, str): + return str.startswith(prefix) + + +def group_by_key_prefix(prefix, d): + return group_dict_by_key(partial(string_begins_with, prefix), d) + + +def groupby_prefix_and_trim(prefix, d): + kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) + kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) + return kwargs_without_prefix, kwargs + + +# classes +class Scale(nn.Module): + def __init__(self, value, fn): + super().__init__() + self.value = value + self.fn = fn + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.value, *rest) + + +class Rezero(nn.Module): + def __init__(self, fn): + super().__init__() + self.fn = fn + self.g = nn.Parameter(torch.zeros(1)) + + def forward(self, x, **kwargs): + x, *rest = self.fn(x, **kwargs) + return (x * self.g, *rest) + + +class ScaleNorm(nn.Module): + def __init__(self, dim, eps=1e-5): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(1)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class RMSNorm(nn.Module): + def __init__(self, dim, eps=1e-8): + super().__init__() + self.scale = dim ** -0.5 + self.eps = eps + self.g = nn.Parameter(torch.ones(dim)) + + def forward(self, x): + norm = torch.norm(x, dim=-1, keepdim=True) * self.scale + return x / norm.clamp(min=self.eps) * self.g + + +class Residual(nn.Module): + def forward(self, x, residual): + return x + residual + + +class GRUGating(nn.Module): + def __init__(self, dim): + super().__init__() + self.gru = nn.GRUCell(dim, dim) + + def forward(self, x, residual): + gated_output = self.gru( + rearrange(x, 'b n d -> (b n) d'), + rearrange(residual, 'b n d -> (b n) d') + ) + + return gated_output.reshape_as(x) + + +# feedforward + +class GEGLU(nn.Module): + def __init__(self, dim_in, dim_out): + super().__init__() + self.proj = nn.Linear(dim_in, dim_out * 2) + + def forward(self, x): + x, gate = self.proj(x).chunk(2, dim=-1) + return x * F.gelu(gate) + + +class FeedForward(nn.Module): + def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): + super().__init__() + inner_dim = int(dim * mult) + dim_out = default(dim_out, dim) + 
project_in = nn.Sequential( + nn.Linear(dim, inner_dim), + nn.GELU() + ) if not glu else GEGLU(dim, inner_dim) + + self.net = nn.Sequential( + project_in, + nn.Dropout(dropout), + nn.Linear(inner_dim, dim_out) + ) + + def forward(self, x): + return self.net(x) + + +# attention. +class Attention(nn.Module): + def __init__( + self, + dim, + dim_head=DEFAULT_DIM_HEAD, + heads=8, + causal=False, + mask=None, + talking_heads=False, + sparse_topk=None, + use_entmax15=False, + num_mem_kv=0, + dropout=0., + on_attn=False + ): + super().__init__() + if use_entmax15: + raise NotImplementedError("Check out entmax activation instead of softmax activation!") + self.scale = dim_head ** -0.5 + self.heads = heads + self.causal = causal + self.mask = mask + + inner_dim = dim_head * heads + + self.to_q = nn.Linear(dim, inner_dim, bias=False) + self.to_k = nn.Linear(dim, inner_dim, bias=False) + self.to_v = nn.Linear(dim, inner_dim, bias=False) + self.dropout = nn.Dropout(dropout) + + # talking heads + self.talking_heads = talking_heads + if talking_heads: + self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) + + # explicit topk sparse attention + self.sparse_topk = sparse_topk + + # entmax + #self.attn_fn = entmax15 if use_entmax15 else F.softmax + self.attn_fn = F.softmax + + # add memory key / values + self.num_mem_kv = num_mem_kv + if num_mem_kv > 0: + self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) + + # attention on attention + self.attn_on_attn = on_attn + self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + rel_pos=None, + sinusoidal_emb=None, + prev_attn=None, + mem=None + ): + b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device + kv_input = default(context, x) + + q_input = x + k_input = kv_input + v_input = kv_input + + if exists(mem): + k_input = torch.cat((mem, k_input), dim=-2) + v_input = torch.cat((mem, v_input), dim=-2) + + if exists(sinusoidal_emb): + # in shortformer, the query would start at a position offset depending on the past cached memory + offset = k_input.shape[-2] - q_input.shape[-2] + q_input = q_input + sinusoidal_emb(q_input, offset=offset) + k_input = k_input + sinusoidal_emb(k_input) + + q = self.to_q(q_input) + k = self.to_k(k_input) + v = self.to_v(v_input) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) + + input_mask = None + if any(map(exists, (mask, context_mask))): + q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) + k_mask = q_mask if not exists(context) else context_mask + k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) + q_mask = rearrange(q_mask, 'b i -> b () i ()') + k_mask = rearrange(k_mask, 'b j -> b () () j') + input_mask = q_mask * k_mask + + if self.num_mem_kv > 0: + mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) + k = torch.cat((mem_k, k), dim=-2) + v = torch.cat((mem_v, v), dim=-2) + if exists(input_mask): + input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) + + dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale + mask_value = max_neg_value(dots) + + if exists(prev_attn): + dots = dots + prev_attn + + pre_softmax_attn = dots + + if talking_heads: + dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() + + if exists(rel_pos): + dots = rel_pos(dots) + + if exists(input_mask): + dots.masked_fill_(~input_mask, mask_value) + del input_mask + + if self.causal: + i, j = dots.shape[-2:] + r = torch.arange(i, device=device) + mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') + mask = F.pad(mask, (j - i, 0), value=False) + dots.masked_fill_(mask, mask_value) + del mask + + if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: + top, _ = dots.topk(self.sparse_topk, dim=-1) + vk = top[..., -1].unsqueeze(-1).expand_as(dots) + mask = dots < vk + dots.masked_fill_(mask, mask_value) + del mask + + attn = self.attn_fn(dots, dim=-1) + post_softmax_attn = attn + + attn = self.dropout(attn) + + if talking_heads: + attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() + + out = einsum('b h i j, b h j d -> b h i d', attn, v) + out = rearrange(out, 'b h n d -> b n (h d)') + + intermediates = Intermediates( + pre_softmax_attn=pre_softmax_attn, + post_softmax_attn=post_softmax_attn + ) + + return self.to_out(out), intermediates + + +class AttentionLayers(nn.Module): + def __init__( + self, + dim, + depth, + heads=8, + causal=False, + cross_attend=False, + only_cross=False, + use_scalenorm=False, + use_rmsnorm=False, + use_rezero=False, + rel_pos_num_buckets=32, + rel_pos_max_distance=128, + position_infused_attn=False, + custom_layers=None, + sandwich_coef=None, + par_ratio=None, + residual_attn=False, + cross_residual_attn=False, + macaron=False, + pre_norm=True, + gate_residual=False, + **kwargs + ): + super().__init__() + ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) + attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) + + dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) + + self.dim = dim + self.depth = depth + self.layers = nn.ModuleList([]) + + self.has_pos_emb = position_infused_attn + self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None + self.rotary_pos_emb = always(None) + + assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' + self.rel_pos = None + + self.pre_norm = pre_norm + + self.residual_attn = residual_attn + self.cross_residual_attn = cross_residual_attn + + norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm + norm_class = RMSNorm if use_rmsnorm else norm_class + norm_fn = partial(norm_class, dim) + + norm_fn = nn.Identity if use_rezero else norm_fn + branch_fn = Rezero if use_rezero else None + + if cross_attend and not only_cross: + default_block = ('a', 'c', 'f') + elif cross_attend and only_cross: + default_block = ('c', 'f') + else: + default_block = ('a', 'f') + + if macaron: + default_block = ('f',) + default_block + + if exists(custom_layers): + layer_types = custom_layers + elif exists(par_ratio): + par_depth = depth * len(default_block) + assert 1 < par_ratio <= par_depth, 'par ratio out of range' + default_block = tuple(filter(not_equals('f'), default_block)) + par_attn = par_depth // par_ratio + depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper + par_width = (depth_cut + depth_cut // par_attn) // par_attn + assert len(default_block) <= par_width, 'default block is too large for par_ratio' + par_block = default_block + ('f',) * (par_width - len(default_block)) + par_head = par_block * par_attn + layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) + elif exists(sandwich_coef): + assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' + layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef + else: + layer_types = default_block * depth + + self.layer_types = layer_types + self.num_attn_layers = len(list(filter(equals('a'), layer_types))) + + for layer_type in self.layer_types: + if layer_type == 'a': + layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) + elif layer_type == 'c': + layer = Attention(dim, heads=heads, **attn_kwargs) + elif layer_type == 'f': + layer = FeedForward(dim, **ff_kwargs) + layer = layer if not macaron else Scale(0.5, layer) + else: + raise Exception(f'invalid layer type {layer_type}') + + if isinstance(layer, Attention) and exists(branch_fn): + layer = branch_fn(layer) + + if gate_residual: + residual_fn = GRUGating(dim) + else: + residual_fn = Residual() + + self.layers.append(nn.ModuleList([ + norm_fn(), + layer, + residual_fn + ])) + + def forward( + self, + x, + context=None, + mask=None, + context_mask=None, + mems=None, + return_hiddens=False + ): + hiddens = [] + intermediates = [] + prev_attn = None + prev_cross_attn = None + + mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers + + for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): + is_last = ind == (len(self.layers) - 1) + + if layer_type == 'a': + hiddens.append(x) + layer_mem = mems.pop(0) + + residual = x + + if self.pre_norm: + x = norm(x) + + if layer_type == 'a': + out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, + prev_attn=prev_attn, mem=layer_mem) + elif layer_type == 'c': + out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) + elif layer_type == 'f': + out = block(x) + + x = residual_fn(out, residual) + + if layer_type in ('a', 'c'): + intermediates.append(inter) + + if layer_type == 'a' and self.residual_attn: + prev_attn = inter.pre_softmax_attn + elif layer_type == 'c' and self.cross_residual_attn: + prev_cross_attn = inter.pre_softmax_attn + + if not self.pre_norm and not is_last: + x = norm(x) + + if return_hiddens: + intermediates = LayerIntermediates( + hiddens=hiddens, + attn_intermediates=intermediates + ) + + return x, intermediates + + return x + + +class Encoder(AttentionLayers): + def __init__(self, **kwargs): + assert 'causal' not in kwargs, 'cannot set causality on encoder' + super().__init__(causal=False, **kwargs) + + + +class TransformerWrapper(nn.Module): + def __init__( + self, + *, + num_tokens, + max_seq_len, + attn_layers, + emb_dim=None, + max_mem_len=0., + emb_dropout=0., + num_memory_tokens=None, + tie_embedding=False, + use_pos_emb=True + ): + super().__init__() + assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' + + dim = attn_layers.dim + emb_dim = default(emb_dim, dim) + + self.max_seq_len = max_seq_len + self.max_mem_len = max_mem_len + self.num_tokens = num_tokens + + self.token_emb = nn.Embedding(num_tokens, emb_dim) + self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( + use_pos_emb and not attn_layers.has_pos_emb) else always(0) + self.emb_dropout = nn.Dropout(emb_dropout) + + self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() + self.attn_layers = attn_layers + self.norm = nn.LayerNorm(dim) + + self.init_() + + self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() + + # memory tokens (like [cls]) from Memory Transformers paper + num_memory_tokens = default(num_memory_tokens, 0) + self.num_memory_tokens = num_memory_tokens + if num_memory_tokens > 0: + self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) + + # let funnel encoder know number of memory tokens, if specified + if hasattr(attn_layers, 'num_memory_tokens'): + attn_layers.num_memory_tokens = num_memory_tokens + + def init_(self): + nn.init.normal_(self.token_emb.weight, std=0.02) + + def forward( + self, + x, + return_embeddings=False, + mask=None, + return_mems=False, + return_attn=False, + mems=None, + **kwargs + ): + b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens + x = self.token_emb(x) + x += self.pos_emb(x) + x = self.emb_dropout(x) + + x = self.project_emb(x) + + if num_mem > 0: + mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) + x = torch.cat((mem, x), dim=1) + + # auto-handle masking after appending memory tokens + if exists(mask): + mask = F.pad(mask, (num_mem, 0), value=True) + + x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) + x = self.norm(x) + + mem, x = x[:, :num_mem], x[:, num_mem:] + + out = self.to_logits(x) if not return_embeddings else x + + if return_mems: + hiddens = intermediates.hiddens + new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens + new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) + return out, new_mems + + if return_attn: + attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) + return out, attn_maps + + return out + diff --git a/ldm/util.py b/ldm/util.py new file mode 100644 index 00000000..8ba38853 --- /dev/null +++ b/ldm/util.py @@ -0,0 +1,203 @@ +import importlib + +import torch +import numpy as np +from collections import abc +from einops import rearrange +from functools import partial + +import multiprocessing as mp +from threading import Thread +from queue import Queue + +from inspect import isfunction +from PIL import Image, ImageDraw, ImageFont + + +def log_txt_as_img(wh, xc, size=10): + # wh a tuple of (width, height) + # xc a list of captions to plot + b = len(xc) + txts = list() + for bi in range(b): + txt = Image.new("RGB", wh, color="white") + draw = ImageDraw.Draw(txt) + font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) + nc = int(40 * (wh[0] / 256)) + lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) + + try: + draw.text((0, 0), lines, fill="black", font=font) + except UnicodeEncodeError: + print("Cant encode string for logging. Skipping.") + + txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 + txts.append(txt) + txts = np.stack(txts) + txts = torch.tensor(txts) + return txts + + +def ismap(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] > 3) + + +def isimage(x): + if not isinstance(x, torch.Tensor): + return False + return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) + + +def exists(x): + return x is not None + + +def default(val, d): + if exists(val): + return val + return d() if isfunction(d) else d + + +def mean_flat(tensor): + """ + https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 + Take the mean over all non-batch dimensions. 
+ """ + return tensor.mean(dim=list(range(1, len(tensor.shape)))) + + +def count_params(model, verbose=False): + total_params = sum(p.numel() for p in model.parameters()) + if verbose: + print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.") + return total_params + + +def instantiate_from_config(config): + if not "target" in config: + if config == '__is_first_stage__': + return None + elif config == "__is_unconditional__": + return None + raise KeyError("Expected key `target` to instantiate.") + return get_obj_from_str(config["target"])(**config.get("params", dict())) + + +def get_obj_from_str(string, reload=False): + module, cls = string.rsplit(".", 1) + if reload: + module_imp = importlib.import_module(module) + importlib.reload(module_imp) + return getattr(importlib.import_module(module, package=None), cls) + + +def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False): + # create dummy dataset instance + + # run prefetching + if idx_to_fn: + res = func(data, worker_id=idx) + else: + res = func(data) + Q.put([idx, res]) + Q.put("Done") + + +def parallel_data_prefetch( + func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False +): + # if target_data_type not in ["ndarray", "list"]: + # raise ValueError( + # "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray." + # ) + if isinstance(data, np.ndarray) and target_data_type == "list": + raise ValueError("list expected but function got ndarray.") + elif isinstance(data, abc.Iterable): + if isinstance(data, dict): + print( + f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.' + ) + data = list(data.values()) + if target_data_type == "ndarray": + data = np.asarray(data) + else: + data = list(data) + else: + raise TypeError( + f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}." + ) + + if cpu_intensive: + Q = mp.Queue(1000) + proc = mp.Process + else: + Q = Queue(1000) + proc = Thread + # spawn processes + if target_data_type == "ndarray": + arguments = [ + [func, Q, part, i, use_worker_id] + for i, part in enumerate(np.array_split(data, n_proc)) + ] + else: + step = ( + int(len(data) / n_proc + 1) + if len(data) % n_proc != 0 + else int(len(data) / n_proc) + ) + arguments = [ + [func, Q, part, i, use_worker_id] + for i, part in enumerate( + [data[i: i + step] for i in range(0, len(data), step)] + ) + ] + processes = [] + for i in range(n_proc): + p = proc(target=_do_parallel_data_prefetch, args=arguments[i]) + processes += [p] + + # start processes + print(f"Start prefetching...") + import time + + start = time.time() + gather_res = [[] for _ in range(n_proc)] + try: + for p in processes: + p.start() + + k = 0 + while k < n_proc: + # get result + res = Q.get() + if res == "Done": + k += 1 + else: + gather_res[res[0]] = res[1] + + except Exception as e: + print("Exception: ", e) + for p in processes: + p.terminate() + + raise e + finally: + for p in processes: + p.join() + print(f"Prefetching complete. 
[{time.time() - start} sec.]") + + if target_data_type == 'ndarray': + if not isinstance(gather_res[0], np.ndarray): + return np.concatenate([np.asarray(r) for r in gather_res], axis=0) + + # order outputs + return np.concatenate(gather_res, axis=0) + elif target_data_type == 'list': + out = [] + for r in gather_res: + out.extend(r) + return out + else: + return gather_res diff --git a/modules/devices.py b/modules/devices.py index 67165bf6..f30b6ebc 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -36,8 +36,8 @@ def get_optimal_device(): else: return torch.device("cuda") - if has_mps(): - return torch.device("mps") + # if has_mps(): + # return torch.device("mps") return cpu diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index eaedac13..26280fe4 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -70,14 +70,19 @@ class StableDiffusionModelHijack: embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir) def hijack(self, m): - model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + + if shared.text_model_name == "XLMR-Large": + model_embeddings = m.cond_stage_model.roberta.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) + else : + model_embeddings = m.cond_stage_model.transformer.text_model.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embeddings, self) - model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) self.clip = m.cond_stage_model - apply_optimizations() + # apply_optimizations() def flatten(el): flattened = [flatten(children) for children in el.children()] @@ -125,8 +130,11 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): self.tokenizer = wrapped.tokenizer self.token_mults = {} - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] - + try: + self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + except: + self.comma_token = None + tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] for text, ident in tokens_with_parens: mult = 1.0 @@ -298,6 +306,9 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count def forward(self, text): + if shared.text_model_name == "XLMR-Large": + return self.wrapped.encode(text) + use_old = opts.use_old_emphasis_implementation if use_old: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) @@ -359,7 +370,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): z = self.wrapped.transformer.text_model.final_layer_norm(z) else: z = outputs.last_hidden_state - + # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers] batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device) diff --git a/modules/shared.py b/modules/shared.py index c93ae2a3..9941d2f4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -21,7 +21,7 @@ from modules.paths import models_path, script_path, sd_path sd_model_file = os.path.join(script_path, 
'model.ckpt') default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",) +parser.add_argument("--config", type=str, default="configs/altdiffusion/ad-inference.yaml", help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) @@ -106,6 +106,10 @@ restricted_opts = { "outdir_txt2img_grids", "outdir_save", } +from omegaconf import OmegaConf +config = OmegaConf.load(f"{cmd_opts.config}") +# XLMR-Large +text_model_name = config.model.params.cond_stage_config.params.name cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access From ee3f5ea3eeb31f1ed72e2f0cbed2c00a782497d8 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 29 Nov 2022 10:30:19 +0800 Subject: [PATCH 026/461] delete old config file Signed-off-by: zhaohu xing <920232796@qq.com> --- configs/stable-diffusion/v1-inference.yaml | 71 ---------------------- 1 file changed, 71 deletions(-) delete mode 100644 configs/stable-diffusion/v1-inference.yaml diff --git a/configs/stable-diffusion/v1-inference.yaml b/configs/stable-diffusion/v1-inference.yaml deleted file mode 100644 index 2e6ef0f2..00000000 --- a/configs/stable-diffusion/v1-inference.yaml +++ /dev/null @@ -1,71 +0,0 @@ -model: - base_learning_rate: 1.0e-04 - target: ldm.models.diffusion.ddpm.LatentDiffusion - params: - linear_start: 0.00085 - linear_end: 0.0120 - num_timesteps_cond: 1 - log_every_t: 200 - timesteps: 1000 - first_stage_key: "jpg" - cond_stage_key: "txt" - image_size: 64 - channels: 4 - cond_stage_trainable: false # Note: different from the one we trained before - conditioning_key: crossattn - monitor: val/loss_simple_ema - scale_factor: 0.18215 - use_ema: False - - scheduler_config: # 10000 warmup steps - target: ldm.lr_scheduler.LambdaLinearScheduler - params: - warm_up_steps: [ 10000 ] - cycle_lengths: [ 10000000000000 ] # incredibly large number to prevent corner cases - f_start: [ 1.e-6 ] - f_max: [ 1. ] - f_min: [ 1. 
] - - unet_config: - target: ldm.modules.diffusionmodules.openaimodel.UNetModel - params: - image_size: 32 # unused - in_channels: 4 - out_channels: 4 - model_channels: 320 - attention_resolutions: [ 4, 2, 1 ] - num_res_blocks: 2 - channel_mult: [ 1, 2, 4, 4 ] - num_heads: 8 - use_spatial_transformer: True - transformer_depth: 1 - context_dim: 768 - use_checkpoint: True - legacy: False - - first_stage_config: - target: ldm.models.autoencoder.AutoencoderKL - params: - embed_dim: 4 - monitor: val/rec_loss - ddconfig: - double_z: true - z_channels: 4 - resolution: 256 - in_channels: 3 - out_ch: 3 - ch: 128 - ch_mult: - - 1 - - 2 - - 4 - - 4 - num_res_blocks: 2 - attn_resolutions: [] - dropout: 0.0 - lossconfig: - target: torch.nn.Identity - - cond_stage_config: - # target: ldm.modules.encoders.modules.FrozenCLIPEmbedder - target: altclip.model.AltCLIPEmbedder \ No newline at end of file From 52cc83d36b7663a77b79fd2258d2ca871af73e55 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Wed, 30 Nov 2022 14:56:12 +0800 Subject: [PATCH 027/461] fix bugs Signed-off-by: zhaohu xing <920232796@qq.com> --- configs/altdiffusion/ad-inference.yaml | 2 +- launch.py | 10 +- ldm/data/__init__.py | 0 ldm/data/base.py | 23 - ldm/data/imagenet.py | 394 ----- ldm/data/lsun.py | 92 -- ldm/lr_scheduler.py | 98 -- ldm/models/autoencoder.py | 443 ----- ldm/models/diffusion/__init__.py | 0 ldm/models/diffusion/classifier.py | 267 --- ldm/models/diffusion/ddim.py | 241 --- ldm/models/diffusion/ddpm.py | 1445 ----------------- ldm/models/diffusion/dpm_solver/__init__.py | 1 - ldm/models/diffusion/dpm_solver/dpm_solver.py | 1184 -------------- ldm/models/diffusion/dpm_solver/sampler.py | 82 - ldm/models/diffusion/plms.py | 236 --- ldm/modules/attention.py | 261 --- ldm/modules/diffusionmodules/__init__.py | 0 ldm/modules/diffusionmodules/model.py | 835 ---------- ldm/modules/diffusionmodules/openaimodel.py | 961 ----------- ldm/modules/diffusionmodules/util.py | 267 --- ldm/modules/distributions/__init__.py | 0 ldm/modules/distributions/distributions.py | 92 -- ldm/modules/ema.py | 76 - ldm/modules/encoders/__init__.py | 0 ldm/modules/encoders/modules.py | 234 --- ldm/modules/image_degradation/__init__.py | 2 - ldm/modules/image_degradation/bsrgan.py | 730 --------- ldm/modules/image_degradation/bsrgan_light.py | 650 -------- ldm/modules/image_degradation/utils/test.png | Bin 441072 -> 0 bytes ldm/modules/image_degradation/utils_image.py | 916 ----------- ldm/modules/losses/__init__.py | 1 - ldm/modules/losses/contperceptual.py | 111 -- ldm/modules/losses/vqperceptual.py | 167 -- ldm/modules/x_transformer.py | 641 -------- ldm/util.py | 203 --- modules/sd_hijack.py | 15 +- modules/sd_hijack_clip.py | 10 +- {ldm/modules/encoders => modules}/xlmr.py | 0 39 files changed, 22 insertions(+), 10668 deletions(-) delete mode 100644 ldm/data/__init__.py delete mode 100644 ldm/data/base.py delete mode 100644 ldm/data/imagenet.py delete mode 100644 ldm/data/lsun.py delete mode 100644 ldm/lr_scheduler.py delete mode 100644 ldm/models/autoencoder.py delete mode 100644 ldm/models/diffusion/__init__.py delete mode 100644 ldm/models/diffusion/classifier.py delete mode 100644 ldm/models/diffusion/ddim.py delete mode 100644 ldm/models/diffusion/ddpm.py delete mode 100644 ldm/models/diffusion/dpm_solver/__init__.py delete mode 100644 ldm/models/diffusion/dpm_solver/dpm_solver.py delete mode 100644 ldm/models/diffusion/dpm_solver/sampler.py delete mode 100644 ldm/models/diffusion/plms.py delete mode 100644 
ldm/modules/attention.py delete mode 100644 ldm/modules/diffusionmodules/__init__.py delete mode 100644 ldm/modules/diffusionmodules/model.py delete mode 100644 ldm/modules/diffusionmodules/openaimodel.py delete mode 100644 ldm/modules/diffusionmodules/util.py delete mode 100644 ldm/modules/distributions/__init__.py delete mode 100644 ldm/modules/distributions/distributions.py delete mode 100644 ldm/modules/ema.py delete mode 100644 ldm/modules/encoders/__init__.py delete mode 100644 ldm/modules/encoders/modules.py delete mode 100644 ldm/modules/image_degradation/__init__.py delete mode 100644 ldm/modules/image_degradation/bsrgan.py delete mode 100644 ldm/modules/image_degradation/bsrgan_light.py delete mode 100644 ldm/modules/image_degradation/utils/test.png delete mode 100644 ldm/modules/image_degradation/utils_image.py delete mode 100644 ldm/modules/losses/__init__.py delete mode 100644 ldm/modules/losses/contperceptual.py delete mode 100644 ldm/modules/losses/vqperceptual.py delete mode 100644 ldm/modules/x_transformer.py delete mode 100644 ldm/util.py rename {ldm/modules/encoders => modules}/xlmr.py (100%) diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/altdiffusion/ad-inference.yaml index 1b11b63e..cfbee72d 100644 --- a/configs/altdiffusion/ad-inference.yaml +++ b/configs/altdiffusion/ad-inference.yaml @@ -67,6 +67,6 @@ model: target: torch.nn.Identity cond_stage_config: - target: ldm.modules.encoders.xlmr.BertSeriesModelWithTransformation + target: modules.xlmr.BertSeriesModelWithTransformation params: name: "XLMR-Large" \ No newline at end of file diff --git a/launch.py b/launch.py index ad9ddd5a..3f4dc870 100644 --- a/launch.py +++ b/launch.py @@ -233,11 +233,11 @@ def prepare_enviroment(): os.makedirs(dir_repos, exist_ok=True) - git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", stable_diffusion_commit_hash) - git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) - git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) - git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) - git_clone(blip_repo, repo_dir('BLIP'), "BLIP", blip_commit_hash) + git_clone(stable_diffusion_repo, repo_dir('stable-diffusion-stability-ai'), "Stable Diffusion", ) + git_clone(taming_transformers_repo, repo_dir('taming-transformers'), "Taming Transformers", ) + git_clone(k_diffusion_repo, repo_dir('k-diffusion'), "K-diffusion", ) + git_clone(codeformer_repo, repo_dir('CodeFormer'), "CodeFormer", ) + git_clone(blip_repo, repo_dir('BLIP'), "BLIP", ) if not is_installed("lpips"): run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer") diff --git a/ldm/data/__init__.py b/ldm/data/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ldm/data/base.py b/ldm/data/base.py deleted file mode 100644 index b196c2f7..00000000 --- a/ldm/data/base.py +++ /dev/null @@ -1,23 +0,0 @@ -from abc import abstractmethod -from torch.utils.data import Dataset, ConcatDataset, ChainDataset, IterableDataset - - -class Txt2ImgIterableBaseDataset(IterableDataset): - ''' - Define an interface to make the IterableDatasets for text2img data chainable - ''' - def __init__(self, num_records=0, valid_ids=None, size=256): - super().__init__() - self.num_records = num_records - self.valid_ids = valid_ids - self.sample_ids = valid_ids - self.size = size - - 
print(f'{self.__class__.__name__} dataset contains {self.__len__()} examples.') - - def __len__(self): - return self.num_records - - @abstractmethod - def __iter__(self): - pass \ No newline at end of file diff --git a/ldm/data/imagenet.py b/ldm/data/imagenet.py deleted file mode 100644 index 1c473f9c..00000000 --- a/ldm/data/imagenet.py +++ /dev/null @@ -1,394 +0,0 @@ -import os, yaml, pickle, shutil, tarfile, glob -import cv2 -import albumentations -import PIL -import numpy as np -import torchvision.transforms.functional as TF -from omegaconf import OmegaConf -from functools import partial -from PIL import Image -from tqdm import tqdm -from torch.utils.data import Dataset, Subset - -import taming.data.utils as tdu -from taming.data.imagenet import str_to_indices, give_synsets_from_indices, download, retrieve -from taming.data.imagenet import ImagePaths - -from ldm.modules.image_degradation import degradation_fn_bsr, degradation_fn_bsr_light - - -def synset2idx(path_to_yaml="data/index_synset.yaml"): - with open(path_to_yaml) as f: - di2s = yaml.load(f) - return dict((v,k) for k,v in di2s.items()) - - -class ImageNetBase(Dataset): - def __init__(self, config=None): - self.config = config or OmegaConf.create() - if not type(self.config)==dict: - self.config = OmegaConf.to_container(self.config) - self.keep_orig_class_label = self.config.get("keep_orig_class_label", False) - self.process_images = True # if False we skip loading & processing images and self.data contains filepaths - self._prepare() - self._prepare_synset_to_human() - self._prepare_idx_to_synset() - self._prepare_human_to_integer_label() - self._load() - - def __len__(self): - return len(self.data) - - def __getitem__(self, i): - return self.data[i] - - def _prepare(self): - raise NotImplementedError() - - def _filter_relpaths(self, relpaths): - ignore = set([ - "n06596364_9591.JPEG", - ]) - relpaths = [rpath for rpath in relpaths if not rpath.split("/")[-1] in ignore] - if "sub_indices" in self.config: - indices = str_to_indices(self.config["sub_indices"]) - synsets = give_synsets_from_indices(indices, path_to_yaml=self.idx2syn) # returns a list of strings - self.synset2idx = synset2idx(path_to_yaml=self.idx2syn) - files = [] - for rpath in relpaths: - syn = rpath.split("/")[0] - if syn in synsets: - files.append(rpath) - return files - else: - return relpaths - - def _prepare_synset_to_human(self): - SIZE = 2655750 - URL = "https://heibox.uni-heidelberg.de/f/9f28e956cd304264bb82/?dl=1" - self.human_dict = os.path.join(self.root, "synset_human.txt") - if (not os.path.exists(self.human_dict) or - not os.path.getsize(self.human_dict)==SIZE): - download(URL, self.human_dict) - - def _prepare_idx_to_synset(self): - URL = "https://heibox.uni-heidelberg.de/f/d835d5b6ceda4d3aa910/?dl=1" - self.idx2syn = os.path.join(self.root, "index_synset.yaml") - if (not os.path.exists(self.idx2syn)): - download(URL, self.idx2syn) - - def _prepare_human_to_integer_label(self): - URL = "https://heibox.uni-heidelberg.de/f/2362b797d5be43b883f6/?dl=1" - self.human2integer = os.path.join(self.root, "imagenet1000_clsidx_to_labels.txt") - if (not os.path.exists(self.human2integer)): - download(URL, self.human2integer) - with open(self.human2integer, "r") as f: - lines = f.read().splitlines() - assert len(lines) == 1000 - self.human2integer_dict = dict() - for line in lines: - value, key = line.split(":") - self.human2integer_dict[key] = int(value) - - def _load(self): - with open(self.txt_filelist, "r") as f: - self.relpaths = f.read().splitlines() - 
l1 = len(self.relpaths) - self.relpaths = self._filter_relpaths(self.relpaths) - print("Removed {} files from filelist during filtering.".format(l1 - len(self.relpaths))) - - self.synsets = [p.split("/")[0] for p in self.relpaths] - self.abspaths = [os.path.join(self.datadir, p) for p in self.relpaths] - - unique_synsets = np.unique(self.synsets) - class_dict = dict((synset, i) for i, synset in enumerate(unique_synsets)) - if not self.keep_orig_class_label: - self.class_labels = [class_dict[s] for s in self.synsets] - else: - self.class_labels = [self.synset2idx[s] for s in self.synsets] - - with open(self.human_dict, "r") as f: - human_dict = f.read().splitlines() - human_dict = dict(line.split(maxsplit=1) for line in human_dict) - - self.human_labels = [human_dict[s] for s in self.synsets] - - labels = { - "relpath": np.array(self.relpaths), - "synsets": np.array(self.synsets), - "class_label": np.array(self.class_labels), - "human_label": np.array(self.human_labels), - } - - if self.process_images: - self.size = retrieve(self.config, "size", default=256) - self.data = ImagePaths(self.abspaths, - labels=labels, - size=self.size, - random_crop=self.random_crop, - ) - else: - self.data = self.abspaths - - -class ImageNetTrain(ImageNetBase): - NAME = "ILSVRC2012_train" - URL = "http://www.image-net.org/challenges/LSVRC/2012/" - AT_HASH = "a306397ccf9c2ead27155983c254227c0fd938e2" - FILES = [ - "ILSVRC2012_img_train.tar", - ] - SIZES = [ - 147897477120, - ] - - def __init__(self, process_images=True, data_root=None, **kwargs): - self.process_images = process_images - self.data_root = data_root - super().__init__(**kwargs) - - def _prepare(self): - if self.data_root: - self.root = os.path.join(self.data_root, self.NAME) - else: - cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) - self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - - self.datadir = os.path.join(self.root, "data") - self.txt_filelist = os.path.join(self.root, "filelist.txt") - self.expected_length = 1281167 - self.random_crop = retrieve(self.config, "ImageNetTrain/random_crop", - default=True) - if not tdu.is_prepared(self.root): - # prep - print("Preparing dataset {} in {}".format(self.NAME, self.root)) - - datadir = self.datadir - if not os.path.exists(datadir): - path = os.path.join(self.root, self.FILES[0]) - if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: - import academictorrents as at - atpath = at.get(self.AT_HASH, datastore=self.root) - assert atpath == path - - print("Extracting {} to {}".format(path, datadir)) - os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, "r:") as tar: - tar.extractall(path=datadir) - - print("Extracting sub-tars.") - subpaths = sorted(glob.glob(os.path.join(datadir, "*.tar"))) - for subpath in tqdm(subpaths): - subdir = subpath[:-len(".tar")] - os.makedirs(subdir, exist_ok=True) - with tarfile.open(subpath, "r:") as tar: - tar.extractall(path=subdir) - - filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) - filelist = [os.path.relpath(p, start=datadir) for p in filelist] - filelist = sorted(filelist) - filelist = "\n".join(filelist)+"\n" - with open(self.txt_filelist, "w") as f: - f.write(filelist) - - tdu.mark_prepared(self.root) - - -class ImageNetValidation(ImageNetBase): - NAME = "ILSVRC2012_validation" - URL = "http://www.image-net.org/challenges/LSVRC/2012/" - AT_HASH = "5d6d0df7ed81efd49ca99ea4737e0ae5e3a5f2e5" - VS_URL = "https://heibox.uni-heidelberg.de/f/3e0f6e9c624e45f2bd73/?dl=1" - FILES = [ - 
"ILSVRC2012_img_val.tar", - "validation_synset.txt", - ] - SIZES = [ - 6744924160, - 1950000, - ] - - def __init__(self, process_images=True, data_root=None, **kwargs): - self.data_root = data_root - self.process_images = process_images - super().__init__(**kwargs) - - def _prepare(self): - if self.data_root: - self.root = os.path.join(self.data_root, self.NAME) - else: - cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache")) - self.root = os.path.join(cachedir, "autoencoders/data", self.NAME) - self.datadir = os.path.join(self.root, "data") - self.txt_filelist = os.path.join(self.root, "filelist.txt") - self.expected_length = 50000 - self.random_crop = retrieve(self.config, "ImageNetValidation/random_crop", - default=False) - if not tdu.is_prepared(self.root): - # prep - print("Preparing dataset {} in {}".format(self.NAME, self.root)) - - datadir = self.datadir - if not os.path.exists(datadir): - path = os.path.join(self.root, self.FILES[0]) - if not os.path.exists(path) or not os.path.getsize(path)==self.SIZES[0]: - import academictorrents as at - atpath = at.get(self.AT_HASH, datastore=self.root) - assert atpath == path - - print("Extracting {} to {}".format(path, datadir)) - os.makedirs(datadir, exist_ok=True) - with tarfile.open(path, "r:") as tar: - tar.extractall(path=datadir) - - vspath = os.path.join(self.root, self.FILES[1]) - if not os.path.exists(vspath) or not os.path.getsize(vspath)==self.SIZES[1]: - download(self.VS_URL, vspath) - - with open(vspath, "r") as f: - synset_dict = f.read().splitlines() - synset_dict = dict(line.split() for line in synset_dict) - - print("Reorganizing into synset folders") - synsets = np.unique(list(synset_dict.values())) - for s in synsets: - os.makedirs(os.path.join(datadir, s), exist_ok=True) - for k, v in synset_dict.items(): - src = os.path.join(datadir, k) - dst = os.path.join(datadir, v) - shutil.move(src, dst) - - filelist = glob.glob(os.path.join(datadir, "**", "*.JPEG")) - filelist = [os.path.relpath(p, start=datadir) for p in filelist] - filelist = sorted(filelist) - filelist = "\n".join(filelist)+"\n" - with open(self.txt_filelist, "w") as f: - f.write(filelist) - - tdu.mark_prepared(self.root) - - - -class ImageNetSR(Dataset): - def __init__(self, size=None, - degradation=None, downscale_f=4, min_crop_f=0.5, max_crop_f=1., - random_crop=True): - """ - Imagenet Superresolution Dataloader - Performs following ops in order: - 1. crops a crop of size s from image either as random or center crop - 2. resizes crop to size with cv2.area_interpolation - 3. degrades resized crop with degradation_fn - - :param size: resizing to size after cropping - :param degradation: degradation_fn, e.g. cv_bicubic or bsrgan_light - :param downscale_f: Low Resolution Downsample factor - :param min_crop_f: determines crop size s, - where s = c * min_img_side_len with c sampled from interval (min_crop_f, max_crop_f) - :param max_crop_f: "" - :param data_root: - :param random_crop: - """ - self.base = self.get_base() - assert size - assert (size / downscale_f).is_integer() - self.size = size - self.LR_size = int(size / downscale_f) - self.min_crop_f = min_crop_f - self.max_crop_f = max_crop_f - assert(max_crop_f <= 1.) 
- self.center_crop = not random_crop - - self.image_rescaler = albumentations.SmallestMaxSize(max_size=size, interpolation=cv2.INTER_AREA) - - self.pil_interpolation = False # gets reset later if incase interp_op is from pillow - - if degradation == "bsrgan": - self.degradation_process = partial(degradation_fn_bsr, sf=downscale_f) - - elif degradation == "bsrgan_light": - self.degradation_process = partial(degradation_fn_bsr_light, sf=downscale_f) - - else: - interpolation_fn = { - "cv_nearest": cv2.INTER_NEAREST, - "cv_bilinear": cv2.INTER_LINEAR, - "cv_bicubic": cv2.INTER_CUBIC, - "cv_area": cv2.INTER_AREA, - "cv_lanczos": cv2.INTER_LANCZOS4, - "pil_nearest": PIL.Image.NEAREST, - "pil_bilinear": PIL.Image.BILINEAR, - "pil_bicubic": PIL.Image.BICUBIC, - "pil_box": PIL.Image.BOX, - "pil_hamming": PIL.Image.HAMMING, - "pil_lanczos": PIL.Image.LANCZOS, - }[degradation] - - self.pil_interpolation = degradation.startswith("pil_") - - if self.pil_interpolation: - self.degradation_process = partial(TF.resize, size=self.LR_size, interpolation=interpolation_fn) - - else: - self.degradation_process = albumentations.SmallestMaxSize(max_size=self.LR_size, - interpolation=interpolation_fn) - - def __len__(self): - return len(self.base) - - def __getitem__(self, i): - example = self.base[i] - image = Image.open(example["file_path_"]) - - if not image.mode == "RGB": - image = image.convert("RGB") - - image = np.array(image).astype(np.uint8) - - min_side_len = min(image.shape[:2]) - crop_side_len = min_side_len * np.random.uniform(self.min_crop_f, self.max_crop_f, size=None) - crop_side_len = int(crop_side_len) - - if self.center_crop: - self.cropper = albumentations.CenterCrop(height=crop_side_len, width=crop_side_len) - - else: - self.cropper = albumentations.RandomCrop(height=crop_side_len, width=crop_side_len) - - image = self.cropper(image=image)["image"] - image = self.image_rescaler(image=image)["image"] - - if self.pil_interpolation: - image_pil = PIL.Image.fromarray(image) - LR_image = self.degradation_process(image_pil) - LR_image = np.array(LR_image).astype(np.uint8) - - else: - LR_image = self.degradation_process(image=image)["image"] - - example["image"] = (image/127.5 - 1.0).astype(np.float32) - example["LR_image"] = (LR_image/127.5 - 1.0).astype(np.float32) - - return example - - -class ImageNetSRTrain(ImageNetSR): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def get_base(self): - with open("data/imagenet_train_hr_indices.p", "rb") as f: - indices = pickle.load(f) - dset = ImageNetTrain(process_images=False,) - return Subset(dset, indices) - - -class ImageNetSRValidation(ImageNetSR): - def __init__(self, **kwargs): - super().__init__(**kwargs) - - def get_base(self): - with open("data/imagenet_val_hr_indices.p", "rb") as f: - indices = pickle.load(f) - dset = ImageNetValidation(process_images=False,) - return Subset(dset, indices) diff --git a/ldm/data/lsun.py b/ldm/data/lsun.py deleted file mode 100644 index 6256e457..00000000 --- a/ldm/data/lsun.py +++ /dev/null @@ -1,92 +0,0 @@ -import os -import numpy as np -import PIL -from PIL import Image -from torch.utils.data import Dataset -from torchvision import transforms - - -class LSUNBase(Dataset): - def __init__(self, - txt_file, - data_root, - size=None, - interpolation="bicubic", - flip_p=0.5 - ): - self.data_paths = txt_file - self.data_root = data_root - with open(self.data_paths, "r") as f: - self.image_paths = f.read().splitlines() - self._length = len(self.image_paths) - self.labels = { - "relative_file_path_": 
[l for l in self.image_paths], - "file_path_": [os.path.join(self.data_root, l) - for l in self.image_paths], - } - - self.size = size - self.interpolation = {"linear": PIL.Image.LINEAR, - "bilinear": PIL.Image.BILINEAR, - "bicubic": PIL.Image.BICUBIC, - "lanczos": PIL.Image.LANCZOS, - }[interpolation] - self.flip = transforms.RandomHorizontalFlip(p=flip_p) - - def __len__(self): - return self._length - - def __getitem__(self, i): - example = dict((k, self.labels[k][i]) for k in self.labels) - image = Image.open(example["file_path_"]) - if not image.mode == "RGB": - image = image.convert("RGB") - - # default to score-sde preprocessing - img = np.array(image).astype(np.uint8) - crop = min(img.shape[0], img.shape[1]) - h, w, = img.shape[0], img.shape[1] - img = img[(h - crop) // 2:(h + crop) // 2, - (w - crop) // 2:(w + crop) // 2] - - image = Image.fromarray(img) - if self.size is not None: - image = image.resize((self.size, self.size), resample=self.interpolation) - - image = self.flip(image) - image = np.array(image).astype(np.uint8) - example["image"] = (image / 127.5 - 1.0).astype(np.float32) - return example - - -class LSUNChurchesTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_train.txt", data_root="data/lsun/churches", **kwargs) - - -class LSUNChurchesValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/church_outdoor_val.txt", data_root="data/lsun/churches", - flip_p=flip_p, **kwargs) - - -class LSUNBedroomsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_train.txt", data_root="data/lsun/bedrooms", **kwargs) - - -class LSUNBedroomsValidation(LSUNBase): - def __init__(self, flip_p=0.0, **kwargs): - super().__init__(txt_file="data/lsun/bedrooms_val.txt", data_root="data/lsun/bedrooms", - flip_p=flip_p, **kwargs) - - -class LSUNCatsTrain(LSUNBase): - def __init__(self, **kwargs): - super().__init__(txt_file="data/lsun/cat_train.txt", data_root="data/lsun/cats", **kwargs) - - -class LSUNCatsValidation(LSUNBase): - def __init__(self, flip_p=0., **kwargs): - super().__init__(txt_file="data/lsun/cat_val.txt", data_root="data/lsun/cats", - flip_p=flip_p, **kwargs) diff --git a/ldm/lr_scheduler.py b/ldm/lr_scheduler.py deleted file mode 100644 index be39da9c..00000000 --- a/ldm/lr_scheduler.py +++ /dev/null @@ -1,98 +0,0 @@ -import numpy as np - - -class LambdaWarmUpCosineScheduler: - """ - note: use with a base_lr of 1.0 - """ - def __init__(self, warm_up_steps, lr_min, lr_max, lr_start, max_decay_steps, verbosity_interval=0): - self.lr_warm_up_steps = warm_up_steps - self.lr_start = lr_start - self.lr_min = lr_min - self.lr_max = lr_max - self.lr_max_decay_steps = max_decay_steps - self.last_lr = 0. 
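# last_lr caches the most recently returned multiplier so the periodic
# verbosity print in schedule() can report it alongside the current step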
- self.verbosity_interval = verbosity_interval - - def schedule(self, n, **kwargs): - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_lr}") - if n < self.lr_warm_up_steps: - lr = (self.lr_max - self.lr_start) / self.lr_warm_up_steps * n + self.lr_start - self.last_lr = lr - return lr - else: - t = (n - self.lr_warm_up_steps) / (self.lr_max_decay_steps - self.lr_warm_up_steps) - t = min(t, 1.0) - lr = self.lr_min + 0.5 * (self.lr_max - self.lr_min) * ( - 1 + np.cos(t * np.pi)) - self.last_lr = lr - return lr - - def __call__(self, n, **kwargs): - return self.schedule(n,**kwargs) - - -class LambdaWarmUpCosineScheduler2: - """ - supports repeated iterations, configurable via lists - note: use with a base_lr of 1.0. - """ - def __init__(self, warm_up_steps, f_min, f_max, f_start, cycle_lengths, verbosity_interval=0): - assert len(warm_up_steps) == len(f_min) == len(f_max) == len(f_start) == len(cycle_lengths) - self.lr_warm_up_steps = warm_up_steps - self.f_start = f_start - self.f_min = f_min - self.f_max = f_max - self.cycle_lengths = cycle_lengths - self.cum_cycles = np.cumsum([0] + list(self.cycle_lengths)) - self.last_f = 0. - self.verbosity_interval = verbosity_interval - - def find_in_interval(self, n): - interval = 0 - for cl in self.cum_cycles[1:]: - if n <= cl: - return interval - interval += 1 - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - t = (n - self.lr_warm_up_steps[cycle]) / (self.cycle_lengths[cycle] - self.lr_warm_up_steps[cycle]) - t = min(t, 1.0) - f = self.f_min[cycle] + 0.5 * (self.f_max[cycle] - self.f_min[cycle]) * ( - 1 + np.cos(t * np.pi)) - self.last_f = f - return f - - def __call__(self, n, **kwargs): - return self.schedule(n, **kwargs) - - -class LambdaLinearScheduler(LambdaWarmUpCosineScheduler2): - - def schedule(self, n, **kwargs): - cycle = self.find_in_interval(n) - n = n - self.cum_cycles[cycle] - if self.verbosity_interval > 0: - if n % self.verbosity_interval == 0: print(f"current step: {n}, recent lr-multiplier: {self.last_f}, " - f"current cycle {cycle}") - - if n < self.lr_warm_up_steps[cycle]: - f = (self.f_max[cycle] - self.f_start[cycle]) / self.lr_warm_up_steps[cycle] * n + self.f_start[cycle] - self.last_f = f - return f - else: - f = self.f_min[cycle] + (self.f_max[cycle] - self.f_min[cycle]) * (self.cycle_lengths[cycle] - n) / (self.cycle_lengths[cycle]) - self.last_f = f - return f - diff --git a/ldm/models/autoencoder.py b/ldm/models/autoencoder.py deleted file mode 100644 index 6a9c4f45..00000000 --- a/ldm/models/autoencoder.py +++ /dev/null @@ -1,443 +0,0 @@ -import torch -import pytorch_lightning as pl -import torch.nn.functional as F -from contextlib import contextmanager - -from taming.modules.vqvae.quantize import VectorQuantizer2 as VectorQuantizer - -from ldm.modules.diffusionmodules.model import Encoder, Decoder -from ldm.modules.distributions.distributions import DiagonalGaussianDistribution - -from ldm.util import instantiate_from_config - - -class VQModel(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - n_embed, - embed_dim, - 
ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - batch_resize_range=None, - scheduler_config=None, - lr_g_factor=1.0, - remap=None, - sane_index_shape=False, # tell vector quantizer to return indices as bhw - use_ema=False - ): - super().__init__() - self.embed_dim = embed_dim - self.n_embed = n_embed - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - self.quantize = VectorQuantizer(n_embed, embed_dim, beta=0.25, - remap=remap, - sane_index_shape=sane_index_shape) - self.quant_conv = torch.nn.Conv2d(ddconfig["z_channels"], embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - self.batch_resize_range = batch_resize_range - if self.batch_resize_range is not None: - print(f"{self.__class__.__name__}: Using per-batch resizing in range {batch_resize_range}.") - - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - self.scheduler_config = scheduler_config - self.lr_g_factor = lr_g_factor - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.parameters()) - self.model_ema.copy_to(self) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - print(f"Unexpected Keys: {unexpected}") - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self) - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - quant, emb_loss, info = self.quantize(h) - return quant, emb_loss, info - - def encode_to_prequant(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, quant): - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - def decode_code(self, code_b): - quant_b = self.quantize.embed_code(code_b) - dec = self.decode(quant_b) - return dec - - def forward(self, input, return_pred_indices=False): - quant, diff, (_,_,ind) = self.encode(input) - dec = self.decode(quant) - if return_pred_indices: - return dec, diff, ind - return dec, diff - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - if self.batch_resize_range is not None: - lower_size = self.batch_resize_range[0] - upper_size = self.batch_resize_range[1] - if self.global_step <= 4: - # do the first few batches with max size to avoid later oom - new_resize = 
upper_size - else: - new_resize = np.random.choice(np.arange(lower_size, upper_size+16, 16)) - if new_resize != x.shape[2]: - x = F.interpolate(x, size=new_resize, mode="bicubic") - x = x.detach() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - # https://github.com/pytorch/pytorch/issues/37142 - # try not to fool the heuristics - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - - if optimizer_idx == 0: - # autoencode - aeloss, log_dict_ae = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train", - predicted_indices=ind) - - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return aeloss - - if optimizer_idx == 1: - # discriminator - discloss, log_dict_disc = self.loss(qloss, x, xrec, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=True) - return discloss - - def validation_step(self, batch, batch_idx): - log_dict = self._validation_step(batch, batch_idx) - with self.ema_scope(): - log_dict_ema = self._validation_step(batch, batch_idx, suffix="_ema") - return log_dict - - def _validation_step(self, batch, batch_idx, suffix=""): - x = self.get_input(batch, self.image_key) - xrec, qloss, ind = self(x, return_pred_indices=True) - aeloss, log_dict_ae = self.loss(qloss, x, xrec, 0, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - - discloss, log_dict_disc = self.loss(qloss, x, xrec, 1, - self.global_step, - last_layer=self.get_last_layer(), - split="val"+suffix, - predicted_indices=ind - ) - rec_loss = log_dict_ae[f"val{suffix}/rec_loss"] - self.log(f"val{suffix}/rec_loss", rec_loss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - self.log(f"val{suffix}/aeloss", aeloss, - prog_bar=True, logger=True, on_step=False, on_epoch=True, sync_dist=True) - if version.parse(pl.__version__) >= version.parse('1.4.0'): - del log_dict_ae[f"val{suffix}/rec_loss"] - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr_d = self.learning_rate - lr_g = self.lr_g_factor*self.learning_rate - print("lr_d", lr_d) - print("lr_g", lr_g) - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quantize.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr_g, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr_d, betas=(0.5, 0.9)) - - if self.scheduler_config is not None: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt_ae, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - { - 'scheduler': LambdaLR(opt_disc, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }, - ] - return [opt_ae, opt_disc], scheduler - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - def log_images(self, batch, only_inputs=False, plot_ema=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if only_inputs: - log["inputs"] = x - return log - xrec, _ = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert 
xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["inputs"] = x - log["reconstructions"] = xrec - if plot_ema: - with self.ema_scope(): - xrec_ema, _ = self(x) - if x.shape[1] > 3: xrec_ema = self.to_rgb(xrec_ema) - log["reconstructions_ema"] = xrec_ema - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. - return x - - -class VQModelInterface(VQModel): - def __init__(self, embed_dim, *args, **kwargs): - super().__init__(embed_dim=embed_dim, *args, **kwargs) - self.embed_dim = embed_dim - - def encode(self, x): - h = self.encoder(x) - h = self.quant_conv(h) - return h - - def decode(self, h, force_not_quantize=False): - # also go through quantization layer - if not force_not_quantize: - quant, emb_loss, info = self.quantize(h) - else: - quant = h - quant = self.post_quant_conv(quant) - dec = self.decoder(quant) - return dec - - -class AutoencoderKL(pl.LightningModule): - def __init__(self, - ddconfig, - lossconfig, - embed_dim, - ckpt_path=None, - ignore_keys=[], - image_key="image", - colorize_nlabels=None, - monitor=None, - ): - super().__init__() - self.image_key = image_key - self.encoder = Encoder(**ddconfig) - self.decoder = Decoder(**ddconfig) - self.loss = instantiate_from_config(lossconfig) - assert ddconfig["double_z"] - self.quant_conv = torch.nn.Conv2d(2*ddconfig["z_channels"], 2*embed_dim, 1) - self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1) - self.embed_dim = embed_dim - if colorize_nlabels is not None: - assert type(colorize_nlabels)==int - self.register_buffer("colorize", torch.randn(3, colorize_nlabels, 1, 1)) - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys) - - def init_from_ckpt(self, path, ignore_keys=list()): - sd = torch.load(path, map_location="cpu")["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - self.load_state_dict(sd, strict=False) - print(f"Restored from {path}") - - def encode(self, x): - h = self.encoder(x) - moments = self.quant_conv(h) - posterior = DiagonalGaussianDistribution(moments) - return posterior - - def decode(self, z): - z = self.post_quant_conv(z) - dec = self.decoder(z) - return dec - - def forward(self, input, sample_posterior=True): - posterior = self.encode(input) - if sample_posterior: - z = posterior.sample() - else: - z = posterior.mode() - dec = self.decode(z) - return dec, posterior - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float() - return x - - def training_step(self, batch, batch_idx, optimizer_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - - if optimizer_idx == 0: - # train encoder+decoder+logvar - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - self.log("aeloss", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return aeloss - - if optimizer_idx == 1: - # train the 
discriminator - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step, - last_layer=self.get_last_layer(), split="train") - - self.log("discloss", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True) - self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False) - return discloss - - def validation_step(self, batch, batch_idx): - inputs = self.get_input(batch, self.image_key) - reconstructions, posterior = self(inputs) - aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step, - last_layer=self.get_last_layer(), split="val") - - discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step, - last_layer=self.get_last_layer(), split="val") - - self.log("val/rec_loss", log_dict_ae["val/rec_loss"]) - self.log_dict(log_dict_ae) - self.log_dict(log_dict_disc) - return self.log_dict - - def configure_optimizers(self): - lr = self.learning_rate - opt_ae = torch.optim.Adam(list(self.encoder.parameters())+ - list(self.decoder.parameters())+ - list(self.quant_conv.parameters())+ - list(self.post_quant_conv.parameters()), - lr=lr, betas=(0.5, 0.9)) - opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(), - lr=lr, betas=(0.5, 0.9)) - return [opt_ae, opt_disc], [] - - def get_last_layer(self): - return self.decoder.conv_out.weight - - @torch.no_grad() - def log_images(self, batch, only_inputs=False, **kwargs): - log = dict() - x = self.get_input(batch, self.image_key) - x = x.to(self.device) - if not only_inputs: - xrec, posterior = self(x) - if x.shape[1] > 3: - # colorize with random projection - assert xrec.shape[1] > 3 - x = self.to_rgb(x) - xrec = self.to_rgb(xrec) - log["samples"] = self.decode(torch.randn_like(posterior.sample())) - log["reconstructions"] = xrec - log["inputs"] = x - return log - - def to_rgb(self, x): - assert self.image_key == "segmentation" - if not hasattr(self, "colorize"): - self.register_buffer("colorize", torch.randn(3, x.shape[1], 1, 1).to(x)) - x = F.conv2d(x, weight=self.colorize) - x = 2.*(x-x.min())/(x.max()-x.min()) - 1. 
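# the random 1x1 "colorize" projection maps an arbitrary number of channels down to 3,
# and the min-max rescale above brings the visualization back into the [-1, 1] range used for logging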
- return x - - -class IdentityFirstStage(torch.nn.Module): - def __init__(self, *args, vq_interface=False, **kwargs): - self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff - super().__init__() - - def encode(self, x, *args, **kwargs): - return x - - def decode(self, x, *args, **kwargs): - return x - - def quantize(self, x, *args, **kwargs): - if self.vq_interface: - return x, None, [None, None, None] - return x - - def forward(self, x, *args, **kwargs): - return x diff --git a/ldm/models/diffusion/__init__.py b/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ldm/models/diffusion/classifier.py b/ldm/models/diffusion/classifier.py deleted file mode 100644 index 67e98b9d..00000000 --- a/ldm/models/diffusion/classifier.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import torch -import pytorch_lightning as pl -from omegaconf import OmegaConf -from torch.nn import functional as F -from torch.optim import AdamW -from torch.optim.lr_scheduler import LambdaLR -from copy import deepcopy -from einops import rearrange -from glob import glob -from natsort import natsorted - -from ldm.modules.diffusionmodules.openaimodel import EncoderUNetModel, UNetModel -from ldm.util import log_txt_as_img, default, ismap, instantiate_from_config - -__models__ = { - 'class_label': EncoderUNetModel, - 'segmentation': UNetModel -} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not change anymore.""" - return self - - -class NoisyLatentImageClassifier(pl.LightningModule): - - def __init__(self, - diffusion_path, - num_classes, - ckpt_path=None, - pool='attention', - label_key=None, - diffusion_ckpt_path=None, - scheduler_config=None, - weight_decay=1.e-2, - log_steps=10, - monitor='val/loss', - *args, - **kwargs): - super().__init__(*args, **kwargs) - self.num_classes = num_classes - # get latest config of diffusion model - diffusion_config = natsorted(glob(os.path.join(diffusion_path, 'configs', '*-project.yaml')))[-1] - self.diffusion_config = OmegaConf.load(diffusion_config).model - self.diffusion_config.params.ckpt_path = diffusion_ckpt_path - self.load_diffusion() - - self.monitor = monitor - self.numd = self.diffusion_model.first_stage_model.encoder.num_resolutions - 1 - self.log_time_interval = self.diffusion_model.num_timesteps // log_steps - self.log_steps = log_steps - - self.label_key = label_key if not hasattr(self.diffusion_model, 'cond_stage_key') \ - else self.diffusion_model.cond_stage_key - - assert self.label_key is not None, 'label_key neither in diffusion model nor in model.params' - - if self.label_key not in __models__: - raise NotImplementedError() - - self.load_classifier(ckpt_path, pool) - - self.scheduler_config = scheduler_config - self.use_scheduler = self.scheduler_config is not None - self.weight_decay = weight_decay - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing 
Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def load_diffusion(self): - model = instantiate_from_config(self.diffusion_config) - self.diffusion_model = model.eval() - self.diffusion_model.train = disabled_train - for param in self.diffusion_model.parameters(): - param.requires_grad = False - - def load_classifier(self, ckpt_path, pool): - model_config = deepcopy(self.diffusion_config.params.unet_config.params) - model_config.in_channels = self.diffusion_config.params.unet_config.params.out_channels - model_config.out_channels = self.num_classes - if self.label_key == 'class_label': - model_config.pool = pool - - self.model = __models__[self.label_key](**model_config) - if ckpt_path is not None: - print('#####################################################################') - print(f'load from ckpt "{ckpt_path}"') - print('#####################################################################') - self.init_from_ckpt(ckpt_path) - - @torch.no_grad() - def get_x_noisy(self, x, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x)) - continuous_sqrt_alpha_cumprod = None - if self.diffusion_model.use_continuous_noise: - continuous_sqrt_alpha_cumprod = self.diffusion_model.sample_continuous_noise_level(x.shape[0], t + 1) - # todo: make sure t+1 is correct here - - return self.diffusion_model.q_sample(x_start=x, t=t, noise=noise, - continuous_sqrt_alpha_cumprod=continuous_sqrt_alpha_cumprod) - - def forward(self, x_noisy, t, *args, **kwargs): - return self.model(x_noisy, t) - - @torch.no_grad() - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = x.to(memory_format=torch.contiguous_format).float() - return x - - @torch.no_grad() - def get_conditioning(self, batch, k=None): - if k is None: - k = self.label_key - assert k is not None, 'Needs to provide label key' - - targets = batch[k].to(self.device) - - if self.label_key == 'segmentation': - targets = rearrange(targets, 'b h w c -> b c h w') - for down in range(self.numd): - h, w = targets.shape[-2:] - targets = F.interpolate(targets, size=(h // 2, w // 2), mode='nearest') - - # targets = rearrange(targets,'b c h w -> b h w c') - - return targets - - def compute_top_k(self, logits, labels, k, reduction="mean"): - _, top_ks = torch.topk(logits, k, dim=1) - if reduction == "mean": - return (top_ks == labels[:, None]).float().sum(dim=-1).mean().item() - elif reduction == "none": - return (top_ks == labels[:, None]).float().sum(dim=-1) - - def on_train_epoch_start(self): - # save some memory - self.diffusion_model.model.to('cpu') - - @torch.no_grad() - def write_logs(self, loss, logits, targets): - log_prefix = 'train' if self.training else 'val' - log = {} - log[f"{log_prefix}/loss"] = loss.mean() - log[f"{log_prefix}/acc@1"] = self.compute_top_k( - logits, targets, k=1, reduction="mean" - ) - log[f"{log_prefix}/acc@5"] = self.compute_top_k( - logits, targets, k=5, reduction="mean" - ) - - self.log_dict(log, prog_bar=False, logger=True, on_step=self.training, on_epoch=True) - self.log('loss', log[f"{log_prefix}/loss"], prog_bar=True, logger=False) - self.log('global_step', self.global_step, logger=False, on_epoch=False, prog_bar=True) - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, on_step=True, logger=True, on_epoch=False, prog_bar=True) - - def shared_step(self, batch, t=None): - x, *_ = self.diffusion_model.get_input(batch, k=self.diffusion_model.first_stage_key) - targets = 
self.get_conditioning(batch) - if targets.dim() == 4: - targets = targets.argmax(dim=1) - if t is None: - t = torch.randint(0, self.diffusion_model.num_timesteps, (x.shape[0],), device=self.device).long() - else: - t = torch.full(size=(x.shape[0],), fill_value=t, device=self.device).long() - x_noisy = self.get_x_noisy(x, t) - logits = self(x_noisy, t) - - loss = F.cross_entropy(logits, targets, reduction='none') - - self.write_logs(loss.detach(), logits.detach(), targets.detach()) - - loss = loss.mean() - return loss, logits, x_noisy, targets - - def training_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - return loss - - def reset_noise_accs(self): - self.noisy_acc = {t: {'acc@1': [], 'acc@5': []} for t in - range(0, self.diffusion_model.num_timesteps, self.diffusion_model.log_every_t)} - - def on_validation_start(self): - self.reset_noise_accs() - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - loss, *_ = self.shared_step(batch) - - for t in self.noisy_acc: - _, logits, _, targets = self.shared_step(batch, t) - self.noisy_acc[t]['acc@1'].append(self.compute_top_k(logits, targets, k=1, reduction='mean')) - self.noisy_acc[t]['acc@5'].append(self.compute_top_k(logits, targets, k=5, reduction='mean')) - - return loss - - def configure_optimizers(self): - optimizer = AdamW(self.model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay) - - if self.use_scheduler: - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(optimizer, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [optimizer], scheduler - - return optimizer - - @torch.no_grad() - def log_images(self, batch, N=8, *args, **kwargs): - log = dict() - x = self.get_input(batch, self.diffusion_model.first_stage_key) - log['inputs'] = x - - y = self.get_conditioning(batch) - - if self.label_key == 'class_label': - y = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['labels'] = y - - if ismap(y): - log['labels'] = self.diffusion_model.to_rgb(y) - - for step in range(self.log_steps): - current_time = step * self.log_time_interval - - _, logits, x_noisy, _ = self.shared_step(batch, t=current_time) - - log[f'inputs@t{current_time}'] = x_noisy - - pred = F.one_hot(logits.argmax(dim=1), num_classes=self.num_classes) - pred = rearrange(pred, 'b h w c -> b c h w') - - log[f'pred@t{current_time}'] = self.diffusion_model.to_rgb(pred) - - for key in log: - log[key] = log[key][:N] - - return log diff --git a/ldm/models/diffusion/ddim.py b/ldm/models/diffusion/ddim.py deleted file mode 100644 index fb31215d..00000000 --- a/ldm/models/diffusion/ddim.py +++ /dev/null @@ -1,241 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like, \ - extract_into_tensor - - -class DDIMSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - self.ddim_timesteps = 
make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def ddim_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps) - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - img, pred_x0 = outs - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
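A minimal sketch of the two formulas p_sample_ddim uses above: the classifier-free guidance mix of unconditional and conditional noise predictions, and the DDIM update assembled from pred_x0 and dir_xt. The tensors, schedule values, and guidance scale below are toy stand-ins, not the real model outputs.

import torch

def guided_eps(e_uncond, e_cond, scale):
    # scale = 1.0 reduces to the purely conditional prediction
    return e_uncond + scale * (e_cond - e_uncond)

def ddim_step(x_t, e_t, a_t, a_prev, sigma_t):
    pred_x0 = (x_t - (1.0 - a_t).sqrt() * e_t) / a_t.sqrt()     # current estimate of x_0
    dir_xt = (1.0 - a_prev - sigma_t ** 2).sqrt() * e_t         # direction pointing to x_t
    noise = sigma_t * torch.randn_like(x_t)
    return a_prev.sqrt() * pred_x0 + dir_xt + noise

x = torch.randn(1, 4, 8, 8)
e_t = guided_eps(torch.randn_like(x), torch.randn_like(x), scale=7.5)
x_prev = ddim_step(x, e_t, a_t=torch.tensor(0.5), a_prev=torch.tensor(0.6), sigma_t=torch.tensor(0.0))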
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - @torch.no_grad() - def stochastic_encode(self, x0, t, use_original_steps=False, noise=None): - # fast, but does not allow for exact reconstruction - # t serves as an index to gather the correct alphas - if use_original_steps: - sqrt_alphas_cumprod = self.sqrt_alphas_cumprod - sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod - else: - sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas) - sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas - - if noise is None: - noise = torch.randn_like(x0) - return (extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0 + - extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape) * noise) - - @torch.no_grad() - def decode(self, x_latent, cond, t_start, unconditional_guidance_scale=1.0, unconditional_conditioning=None, - use_original_steps=False): - - timesteps = np.arange(self.ddpm_num_timesteps) if use_original_steps else self.ddim_timesteps - timesteps = timesteps[:t_start] - - time_range = np.flip(timesteps) - total_steps = timesteps.shape[0] - print(f"Running DDIM Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='Decoding image', total=total_steps) - x_dec = x_latent - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((x_latent.shape[0],), step, device=x_latent.device, dtype=torch.long) - x_dec, _ = self.p_sample_ddim(x_dec, cond, ts, index=index, use_original_steps=use_original_steps, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning) - return x_dec \ No newline at end of file diff --git a/ldm/models/diffusion/ddpm.py b/ldm/models/diffusion/ddpm.py deleted file mode 100644 index bbedd04c..00000000 --- a/ldm/models/diffusion/ddpm.py +++ /dev/null @@ -1,1445 +0,0 @@ -""" -wild mixture of -https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py -https://github.com/CompVis/taming-transformers --- merci -""" - -import torch -import torch.nn as nn -import numpy as np -import pytorch_lightning as pl -from torch.optim.lr_scheduler import LambdaLR -from einops import rearrange, repeat -from contextlib import contextmanager -from functools import partial -from tqdm import tqdm -from torchvision.utils import make_grid -from pytorch_lightning.utilities.distributed import rank_zero_only - -from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config -from ldm.modules.ema import LitEma -from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution -from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL -from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like -from ldm.models.diffusion.ddim import DDIMSampler - - -__conditioning_keys__ = {'concat': 'c_concat', - 'crossattn': 'c_crossattn', - 'adm': 'y'} - - -def disabled_train(self, mode=True): - """Overwrite model.train with this function to make sure train/eval mode - does not 
change anymore.""" - return self - - -def uniform_on_device(r1, r2, shape, device): - return (r1 - r2) * torch.rand(*shape, device=device) + r2 - - -class DDPM(pl.LightningModule): - # classic DDPM with Gaussian diffusion, in image space - def __init__(self, - unet_config, - timesteps=1000, - beta_schedule="linear", - loss_type="l2", - ckpt_path=None, - ignore_keys=[], - load_only_unet=False, - monitor="val/loss", - use_ema=True, - first_stage_key="image", - image_size=256, - channels=3, - log_every_t=100, - clip_denoised=True, - linear_start=1e-4, - linear_end=2e-2, - cosine_s=8e-3, - given_betas=None, - original_elbo_weight=0., - v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta - l_simple_weight=1., - conditioning_key=None, - parameterization="eps", # all assuming fixed variance schedules - scheduler_config=None, - use_positional_encodings=False, - learn_logvar=False, - logvar_init=0., - ): - super().__init__() - assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' - self.parameterization = parameterization - print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") - self.cond_stage_model = None - self.clip_denoised = clip_denoised - self.log_every_t = log_every_t - self.first_stage_key = first_stage_key - self.image_size = image_size # try conv? - self.channels = channels - self.use_positional_encodings = use_positional_encodings - self.model = DiffusionWrapper(unet_config, conditioning_key) - count_params(self.model, verbose=True) - self.use_ema = use_ema - if self.use_ema: - self.model_ema = LitEma(self.model) - print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") - - self.use_scheduler = scheduler_config is not None - if self.use_scheduler: - self.scheduler_config = scheduler_config - - self.v_posterior = v_posterior - self.original_elbo_weight = original_elbo_weight - self.l_simple_weight = l_simple_weight - - if monitor is not None: - self.monitor = monitor - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) - - self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, - linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) - - self.loss_type = loss_type - - self.learn_logvar = learn_logvar - self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) - if self.learn_logvar: - self.logvar = nn.Parameter(self.logvar, requires_grad=True) - - - def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if exists(given_betas): - betas = given_betas - else: - betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, - cosine_s=cosine_s) - alphas = 1. 
- betas - alphas_cumprod = np.cumprod(alphas, axis=0) - alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) - - timesteps, = betas.shape - self.num_timesteps = int(timesteps) - self.linear_start = linear_start - self.linear_end = linear_end - assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' - - to_torch = partial(torch.tensor, dtype=torch.float32) - - self.register_buffer('betas', to_torch(betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) - - # calculations for posterior q(x_{t-1} | x_t, x_0) - posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( - 1. - alphas_cumprod) + self.v_posterior * betas - # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) - self.register_buffer('posterior_variance', to_torch(posterior_variance)) - # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain - self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) - self.register_buffer('posterior_mean_coef1', to_torch( - betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) - self.register_buffer('posterior_mean_coef2', to_torch( - (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) - - if self.parameterization == "eps": - lvlb_weights = self.betas ** 2 / ( - 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) - elif self.parameterization == "x0": - lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) - else: - raise NotImplementedError("mu not supported") - # TODO how to choose this term - lvlb_weights[0] = lvlb_weights[1] - self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) - assert not torch.isnan(self.lvlb_weights).all() - - @contextmanager - def ema_scope(self, context=None): - if self.use_ema: - self.model_ema.store(self.model.parameters()) - self.model_ema.copy_to(self.model) - if context is not None: - print(f"{context}: Switched to EMA weights") - try: - yield None - finally: - if self.use_ema: - self.model_ema.restore(self.model.parameters()) - if context is not None: - print(f"{context}: Restored training weights") - - def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): - sd = torch.load(path, map_location="cpu") - if "state_dict" in list(sd.keys()): - sd = sd["state_dict"] - keys = list(sd.keys()) - for k in keys: - for ik in ignore_keys: - if k.startswith(ik): - print("Deleting key {} from state_dict.".format(k)) - del sd[k] - missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( - sd, strict=False) - print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") - if len(missing) > 0: - print(f"Missing Keys: {missing}") - if len(unexpected) > 0: - print(f"Unexpected Keys: {unexpected}") - - def q_mean_variance(self, x_start, t): - """ - Get the distribution q(x_t | x_0). - :param x_start: the [N x C x ...] tensor of noiseless inputs. - :param t: the number of diffusion steps (minus 1). Here, 0 means one step. - :return: A tuple (mean, variance, log_variance), all of x_start's shape. - """ - mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) - variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) - log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) - return mean, variance, log_variance - - def predict_start_from_noise(self, x_t, t, noise): - return ( - extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise - ) - - def q_posterior(self, x_start, x_t, t): - posterior_mean = ( - extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + - extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t - ) - posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) - posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) - return posterior_mean, posterior_variance, posterior_log_variance_clipped - - def p_mean_variance(self, x, t, clip_denoised: bool): - model_out = self.model(x, t) - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - if clip_denoised: - x_recon.clamp_(-1., 1.) 
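A small numeric sketch of the quantities predict_start_from_noise and q_posterior above combine, assuming v_posterior = 0 (the default) and a plain linspace standing in for make_beta_schedule's linear branch: the closed-form posterior q(x_{t-1} | x_t, x_0) has a fixed variance schedule and a mean that is a fixed linear combination of x_0 and x_t.

import numpy as np

betas = np.linspace(1e-4, 2e-2, 1000)               # stand-in for make_beta_schedule("linear", ...)
alphas = 1.0 - betas
abar = np.cumprod(alphas)
abar_prev = np.append(1.0, abar[:-1])

posterior_variance = betas * (1.0 - abar_prev) / (1.0 - abar)
coef1 = betas * np.sqrt(abar_prev) / (1.0 - abar)            # weight on x_0
coef2 = (1.0 - abar_prev) * np.sqrt(alphas) / (1.0 - abar)   # weight on x_t

t = 500
x_t, eps = 0.3, -1.2                                          # toy scalars in place of tensors
x0_hat = np.sqrt(1.0 / abar[t]) * x_t - np.sqrt(1.0 / abar[t] - 1.0) * eps   # predict_start_from_noise
posterior_mean = coef1[t] * x0_hat + coef2[t] * x_t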
- - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): - b, *_, device = *x.shape, x.device - model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) - noise = noise_like(x.shape, device, repeat_noise) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def p_sample_loop(self, shape, return_intermediates=False): - device = self.betas.device - b = shape[0] - img = torch.randn(shape, device=device) - intermediates = [img] - for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): - img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), - clip_denoised=self.clip_denoised) - if i % self.log_every_t == 0 or i == self.num_timesteps - 1: - intermediates.append(img) - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, batch_size=16, return_intermediates=False): - image_size = self.image_size - channels = self.channels - return self.p_sample_loop((batch_size, channels, image_size, image_size), - return_intermediates=return_intermediates) - - def q_sample(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + - extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) - - def get_loss(self, pred, target, mean=True): - if self.loss_type == 'l1': - loss = (target - pred).abs() - if mean: - loss = loss.mean() - elif self.loss_type == 'l2': - if mean: - loss = torch.nn.functional.mse_loss(target, pred) - else: - loss = torch.nn.functional.mse_loss(target, pred, reduction='none') - else: - raise NotImplementedError("unknown loss type '{loss_type}'") - - return loss - - def p_losses(self, x_start, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_out = self.model(x_noisy, t) - - loss_dict = {} - if self.parameterization == "eps": - target = noise - elif self.parameterization == "x0": - target = x_start - else: - raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") - - loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) - - log_prefix = 'train' if self.training else 'val' - - loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) - loss_simple = loss.mean() * self.l_simple_weight - - loss_vlb = (self.lvlb_weights[t] * loss).mean() - loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) - - loss = loss_simple + self.original_elbo_weight * loss_vlb - - loss_dict.update({f'{log_prefix}/loss': loss}) - - return loss, loss_dict - - def forward(self, x, *args, **kwargs): - # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size - # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - return self.p_losses(x, t, *args, **kwargs) - - def get_input(self, batch, k): - x = batch[k] - if len(x.shape) == 3: - x = x[..., None] - x = rearrange(x, 'b h w c -> b c h w') - x = 
x.to(memory_format=torch.contiguous_format).float() - return x - - def shared_step(self, batch): - x = self.get_input(batch, self.first_stage_key) - loss, loss_dict = self(x) - return loss, loss_dict - - def training_step(self, batch, batch_idx): - loss, loss_dict = self.shared_step(batch) - - self.log_dict(loss_dict, prog_bar=True, - logger=True, on_step=True, on_epoch=True) - - self.log("global_step", self.global_step, - prog_bar=True, logger=True, on_step=True, on_epoch=False) - - if self.use_scheduler: - lr = self.optimizers().param_groups[0]['lr'] - self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) - - return loss - - @torch.no_grad() - def validation_step(self, batch, batch_idx): - _, loss_dict_no_ema = self.shared_step(batch) - with self.ema_scope(): - _, loss_dict_ema = self.shared_step(batch) - loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} - self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) - - def on_train_batch_end(self, *args, **kwargs): - if self.use_ema: - self.model_ema(self.model) - - def _get_rows_from_list(self, samples): - n_imgs_per_row = len(samples) - denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): - log = dict() - x = self.get_input(batch, self.first_stage_key) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - x = x.to(self.device)[:N] - log["inputs"] = x - - # get diffusion row - diffusion_row = list() - x_start = x[:n_row] - - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(x_start) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - diffusion_row.append(x_noisy) - - log["diffusion_row"] = self._get_rows_from_list(diffusion_row) - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) - - log["samples"] = samples - log["denoise_row"] = self._get_rows_from_list(denoise_row) - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.learn_logvar: - params = params + [self.logvar] - opt = torch.optim.AdamW(params, lr=lr) - return opt - - -class LatentDiffusion(DDPM): - """main class""" - def __init__(self, - first_stage_config, - cond_stage_config, - num_timesteps_cond=None, - cond_stage_key="image", - cond_stage_trainable=False, - concat_mode=True, - cond_stage_forward=None, - conditioning_key=None, - scale_factor=1.0, - scale_by_std=False, - *args, **kwargs): - self.num_timesteps_cond = default(num_timesteps_cond, 1) - self.scale_by_std = scale_by_std - assert self.num_timesteps_cond <= kwargs['timesteps'] - # for backwards compatibility after implementation of DiffusionWrapper - if conditioning_key is None: - conditioning_key = 'concat' if concat_mode else 'crossattn' - if cond_stage_config == '__is_unconditional__': - 
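A compact sketch of what shared_step/p_losses above amount to for the default "eps" parameterization: draw a timestep, noise the input via q_sample, and regress the model output onto the injected noise. The denoiser below is a do-nothing placeholder so the snippet runs; the real one is the UNet wrapped by DiffusionWrapper.

import torch
import torch.nn.functional as F

alphas_cumprod = torch.linspace(0.999, 0.01, 1000)

def denoiser(x, t):                      # placeholder for self.model
    return torch.zeros_like(x)

x0 = torch.randn(4, 3, 16, 16)
t = torch.randint(0, 1000, (x0.shape[0],))
noise = torch.randn_like(x0)

abar_t = alphas_cumprod[t].view(-1, 1, 1, 1)
x_noisy = abar_t.sqrt() * x0 + (1.0 - abar_t).sqrt() * noise   # q_sample
loss = F.mse_loss(denoiser(x_noisy, t), noise)                 # target is the noise ("eps")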
conditioning_key = None - ckpt_path = kwargs.pop("ckpt_path", None) - ignore_keys = kwargs.pop("ignore_keys", []) - super().__init__(conditioning_key=conditioning_key, *args, **kwargs) - self.concat_mode = concat_mode - self.cond_stage_trainable = cond_stage_trainable - self.cond_stage_key = cond_stage_key - try: - self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 - except: - self.num_downs = 0 - if not scale_by_std: - self.scale_factor = scale_factor - else: - self.register_buffer('scale_factor', torch.tensor(scale_factor)) - self.instantiate_first_stage(first_stage_config) - self.instantiate_cond_stage(cond_stage_config) - self.cond_stage_forward = cond_stage_forward - self.clip_denoised = False - self.bbox_tokenizer = None - - self.restarted_from_ckpt = False - if ckpt_path is not None: - self.init_from_ckpt(ckpt_path, ignore_keys) - self.restarted_from_ckpt = True - - def make_cond_schedule(self, ): - self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) - ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() - self.cond_ids[:self.num_timesteps_cond] = ids - - @rank_zero_only - @torch.no_grad() - def on_train_batch_start(self, batch, batch_idx, dataloader_idx): - # only for very first batch - if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: - assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' - # set rescale weight to 1./std of encodings - print("### USING STD-RESCALING ###") - x = super().get_input(batch, self.first_stage_key) - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - del self.scale_factor - self.register_buffer('scale_factor', 1. 
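The std-rescaling performed on the first training batch above picks scale_factor so that scaled first-stage encodings have roughly unit standard deviation. A toy illustration, with random latents standing in for encoder outputs:

import torch

z = 0.18 * torch.randn(16, 4, 32, 32)        # stand-in for a batch of first-stage encodings
scale_factor = 1.0 / z.flatten().std()
print(float((scale_factor * z).std()))       # ~1.0, the scale get_first_stage_encoding then applies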
/ z.flatten().std()) - print(f"setting self.scale_factor to {self.scale_factor}") - print("### USING STD-RESCALING ###") - - def register_schedule(self, - given_betas=None, beta_schedule="linear", timesteps=1000, - linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) - - self.shorten_cond_schedule = self.num_timesteps_cond > 1 - if self.shorten_cond_schedule: - self.make_cond_schedule() - - def instantiate_first_stage(self, config): - model = instantiate_from_config(config) - self.first_stage_model = model.eval() - self.first_stage_model.train = disabled_train - for param in self.first_stage_model.parameters(): - param.requires_grad = False - - def instantiate_cond_stage(self, config): - if not self.cond_stage_trainable: - if config == "__is_first_stage__": - print("Using first stage also as cond stage.") - self.cond_stage_model = self.first_stage_model - elif config == "__is_unconditional__": - print(f"Training {self.__class__.__name__} as an unconditional model.") - self.cond_stage_model = None - # self.be_unconditional = True - else: - model = instantiate_from_config(config) - self.cond_stage_model = model.eval() - self.cond_stage_model.train = disabled_train - for param in self.cond_stage_model.parameters(): - param.requires_grad = False - else: - assert config != '__is_first_stage__' - assert config != '__is_unconditional__' - model = instantiate_from_config(config) - self.cond_stage_model = model - - def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): - denoise_row = [] - for zd in tqdm(samples, desc=desc): - denoise_row.append(self.decode_first_stage(zd.to(self.device), - force_not_quantize=force_no_decoder_quantization)) - n_imgs_per_row = len(denoise_row) - denoise_row = torch.stack(denoise_row) # n_log_step, n_row, C, H, W - denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') - denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') - denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) - return denoise_grid - - def get_first_stage_encoding(self, encoder_posterior): - if isinstance(encoder_posterior, DiagonalGaussianDistribution): - z = encoder_posterior.sample() - elif isinstance(encoder_posterior, torch.Tensor): - z = encoder_posterior - else: - raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") - return self.scale_factor * z - - def get_learned_conditioning(self, c): - if self.cond_stage_forward is None: - if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): - c = self.cond_stage_model.encode(c) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - else: - c = self.cond_stage_model(c) - else: - assert hasattr(self.cond_stage_model, self.cond_stage_forward) - c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) - return c - - def meshgrid(self, h, w): - y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) - x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) - - arr = torch.cat([y, x], dim=-1) - return arr - - def delta_border(self, h, w): - """ - :param h: height - :param w: width - :return: normalized distance to image border, - wtith min distance = 0 at border and max dist = 0.5 at image center - """ - lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) - arr = self.meshgrid(h, w) / lower_right_corner - dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] - dist_right_down = 
torch.min(1 - arr, dim=-1, keepdims=True)[0] - edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] - return edge_dist - - def get_weighting(self, h, w, Ly, Lx, device): - weighting = self.delta_border(h, w) - weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], - self.split_input_params["clip_max_weight"], ) - weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) - - if self.split_input_params["tie_braker"]: - L_weighting = self.delta_border(Ly, Lx) - L_weighting = torch.clip(L_weighting, - self.split_input_params["clip_min_tie_weight"], - self.split_input_params["clip_max_tie_weight"]) - - L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) - weighting = weighting * L_weighting - return weighting - - def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code - """ - :param x: img of size (bs, c, h, w) - :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) - """ - bs, nc, h, w = x.shape - - # number of crops in image - Ly = (h - kernel_size[0]) // stride[0] + 1 - Lx = (w - kernel_size[1]) // stride[1] + 1 - - if uf == 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) - - weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) - - elif uf > 1 and df == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[0] * uf), - dilation=1, padding=0, - stride=(stride[0] * uf, stride[1] * uf)) - fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) - - elif df > 1 and uf == 1: - fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) - unfold = torch.nn.Unfold(**fold_params) - - fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[0] // df), - dilation=1, padding=0, - stride=(stride[0] // df, stride[1] // df)) - fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) - - weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) - normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap - weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) - - else: - raise NotImplementedError - - return fold, unfold, normalization, weighting - - @torch.no_grad() - def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, - cond_key=None, return_original_cond=False, bs=None): - x = super().get_input(batch, k) - if bs is not None: - x = x[:bs] - x = x.to(self.device) - encoder_posterior = self.encode_first_stage(x) - z = self.get_first_stage_encoding(encoder_posterior).detach() - - if self.model.conditioning_key is not None: - if cond_key is None: - cond_key = 
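A runnable miniature of the unfold -> per-crop processing -> fold pipeline that get_fold_unfold and the split_input_params branches above implement, using uniform weighting in place of the border-distance weighting from get_weighting, and an identity op in place of the per-crop decoder.

import torch

x = torch.randn(1, 4, 64, 64)
ks, stride = (32, 32), (16, 16)
unfold = torch.nn.Unfold(kernel_size=ks, stride=stride)
fold = torch.nn.Fold(output_size=x.shape[2:], kernel_size=ks, stride=stride)

crops = unfold(x).view(1, 4, ks[0], ks[1], -1)                 # (1, C, kh, kw, L) overlapping crops
processed = crops                                              # identity stands in for the per-crop model
out = fold(processed.reshape(1, -1, processed.shape[-1]))      # overlapping crops are summed here
norm = fold(unfold(torch.ones_like(x)))                        # how many crops covered each pixel
recon = out / norm                                             # normalize the overlap; recon equals x here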
self.cond_stage_key - if cond_key != self.first_stage_key: - if cond_key in ['caption', 'coordinates_bbox']: - xc = batch[cond_key] - elif cond_key == 'class_label': - xc = batch - else: - xc = super().get_input(batch, cond_key).to(self.device) - else: - xc = x - if not self.cond_stage_trainable or force_c_encode: - if isinstance(xc, dict) or isinstance(xc, list): - # import pudb; pudb.set_trace() - c = self.get_learned_conditioning(xc) - else: - c = self.get_learned_conditioning(xc.to(self.device)) - else: - c = xc - if bs is not None: - c = c[:bs] - - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - ckey = __conditioning_keys__[self.model.conditioning_key] - c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} - - else: - c = None - xc = None - if self.use_positional_encodings: - pos_x, pos_y = self.compute_latent_shifts(batch) - c = {'pos_x': pos_x, 'pos_y': pos_y} - out = [z, c] - if return_first_stage_outputs: - xrec = self.decode_first_stage(z) - out.extend([x, xrec]) - if return_original_cond: - out.append(xc) - return out - - @torch.no_grad() - def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. 
reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - # same as above but without decorator - def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): - if predict_cids: - if z.dim() == 4: - z = torch.argmax(z.exp(), dim=1).long() - z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) - z = rearrange(z, 'b h w c -> b c h w').contiguous() - - z = 1. / self.scale_factor * z - - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. (64, 64) - uf = self.split_input_params["vqf"] - bs, nc, h, w = z.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) - - z = unfold(z) # (bn, nc * prod(**ks), L) - # 1. Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - # 2. apply model loop over last dim - if isinstance(self.first_stage_model, VQModelInterface): - output_list = [self.first_stage_model.decode(z[:, :, :, :, i], - force_not_quantize=predict_cids or force_not_quantize) - for i in range(z.shape[-1])] - else: - - output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) - o = o * weighting - # Reverse 1. reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization # norm is shape (1, 1, h, w) - return decoded - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - else: - if isinstance(self.first_stage_model, VQModelInterface): - return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) - else: - return self.first_stage_model.decode(z) - - @torch.no_grad() - def encode_first_stage(self, x): - if hasattr(self, "split_input_params"): - if self.split_input_params["patch_distributed_vq"]: - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - df = self.split_input_params["vqf"] - self.split_input_params['original_image_size'] = x.shape[-2:] - bs, nc, h, w = x.shape - if ks[0] > h or ks[1] > w: - ks = (min(ks[0], h), min(ks[1], w)) - print("reducing Kernel") - - if stride[0] > h or stride[1] > w: - stride = (min(stride[0], h), min(stride[1], w)) - print("reducing stride") - - fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) - z = unfold(x) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) - for i in range(z.shape[-1])] - - o = torch.stack(output_list, axis=-1) - o = o * weighting - - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - decoded = fold(o) - decoded = decoded / normalization - return decoded - - else: - return self.first_stage_model.encode(x) - else: - return self.first_stage_model.encode(x) - - def shared_step(self, batch, **kwargs): - x, c = self.get_input(batch, self.first_stage_key) - loss = self(x, c) - return loss - - def forward(self, x, c, *args, **kwargs): - t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() - if self.model.conditioning_key is not None: - assert c is not None - if self.cond_stage_trainable: - c = self.get_learned_conditioning(c) - if self.shorten_cond_schedule: # TODO: drop this option - tc = self.cond_ids[t].to(self.device) - c = self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) - return self.p_losses(x, c, t, *args, **kwargs) - - def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset - def rescale_bbox(bbox): - x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) - y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) - w = min(bbox[2] / crop_coordinates[2], 1 - x0) - h = min(bbox[3] / crop_coordinates[3], 1 - y0) - return x0, y0, w, h - - return [rescale_bbox(b) for b in bboxes] - - def apply_model(self, x_noisy, t, cond, return_ids=False): - - if isinstance(cond, dict): - # hybrid case, cond is exptected to be a dict - pass - else: - if not isinstance(cond, list): - cond = [cond] - key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' - cond = {key: cond} - - if hasattr(self, "split_input_params"): - assert len(cond) == 1 # todo can only deal with one conditioning atm - assert not return_ids - ks = self.split_input_params["ks"] # eg. (128, 128) - stride = self.split_input_params["stride"] # eg. 
(64, 64) - - h, w = x_noisy.shape[-2:] - - fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) - - z = unfold(x_noisy) # (bn, nc * prod(**ks), L) - # Reshape to img shape - z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] - - if self.cond_stage_key in ["image", "LR_image", "segmentation", - 'bbox_img'] and self.model.conditioning_key: # todo check for completeness - c_key = next(iter(cond.keys())) # get key - c = next(iter(cond.values())) # get value - assert (len(c) == 1) # todo extend to list with more than one elem - c = c[0] # get element - - c = unfold(c) - c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) - - cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] - - elif self.cond_stage_key == 'coordinates_bbox': - assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' - - # assuming padding of unfold is always 0 and its dilation is always 1 - n_patches_per_row = int((w - ks[0]) / stride[0] + 1) - full_img_h, full_img_w = self.split_input_params['original_image_size'] - # as we are operating on latents, we need the factor from the original image size to the - # spatial latent size to properly rescale the crops for regenerating the bbox annotations - num_downs = self.first_stage_model.encoder.num_resolutions - 1 - rescale_latent = 2 ** (num_downs) - - # get top left postions of patches as conforming for the bbbox tokenizer, therefore we - # need to rescale the tl patch coordinates to be in between (0,1) - tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, - rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) - for patch_nr in range(z.shape[-1])] - - # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) - patch_limits = [(x_tl, y_tl, - rescale_latent * ks[0] / full_img_w, - rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] - # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] - - # tokenize crop coordinates for the bounding boxes of the respective patches - patch_limits_tknzd = [torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) - for bbox in patch_limits] # list of length l with tensors of shape (1, 2) - print(patch_limits_tknzd[0].shape) - # cut tknzd crop position from conditioning - assert isinstance(cond, dict), 'cond must be dict to be fed into model' - cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) - print(cut_cond.shape) - - adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) - adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') - print(adapted_cond.shape) - adapted_cond = self.get_learned_conditioning(adapted_cond) - print(adapted_cond.shape) - adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) - print(adapted_cond.shape) - - cond_list = [{'c_crossattn': [e]} for e in adapted_cond] - - else: - cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient - - # apply model by loop over crops - output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] - assert not isinstance(output_list[0], - tuple) # todo cant deal with multiple model outputs check this never happens - - o = torch.stack(output_list, 
axis=-1) - o = o * weighting - # Reverse reshape to img shape - o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) - # stitch crops together - x_recon = fold(o) / normalization - - else: - x_recon = self.model(x_noisy, t, **cond) - - if isinstance(x_recon, tuple) and not return_ids: - return x_recon[0] - else: - return x_recon - - def _predict_eps_from_xstart(self, x_t, t, pred_xstart): - return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) - - def _prior_bpd(self, x_start): - """ - Get the prior KL term for the variational lower-bound, measured in - bits-per-dim. - This term can't be optimized, as it only depends on the encoder. - :param x_start: the [N x C x ...] tensor of inputs. - :return: a batch of [N] KL values (in bits), one per batch element. - """ - batch_size = x_start.shape[0] - t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) - qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) - kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) - return mean_flat(kl_prior) / np.log(2.0) - - def p_losses(self, x_start, cond, t, noise=None): - noise = default(noise, lambda: torch.randn_like(x_start)) - x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) - model_output = self.apply_model(x_noisy, t, cond) - - loss_dict = {} - prefix = 'train' if self.training else 'val' - - if self.parameterization == "x0": - target = x_start - elif self.parameterization == "eps": - target = noise - else: - raise NotImplementedError() - - loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) - loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) - - logvar_t = self.logvar[t].to(self.device) - loss = loss_simple / torch.exp(logvar_t) + logvar_t - # loss = loss_simple / torch.exp(self.logvar) + self.logvar - if self.learn_logvar: - loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) - loss_dict.update({'logvar': self.logvar.data.mean()}) - - loss = self.l_simple_weight * loss.mean() - - loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) - loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() - loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) - loss += (self.original_elbo_weight * loss_vlb) - loss_dict.update({f'{prefix}/loss': loss}) - - return loss, loss_dict - - def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, - return_x0=False, score_corrector=None, corrector_kwargs=None): - t_in = t - model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) - - if score_corrector is not None: - assert self.parameterization == "eps" - model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) - - if return_codebook_ids: - model_out, logits = model_out - - if self.parameterization == "eps": - x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) - elif self.parameterization == "x0": - x_recon = model_out - else: - raise NotImplementedError() - - if clip_denoised: - x_recon.clamp_(-1., 1.) 
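Two small conventions used in the losses above, shown with toy numbers: a KL computed in nats becomes bits-per-dim after dividing by log(2), and the learned per-timestep log-variance reweights the simple loss as loss / exp(logvar) + logvar.

import math

kl_nats = 0.35
kl_bits = kl_nats / math.log(2.0)           # what _prior_bpd reports per dimension

loss_simple = 0.12
logvar_t = 0.0                              # logvar_init default; trainable when learn_logvar=True
weighted = loss_simple / math.exp(logvar_t) + logvar_t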
- if quantize_denoised: - x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) - model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) - if return_codebook_ids: - return model_mean, posterior_variance, posterior_log_variance, logits - elif return_x0: - return model_mean, posterior_variance, posterior_log_variance, x_recon - else: - return model_mean, posterior_variance, posterior_log_variance - - @torch.no_grad() - def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, - return_codebook_ids=False, quantize_denoised=False, return_x0=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): - b, *_, device = *x.shape, x.device - outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, - return_codebook_ids=return_codebook_ids, - quantize_denoised=quantize_denoised, - return_x0=return_x0, - score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if return_codebook_ids: - raise DeprecationWarning("Support dropped.") - model_mean, _, model_log_variance, logits = outputs - elif return_x0: - model_mean, _, model_log_variance, x0 = outputs - else: - model_mean, _, model_log_variance = outputs - - noise = noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - # no noise when t == 0 - nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) - - if return_codebook_ids: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) - if return_x0: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 - else: - return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise - - @torch.no_grad() - def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, - img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., - score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, - log_every_t=None): - if not log_every_t: - log_every_t = self.log_every_t - timesteps = self.num_timesteps - if batch_size is not None: - b = batch_size if batch_size is not None else shape[0] - shape = [batch_size] + list(shape) - else: - b = batch_size = shape[0] - if x_T is None: - img = torch.randn(shape, device=self.device) - else: - img = x_T - intermediates = [] - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', - total=timesteps) if verbose else reversed( - range(0, timesteps)) - if type(temperature) == float: - temperature = [temperature] * timesteps - - for i in iterator: - ts = torch.full((b,), i, device=self.device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img, x0_partial = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised, return_x0=True, - temperature=temperature[i], noise_dropout=noise_dropout, - 
score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) - if mask is not None: - assert x0 is not None - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(x0_partial) - if callback: callback(i) - if img_callback: img_callback(img, i) - return img, intermediates - - @torch.no_grad() - def p_sample_loop(self, cond, shape, return_intermediates=False, - x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, start_T=None, - log_every_t=None): - - if not log_every_t: - log_every_t = self.log_every_t - device = self.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - intermediates = [img] - if timesteps is None: - timesteps = self.num_timesteps - - if start_T is not None: - timesteps = min(timesteps, start_T) - iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( - range(0, timesteps)) - - if mask is not None: - assert x0 is not None - assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match - - for i in iterator: - ts = torch.full((b,), i, device=device, dtype=torch.long) - if self.shorten_cond_schedule: - assert self.model.conditioning_key != 'hybrid' - tc = self.cond_ids[ts].to(cond.device) - cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) - - img = self.p_sample(img, cond, ts, - clip_denoised=self.clip_denoised, - quantize_denoised=quantize_denoised) - if mask is not None: - img_orig = self.q_sample(x0, ts) - img = img_orig * mask + (1. - mask) * img - - if i % log_every_t == 0 or i == timesteps - 1: - intermediates.append(img) - if callback: callback(i) - if img_callback: img_callback(img, i) - - if return_intermediates: - return img, intermediates - return img - - @torch.no_grad() - def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, - verbose=True, timesteps=None, quantize_denoised=False, - mask=None, x0=None, shape=None,**kwargs): - if shape is None: - shape = (batch_size, self.channels, self.image_size, self.image_size) - if cond is not None: - if isinstance(cond, dict): - cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else - list(map(lambda x: x[:batch_size], cond[key])) for key in cond} - else: - cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] - return self.p_sample_loop(cond, - shape, - return_intermediates=return_intermediates, x_T=x_T, - verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, - mask=mask, x0=x0) - - @torch.no_grad() - def sample_log(self,cond,batch_size,ddim, ddim_steps,**kwargs): - - if ddim: - ddim_sampler = DDIMSampler(self) - shape = (self.channels, self.image_size, self.image_size) - samples, intermediates =ddim_sampler.sample(ddim_steps,batch_size, - shape,cond,verbose=False,**kwargs) - - else: - samples, intermediates = self.sample(cond=cond, batch_size=batch_size, - return_intermediates=True,**kwargs) - - return samples, intermediates - - - @torch.no_grad() - def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, - quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, - plot_diffusion_rows=True, **kwargs): - - use_ddim = ddim_steps is not None - - log = dict() - z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, - 
return_first_stage_outputs=True, - force_c_encode=True, - return_original_cond=True, - bs=N) - N = min(x.shape[0], N) - n_row = min(x.shape[0], n_row) - log["inputs"] = x - log["reconstruction"] = xrec - if self.model.conditioning_key is not None: - if hasattr(self.cond_stage_model, "decode"): - xc = self.cond_stage_model.decode(c) - log["conditioning"] = xc - elif self.cond_stage_key in ["caption"]: - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"]) - log["conditioning"] = xc - elif self.cond_stage_key == 'class_label': - xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["human_label"]) - log['conditioning'] = xc - elif isimage(xc): - log["conditioning"] = xc - if ismap(xc): - log["original_conditioning"] = self.to_rgb(xc) - - if plot_diffusion_rows: - # get diffusion row - diffusion_row = list() - z_start = z[:n_row] - for t in range(self.num_timesteps): - if t % self.log_every_t == 0 or t == self.num_timesteps - 1: - t = repeat(torch.tensor([t]), '1 -> b', b=n_row) - t = t.to(self.device).long() - noise = torch.randn_like(z_start) - z_noisy = self.q_sample(x_start=z_start, t=t, noise=noise) - diffusion_row.append(self.decode_first_stage(z_noisy)) - - diffusion_row = torch.stack(diffusion_row) # n_log_step, n_row, C, H, W - diffusion_grid = rearrange(diffusion_row, 'n b c h w -> b n c h w') - diffusion_grid = rearrange(diffusion_grid, 'b n c h w -> (b n) c h w') - diffusion_grid = make_grid(diffusion_grid, nrow=diffusion_row.shape[0]) - log["diffusion_row"] = diffusion_grid - - if sample: - # get denoise row - with self.ema_scope("Plotting"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True) - x_samples = self.decode_first_stage(samples) - log["samples"] = x_samples - if plot_denoise_rows: - denoise_grid = self._get_denoise_row_from_list(z_denoise_row) - log["denoise_row"] = denoise_grid - - if quantize_denoised and not isinstance(self.first_stage_model, AutoencoderKL) and not isinstance( - self.first_stage_model, IdentityFirstStage): - # also display when quantizing x0 while sampling - with self.ema_scope("Plotting Quantized Denoised"): - samples, z_denoise_row = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, - ddim_steps=ddim_steps,eta=ddim_eta, - quantize_denoised=True) - # samples, z_denoise_row = self.sample(cond=c, batch_size=N, return_intermediates=True, - # quantize_denoised=True) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_x0_quantized"] = x_samples - - if inpaint: - # make a simple center square - b, h, w = z.shape[0], z.shape[2], z.shape[3] - mask = torch.ones(N, h, w).to(self.device) - # zeros will be filled in - mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0. - mask = mask[:, None, ...] 
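A toy version of the center-square inpainting mask built below in log_images and of the blend the sampling loops apply at every step: known pixels are kept from the noised original where mask == 1, and the model fills in where mask == 0. Shapes are illustrative only.

import torch

N, h, w = 2, 8, 8
mask = torch.ones(N, h, w)
mask[:, h // 4:3 * h // 4, w // 4:3 * w // 4] = 0.0     # zeros mark the region to be generated
mask = mask[:, None, ...]                                # (N, 1, h, w), broadcast over channels

img = torch.randn(N, 4, h, w)           # current sample
img_orig = torch.randn(N, 4, h, w)      # q_sample(x0, ts) in the real loop
img = img_orig * mask + (1.0 - mask) * img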
- with self.ema_scope("Plotting Inpaint"): - - samples, _ = self.sample_log(cond=c,batch_size=N,ddim=use_ddim, eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_inpainting"] = x_samples - log["mask"] = mask - - # outpaint - with self.ema_scope("Plotting Outpaint"): - samples, _ = self.sample_log(cond=c, batch_size=N, ddim=use_ddim,eta=ddim_eta, - ddim_steps=ddim_steps, x0=z[:N], mask=mask) - x_samples = self.decode_first_stage(samples.to(self.device)) - log["samples_outpainting"] = x_samples - - if plot_progressive_rows: - with self.ema_scope("Plotting Progressives"): - img, progressives = self.progressive_denoising(c, - shape=(self.channels, self.image_size, self.image_size), - batch_size=N) - prog_row = self._get_denoise_row_from_list(progressives, desc="Progressive Generation") - log["progressive_row"] = prog_row - - if return_keys: - if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: - return log - else: - return {key: log[key] for key in return_keys} - return log - - def configure_optimizers(self): - lr = self.learning_rate - params = list(self.model.parameters()) - if self.cond_stage_trainable: - print(f"{self.__class__.__name__}: Also optimizing conditioner params!") - params = params + list(self.cond_stage_model.parameters()) - if self.learn_logvar: - print('Diffusion model optimizing logvar') - params.append(self.logvar) - opt = torch.optim.AdamW(params, lr=lr) - if self.use_scheduler: - assert 'target' in self.scheduler_config - scheduler = instantiate_from_config(self.scheduler_config) - - print("Setting up LambdaLR scheduler...") - scheduler = [ - { - 'scheduler': LambdaLR(opt, lr_lambda=scheduler.schedule), - 'interval': 'step', - 'frequency': 1 - }] - return [opt], scheduler - return opt - - @torch.no_grad() - def to_rgb(self, x): - x = x.float() - if not hasattr(self, "colorize"): - self.colorize = torch.randn(3, x.shape[1], 1, 1).to(x) - x = nn.functional.conv2d(x, weight=self.colorize) - x = 2. * (x - x.min()) / (x.max() - x.min()) - 1. 
- return x - - -class DiffusionWrapper(pl.LightningModule): - def __init__(self, diff_model_config, conditioning_key): - super().__init__() - self.diffusion_model = instantiate_from_config(diff_model_config) - self.conditioning_key = conditioning_key - assert self.conditioning_key in [None, 'concat', 'crossattn', 'hybrid', 'adm'] - - def forward(self, x, t, c_concat: list = None, c_crossattn: list = None): - if self.conditioning_key is None: - out = self.diffusion_model(x, t) - elif self.conditioning_key == 'concat': - xc = torch.cat([x] + c_concat, dim=1) - out = self.diffusion_model(xc, t) - elif self.conditioning_key == 'crossattn': - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(x, t, context=cc) - elif self.conditioning_key == 'hybrid': - xc = torch.cat([x] + c_concat, dim=1) - cc = torch.cat(c_crossattn, 1) - out = self.diffusion_model(xc, t, context=cc) - elif self.conditioning_key == 'adm': - cc = c_crossattn[0] - out = self.diffusion_model(x, t, y=cc) - else: - raise NotImplementedError() - - return out - - -class Layout2ImgDiffusion(LatentDiffusion): - # TODO: move all layout-specific hacks to this class - def __init__(self, cond_stage_key, *args, **kwargs): - assert cond_stage_key == 'coordinates_bbox', 'Layout2ImgDiffusion only for cond_stage_key="coordinates_bbox"' - super().__init__(cond_stage_key=cond_stage_key, *args, **kwargs) - - def log_images(self, batch, N=8, *args, **kwargs): - logs = super().log_images(batch=batch, N=N, *args, **kwargs) - - key = 'train' if self.training else 'validation' - dset = self.trainer.datamodule.datasets[key] - mapper = dset.conditional_builders[self.cond_stage_key] - - bbox_imgs = [] - map_fn = lambda catno: dset.get_textual_label(dset.get_category_id(catno)) - for tknzd_bbox in batch[self.cond_stage_key][:N]: - bboximg = mapper.plot(tknzd_bbox.detach().cpu(), map_fn, (256, 256)) - bbox_imgs.append(bboximg) - - cond_img = torch.stack(bbox_imgs, dim=0) - logs['bbox_image'] = cond_img - return logs diff --git a/ldm/models/diffusion/dpm_solver/__init__.py b/ldm/models/diffusion/dpm_solver/__init__.py deleted file mode 100644 index 7427f38c..00000000 --- a/ldm/models/diffusion/dpm_solver/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from .sampler import DPMSolverSampler \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/dpm_solver.py b/ldm/models/diffusion/dpm_solver/dpm_solver.py deleted file mode 100644 index bdb64e0c..00000000 --- a/ldm/models/diffusion/dpm_solver/dpm_solver.py +++ /dev/null @@ -1,1184 +0,0 @@ -import torch -import torch.nn.functional as F -import math - - -class NoiseScheduleVP: - def __init__( - self, - schedule='discrete', - betas=None, - alphas_cumprod=None, - continuous_beta_0=0.1, - continuous_beta_1=20., - ): - """Create a wrapper class for the forward SDE (VP type). - - *** - Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t. - We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images. - *** - - The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ). - We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper). - Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. 
For t in [0, T], we have: - - log_alpha_t = self.marginal_log_mean_coeff(t) - sigma_t = self.marginal_std(t) - lambda_t = self.marginal_lambda(t) - - Moreover, as lambda(t) is an invertible function, we also support its inverse function: - - t = self.inverse_lambda(lambda_t) - - =============================================================== - - We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]). - - 1. For discrete-time DPMs: - - For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by: - t_i = (i + 1) / N - e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1. - We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3. - - Args: - betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details) - alphas_cumprod: A `torch.Tensor`. The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details) - - Note that we always have alphas_cumprod = cumprod(betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`. - - **Important**: Please pay special attention for the args for `alphas_cumprod`: - The `alphas_cumprod` is the \hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that - q_{t_n | 0}(x_{t_n} | x_0) = N ( \sqrt{\hat{alpha_n}} * x_0, (1 - \hat{alpha_n}) * I ). - Therefore, the notation \hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have - alpha_{t_n} = \sqrt{\hat{alpha_n}}, - and - log(alpha_{t_n}) = 0.5 * log(\hat{alpha_n}). - - - 2. For continuous-time DPMs: - - We support two types of VPSDEs: linear (DDPM) and cosine (improved-DDPM). The hyperparameters for the noise - schedule are the default settings in DDPM and improved-DDPM: - - Args: - beta_min: A `float` number. The smallest beta for the linear schedule. - beta_max: A `float` number. The largest beta for the linear schedule. - cosine_s: A `float` number. The hyperparameter in the cosine schedule. - cosine_beta_max: A `float` number. The hyperparameter in the cosine schedule. - T: A `float` number. The ending time of the forward process. - - =============================================================== - - Args: - schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs, - 'linear' or 'cosine' for continuous-time DPMs. - Returns: - A wrapper object of the forward SDE (VP type). - - =============================================================== - - Example: - - # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', betas=betas) - - # For discrete-time DPMs, given alphas_cumprod (the \hat{alpha_n} array for n = 0, 1, ..., N - 1): - >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod) - - # For continuous-time DPMs (VPSDE), linear schedule: - >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.) - - """ - - if schedule not in ['discrete', 'linear', 'cosine']: - raise ValueError("Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear' or 'cosine'".format(schedule)) - - self.schedule = schedule - if schedule == 'discrete': - if betas is not None: - log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0) - else: - assert alphas_cumprod is not None - log_alphas = 0.5 * torch.log(alphas_cumprod) - self.total_N = len(log_alphas) - self.T = 1. 
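The discrete branch above fixes everything the solver needs: log(alpha_t) comes from a cumulative sum over the betas, and sigma_t and the half-logSNR lambda_t follow from it exactly as this docstring states. A small self-contained check (the beta schedule here is illustrative, not a value taken from this file):

import torch

betas = torch.linspace(1e-4, 2e-2, 1000)                  # illustrative linear beta schedule
log_alpha = 0.5 * torch.log(1. - betas).cumsum(dim=0)     # log(alpha_t), as computed above
alpha = log_alpha.exp()
sigma = torch.sqrt(1. - alpha ** 2)                       # marginal_std: alpha_t^2 + sigma_t^2 = 1
lam = log_alpha - torch.log(sigma)                        # marginal_lambda: half-logSNR
assert torch.all(lam[1:] < lam[:-1])                      # lambda_t decreases monotonically in t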
- self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)) - self.log_alpha_array = log_alphas.reshape((1, -1,)) - else: - self.total_N = 1000 - self.beta_0 = continuous_beta_0 - self.beta_1 = continuous_beta_1 - self.cosine_s = 0.008 - self.cosine_beta_max = 999. - self.cosine_t_max = math.atan(self.cosine_beta_max * (1. + self.cosine_s) / math.pi) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s - self.cosine_log_alpha_0 = math.log(math.cos(self.cosine_s / (1. + self.cosine_s) * math.pi / 2.)) - self.schedule = schedule - if schedule == 'cosine': - # For the cosine schedule, T = 1 will have numerical issues. So we manually set the ending time T. - # Note that T = 0.9946 may be not the optimal setting. However, we find it works well. - self.T = 0.9946 - else: - self.T = 1. - - def marginal_log_mean_coeff(self, t): - """ - Compute log(alpha_t) of a given continuous-time label t in [0, T]. - """ - if self.schedule == 'discrete': - return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device), self.log_alpha_array.to(t.device)).reshape((-1)) - elif self.schedule == 'linear': - return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0 - elif self.schedule == 'cosine': - log_alpha_fn = lambda s: torch.log(torch.cos((s + self.cosine_s) / (1. + self.cosine_s) * math.pi / 2.)) - log_alpha_t = log_alpha_fn(t) - self.cosine_log_alpha_0 - return log_alpha_t - - def marginal_alpha(self, t): - """ - Compute alpha_t of a given continuous-time label t in [0, T]. - """ - return torch.exp(self.marginal_log_mean_coeff(t)) - - def marginal_std(self, t): - """ - Compute sigma_t of a given continuous-time label t in [0, T]. - """ - return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t))) - - def marginal_lambda(self, t): - """ - Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T]. - """ - log_mean_coeff = self.marginal_log_mean_coeff(t) - log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff)) - return log_mean_coeff - log_std - - def inverse_lambda(self, lamb): - """ - Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t. - """ - if self.schedule == 'linear': - tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - Delta = self.beta_0**2 + tmp - return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0) - elif self.schedule == 'discrete': - log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb) - t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]), torch.flip(self.t_array.to(lamb.device), [1])) - return t.reshape((-1,)) - else: - log_alpha = -0.5 * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb)) - t_fn = lambda log_alpha_t: torch.arccos(torch.exp(log_alpha_t + self.cosine_log_alpha_0)) * 2. * (1. + self.cosine_s) / math.pi - self.cosine_s - t = t_fn(log_alpha) - return t - - -def model_wrapper( - model, - noise_schedule, - model_type="noise", - model_kwargs={}, - guidance_type="uncond", - condition=None, - unconditional_condition=None, - guidance_scale=1., - classifier_fn=None, - classifier_kwargs={}, -): - """Create a wrapper function for the noise prediction model. - - DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to - firstly wrap the model function to a noise prediction model that accepts the continuous time as the input. 
- - We support four types of the diffusion model by setting `model_type`: - - 1. "noise": noise prediction model. (Trained by predicting noise). - - 2. "x_start": data prediction model. (Trained by predicting the data x_0 at time 0). - - 3. "v": velocity prediction model. (Trained by predicting the velocity). - The "v" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2]. - - [1] Salimans, Tim, and Jonathan Ho. "Progressive distillation for fast sampling of diffusion models." - arXiv preprint arXiv:2202.00512 (2022). - [2] Ho, Jonathan, et al. "Imagen Video: High Definition Video Generation with Diffusion Models." - arXiv preprint arXiv:2210.02303 (2022). - - 4. "score": marginal score function. (Trained by denoising score matching). - Note that the score function and the noise prediction model follows a simple relationship: - ``` - noise(x_t, t) = -sigma_t * score(x_t, t) - ``` - - We support three types of guided sampling by DPMs by setting `guidance_type`: - 1. "uncond": unconditional sampling by DPMs. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - - 2. "classifier": classifier guidance sampling [3] by DPMs and another classifier. - The input `model` has the following format: - `` - model(x, t_input, **model_kwargs) -> noise | x_start | v | score - `` - - The input `classifier_fn` has the following format: - `` - classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond) - `` - - [3] P. Dhariwal and A. Q. Nichol, "Diffusion models beat GANs on image synthesis," - in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794. - - 3. "classifier-free": classifier-free guidance sampling by conditional DPMs. - The input `model` has the following format: - `` - model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score - `` - And if cond == `unconditional_condition`, the model output is the unconditional DPM output. - - [4] Ho, Jonathan, and Tim Salimans. "Classifier-free diffusion guidance." - arXiv preprint arXiv:2207.12598 (2022). - - - The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999) - or continuous-time labels (i.e. epsilon to T). - - We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise: - `` - def model_fn(x, t_continuous) -> noise: - t_input = get_model_input_time(t_continuous) - return noise_pred(model, x, t_input, **model_kwargs) - `` - where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver. - - =============================================================== - - Args: - model: A diffusion model with the corresponding format described above. - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - model_type: A `str`. The parameterization type of the diffusion model. - "noise" or "x_start" or "v" or "score". - model_kwargs: A `dict`. A dict for the other inputs of the model function. - guidance_type: A `str`. The type of the guidance for sampling. - "uncond" or "classifier" or "classifier-free". - condition: A pytorch tensor. The condition for the guided sampling. - Only used for "classifier" or "classifier-free" guidance type. - unconditional_condition: A pytorch tensor. The condition for the unconditional sampling. - Only used for "classifier-free" guidance type. - guidance_scale: A `float`. The scale for the guided sampling. 
- classifier_fn: A classifier function. Only used for the classifier guidance. - classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function. - Returns: - A noise prediction model that accepts the noised data and the continuous time as the inputs. - """ - - def get_model_input_time(t_continuous): - """ - Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time. - For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N]. - For continuous-time DPMs, we just use `t_continuous`. - """ - if noise_schedule.schedule == 'discrete': - return (t_continuous - 1. / noise_schedule.total_N) * 1000. - else: - return t_continuous - - def noise_pred_fn(x, t_continuous, cond=None): - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - t_input = get_model_input_time(t_continuous) - if cond is None: - output = model(x, t_input, **model_kwargs) - else: - output = model(x, t_input, cond, **model_kwargs) - if model_type == "noise": - return output - elif model_type == "x_start": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return (x - expand_dims(alpha_t, dims) * output) / expand_dims(sigma_t, dims) - elif model_type == "v": - alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return expand_dims(alpha_t, dims) * output + expand_dims(sigma_t, dims) * x - elif model_type == "score": - sigma_t = noise_schedule.marginal_std(t_continuous) - dims = x.dim() - return -expand_dims(sigma_t, dims) * output - - def cond_grad_fn(x, t_input): - """ - Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t). - """ - with torch.enable_grad(): - x_in = x.detach().requires_grad_(True) - log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs) - return torch.autograd.grad(log_prob.sum(), x_in)[0] - - def model_fn(x, t_continuous): - """ - The noise predicition model function that is used for DPM-Solver. - """ - if t_continuous.reshape((-1,)).shape[0] == 1: - t_continuous = t_continuous.expand((x.shape[0])) - if guidance_type == "uncond": - return noise_pred_fn(x, t_continuous) - elif guidance_type == "classifier": - assert classifier_fn is not None - t_input = get_model_input_time(t_continuous) - cond_grad = cond_grad_fn(x, t_input) - sigma_t = noise_schedule.marginal_std(t_continuous) - noise = noise_pred_fn(x, t_continuous) - return noise - guidance_scale * expand_dims(sigma_t, dims=cond_grad.dim()) * cond_grad - elif guidance_type == "classifier-free": - if guidance_scale == 1. or unconditional_condition is None: - return noise_pred_fn(x, t_continuous, cond=condition) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t_continuous] * 2) - c_in = torch.cat([unconditional_condition, condition]) - noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2) - return noise_uncond + guidance_scale * (noise - noise_uncond) - - assert model_type in ["noise", "x_start", "v"] - assert guidance_type in ["uncond", "classifier", "classifier-free"] - return model_fn - - -class DPM_Solver: - def __init__(self, model_fn, noise_schedule, predict_x0=False, thresholding=False, max_val=1.): - """Construct a DPM-Solver. - - We support both the noise prediction model ("predicting epsilon") and the data prediction model ("predicting x0"). 
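The "classifier-free" branch of `model_fn` above batches the conditional and unconditional passes and then blends the two noise predictions. The blend in isolation (a sketch; `eps_uncond` and `eps_cond` are placeholder names for the two model outputs, not identifiers from this file):

import torch

def cfg_combine(eps_uncond, eps_cond, guidance_scale):
    # noise = eps_uncond + s * (eps_cond - eps_uncond); s = 1 recovers the plain conditional output
    return eps_uncond + guidance_scale * (eps_cond - eps_uncond)

eps_uncond, eps_cond = torch.randn(2, 4, 64, 64).unbind(0)
assert torch.allclose(cfg_combine(eps_uncond, eps_cond, 1.0), eps_cond)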
- If `predict_x0` is False, we use the solver for the noise prediction model (DPM-Solver). - If `predict_x0` is True, we use the solver for the data prediction model (DPM-Solver++). - In such case, we further support the "dynamic thresholding" in [1] when `thresholding` is True. - The "dynamic thresholding" can greatly improve the sample quality for pixel-space DPMs with large guidance scales. - - Args: - model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]): - `` - def model_fn(x, t_continuous): - return noise - `` - noise_schedule: A noise schedule object, such as NoiseScheduleVP. - predict_x0: A `bool`. If true, use the data prediction model; else, use the noise prediction model. - thresholding: A `bool`. Valid when `predict_x0` is True. Whether to use the "dynamic thresholding" in [1]. - max_val: A `float`. Valid when both `predict_x0` and `thresholding` are True. The max value for thresholding. - - [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour, Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models with deep language understanding. arXiv preprint arXiv:2205.11487, 2022b. - """ - self.model = model_fn - self.noise_schedule = noise_schedule - self.predict_x0 = predict_x0 - self.thresholding = thresholding - self.max_val = max_val - - def noise_prediction_fn(self, x, t): - """ - Return the noise prediction model. - """ - return self.model(x, t) - - def data_prediction_fn(self, x, t): - """ - Return the data prediction model (with thresholding). - """ - noise = self.noise_prediction_fn(x, t) - dims = x.dim() - alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t) - x0 = (x - expand_dims(sigma_t, dims) * noise) / expand_dims(alpha_t, dims) - if self.thresholding: - p = 0.995 # A hyperparameter in the paper of "Imagen" [1]. - s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1) - s = expand_dims(torch.maximum(s, self.max_val * torch.ones_like(s).to(s.device)), dims) - x0 = torch.clamp(x0, -s, s) / s - return x0 - - def model_fn(self, x, t): - """ - Convert the model to the noise prediction model or the data prediction model. - """ - if self.predict_x0: - return self.data_prediction_fn(x, t) - else: - return self.noise_prediction_fn(x, t) - - def get_time_steps(self, skip_type, t_T, t_0, N, device): - """Compute the intermediate time steps for sampling. - - Args: - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - N: A `int`. The total number of the spacing of the time steps. - device: A torch device. - Returns: - A pytorch tensor of the time steps, with the shape (N + 1,). 
- """ - if skip_type == 'logSNR': - lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device)) - lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device)) - logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device) - return self.noise_schedule.inverse_lambda(logSNR_steps) - elif skip_type == 'time_uniform': - return torch.linspace(t_T, t_0, N + 1).to(device) - elif skip_type == 'time_quadratic': - t_order = 2 - t = torch.linspace(t_T**(1. / t_order), t_0**(1. / t_order), N + 1).pow(t_order).to(device) - return t - else: - raise ValueError("Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'".format(skip_type)) - - def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device): - """ - Get the order of each step for sampling by the singlestep DPM-Solver. - - We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as "DPM-Solver-fast". - Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is: - - If order == 1: - We take `steps` of DPM-Solver-1 (i.e. DDIM). - - If order == 2: - - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of DPM-Solver-2. - - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If order == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2. - - ============================================ - Args: - order: A `int`. The max order for the solver (2 or 3). - steps: A `int`. The total number of function evaluations (NFE). - skip_type: A `str`. The type for the spacing of the time steps. We support three types: - - 'logSNR': uniform logSNR for the time steps. - - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.) - - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.) - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - device: A torch device. - Returns: - orders: A list of the solver order of each step. - """ - if order == 3: - K = steps // 3 + 1 - if steps % 3 == 0: - orders = [3,] * (K - 2) + [2, 1] - elif steps % 3 == 1: - orders = [3,] * (K - 1) + [1] - else: - orders = [3,] * (K - 1) + [2] - elif order == 2: - if steps % 2 == 0: - K = steps // 2 - orders = [2,] * K - else: - K = steps // 2 + 1 - orders = [2,] * (K - 1) + [1] - elif order == 1: - K = 1 - orders = [1,] * steps - else: - raise ValueError("'order' must be '1' or '2' or '3'.") - if skip_type == 'logSNR': - # To reproduce the results in DPM-Solver paper - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device) - else: - timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[torch.cumsum(torch.tensor([0,] + orders)).to(device)] - return timesteps_outer, orders - - def denoise_to_zero_fn(self, x, s): - """ - Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization. 
- """ - return self.data_prediction_fn(x, s) - - def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False): - """ - DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s`. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - if self.predict_x0: - phi_1 = torch.expm1(-h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - else: - phi_1 = torch.expm1(h) - if model_s is None: - model_s = self.model_fn(x, s) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - ) - if return_intermediate: - return x_t, {'model_s': model_s} - else: - return x_t - - def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False, solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-2 from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the second-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. 
- """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 0.5 - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - s1 = ns.inverse_lambda(lambda_s1) - log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t) - alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_1 = torch.expm1(-h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(alpha_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r1) * expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * (model_s1 - model_s) - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_1 = torch.expm1(h) - - if model_s is None: - model_s = self.model_fn(x, s) - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (0.5 / r1) * expand_dims(sigma_t * phi_1, dims) * (model_s1 - model_s) - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r1) * expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * (model_s1 - model_s) - ) - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1} - else: - return x_t - - def singlestep_dpm_solver_third_update(self, x, s, t, r1=1./3., r2=2./3., model_s=None, model_s1=None, return_intermediate=False, solver_type='dpm_solver'): - """ - Singlestep solver DPM-Solver-3 from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - r1: A `float`. The hyperparameter of the third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - model_s: A pytorch tensor. The model function evaluated at time `s`. - If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it. - model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`). - If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. 
- Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - if r1 is None: - r1 = 1. / 3. - if r2 is None: - r2 = 2. / 3. - ns = self.noise_schedule - dims = x.dim() - lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t) - h = lambda_t - lambda_s - lambda_s1 = lambda_s + r1 * h - lambda_s2 = lambda_s + r2 * h - s1 = ns.inverse_lambda(lambda_s1) - s2 = ns.inverse_lambda(lambda_s2) - log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t) - sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(s2), ns.marginal_std(t) - alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t) - - if self.predict_x0: - phi_11 = torch.expm1(-r1 * h) - phi_12 = torch.expm1(-r2 * h) - phi_1 = torch.expm1(-h) - phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1. - phi_2 = phi_1 / h + 1. - phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(sigma_s1 / sigma_s, dims) * x - - expand_dims(alpha_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(sigma_s2 / sigma_s, dims) * x - - expand_dims(alpha_s2 * phi_12, dims) * model_s - + r2 / r1 * expand_dims(alpha_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + (1. / r2) * expand_dims(alpha_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. * (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(sigma_t / sigma_s, dims) * x - - expand_dims(alpha_t * phi_1, dims) * model_s - + expand_dims(alpha_t * phi_2, dims) * D1 - - expand_dims(alpha_t * phi_3, dims) * D2 - ) - else: - phi_11 = torch.expm1(r1 * h) - phi_12 = torch.expm1(r2 * h) - phi_1 = torch.expm1(h) - phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1. - phi_2 = phi_1 / h - 1. - phi_3 = phi_2 / h - 0.5 - - if model_s is None: - model_s = self.model_fn(x, s) - if model_s1 is None: - x_s1 = ( - expand_dims(torch.exp(log_alpha_s1 - log_alpha_s), dims) * x - - expand_dims(sigma_s1 * phi_11, dims) * model_s - ) - model_s1 = self.model_fn(x_s1, s1) - x_s2 = ( - expand_dims(torch.exp(log_alpha_s2 - log_alpha_s), dims) * x - - expand_dims(sigma_s2 * phi_12, dims) * model_s - - r2 / r1 * expand_dims(sigma_s2 * phi_22, dims) * (model_s1 - model_s) - ) - model_s2 = self.model_fn(x_s2, s2) - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - (1. / r2) * expand_dims(sigma_t * phi_2, dims) * (model_s2 - model_s) - ) - elif solver_type == 'taylor': - D1_0 = (1. / r1) * (model_s1 - model_s) - D1_1 = (1. / r2) * (model_s2 - model_s) - D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1) - D2 = 2. 
* (D1_1 - D1_0) / (r2 - r1) - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_s), dims) * x - - expand_dims(sigma_t * phi_1, dims) * model_s - - expand_dims(sigma_t * phi_2, dims) * D1 - - expand_dims(sigma_t * phi_3, dims) * D2 - ) - - if return_intermediate: - return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2} - else: - return x_t - - def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type="dpm_solver"): - """ - Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if solver_type not in ['dpm_solver', 'taylor']: - raise ValueError("'solver_type' must be either 'dpm_solver' or 'taylor', got {}".format(solver_type)) - ns = self.noise_schedule - dims = x.dim() - model_prev_1, model_prev_0 = model_prev_list - t_prev_1, t_prev_0 = t_prev_list - lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0 = h_0 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - if self.predict_x0: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1_0 - ) - else: - if solver_type == 'dpm_solver': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - 0.5 * expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * D1_0 - ) - elif solver_type == 'taylor': - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1_0 - ) - return x_t - - def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpm_solver'): - """ - Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. - t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. 
We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - ns = self.noise_schedule - dims = x.dim() - model_prev_2, model_prev_1, model_prev_0 = model_prev_list - t_prev_2, t_prev_1, t_prev_0 = t_prev_list - lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t) - log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t) - sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t) - alpha_t = torch.exp(log_alpha_t) - - h_1 = lambda_prev_1 - lambda_prev_2 - h_0 = lambda_prev_0 - lambda_prev_1 - h = lambda_t - lambda_prev_0 - r0, r1 = h_0 / h, h_1 / h - D1_0 = expand_dims(1. / r0, dims) * (model_prev_0 - model_prev_1) - D1_1 = expand_dims(1. / r1, dims) * (model_prev_1 - model_prev_2) - D1 = D1_0 + expand_dims(r0 / (r0 + r1), dims) * (D1_0 - D1_1) - D2 = expand_dims(1. / (r0 + r1), dims) * (D1_0 - D1_1) - if self.predict_x0: - x_t = ( - expand_dims(sigma_t / sigma_prev_0, dims) * x - - expand_dims(alpha_t * (torch.exp(-h) - 1.), dims) * model_prev_0 - + expand_dims(alpha_t * ((torch.exp(-h) - 1.) / h + 1.), dims) * D1 - - expand_dims(alpha_t * ((torch.exp(-h) - 1. + h) / h**2 - 0.5), dims) * D2 - ) - else: - x_t = ( - expand_dims(torch.exp(log_alpha_t - log_alpha_prev_0), dims) * x - - expand_dims(sigma_t * (torch.exp(h) - 1.), dims) * model_prev_0 - - expand_dims(sigma_t * ((torch.exp(h) - 1.) / h - 1.), dims) * D1 - - expand_dims(sigma_t * ((torch.exp(h) - 1. - h) / h**2 - 0.5), dims) * D2 - ) - return x_t - - def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpm_solver', r1=None, r2=None): - """ - Singlestep DPM-Solver with the order `order` from time `s` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - s: A pytorch tensor. The starting time, with the shape (x.shape[0],). - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times). - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - r1: A `float`. The hyperparameter of the second-order or third-order solver. - r2: A `float`. The hyperparameter of the third-order solver. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if order == 1: - return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate) - elif order == 2: - return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1) - elif order == 3: - return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate, solver_type=solver_type, r1=r1, r2=r2) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpm_solver'): - """ - Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`. - - Args: - x: A pytorch tensor. The initial value at time `s`. - model_prev_list: A list of pytorch tensor. The previous computed model values. 
- t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (x.shape[0],) - t: A pytorch tensor. The ending time, with the shape (x.shape[0],). - order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_t: A pytorch tensor. The approximated solution at time `t`. - """ - if order == 1: - return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1]) - elif order == 2: - return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - elif order == 3: - return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type) - else: - raise ValueError("Solver order must be 1 or 2 or 3, got {}".format(order)) - - def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5, solver_type='dpm_solver'): - """ - The adaptive step size solver based on singlestep DPM-Solver. - - Args: - x: A pytorch tensor. The initial value at time `t_T`. - order: A `int`. The (higher) order of the solver. We only support order == 2 or 3. - t_T: A `float`. The starting time of the sampling (default is T). - t_0: A `float`. The ending time of the sampling (default is epsilon). - h_init: A `float`. The initial step size (for logSNR). - atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1]. - rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05. - theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1]. - t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the - current time and `t_0` is less than `t_err`. The default setting is 1e-5. - solver_type: either 'dpm_solver' or 'taylor'. The type for the high-order solvers. - The type slightly impacts the performance. We recommend to use 'dpm_solver' type. - Returns: - x_0: A pytorch tensor. The approximated solution at time `t_0`. - - [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, "Gotta go fast when generating data with score-based models," arXiv preprint arXiv:2105.14080, 2021. - """ - ns = self.noise_schedule - s = t_T * torch.ones((x.shape[0],)).to(x) - lambda_s = ns.marginal_lambda(s) - lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x)) - h = h_init * torch.ones_like(s).to(x) - x_prev = x - nfe = 0 - if order == 2: - r1 = 0.5 - lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, solver_type=solver_type, **kwargs) - elif order == 3: - r1, r2 = 1. / 3., 2. / 3. 
- lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1, return_intermediate=True, solver_type=solver_type) - higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2, solver_type=solver_type, **kwargs) - else: - raise ValueError("For adaptive step size solver, order must be 2 or 3, got {}".format(order)) - while torch.abs((s - t_0)).mean() > t_err: - t = ns.inverse_lambda(lambda_s + h) - x_lower, lower_noise_kwargs = lower_update(x, s, t) - x_higher = higher_update(x, s, t, **lower_noise_kwargs) - delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev))) - norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True)) - E = norm_fn((x_higher - x_lower) / delta).max() - if torch.all(E <= 1.): - x = x_higher - s = t - x_prev = x_lower - lambda_s = ns.marginal_lambda(s) - h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s) - nfe += order - print('adaptive solver nfe', nfe) - return x - - def sample(self, x, steps=20, t_start=None, t_end=None, order=3, skip_type='time_uniform', - method='singlestep', lower_order_final=True, denoise_to_zero=False, solver_type='dpm_solver', - atol=0.0078, rtol=0.05, - ): - """ - Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`. - - ===================================================== - - We support the following algorithms for both noise prediction model and data prediction model: - - 'singlestep': - Singlestep DPM-Solver (i.e. "DPM-Solver-fast" in the paper), which combines different orders of singlestep DPM-Solver. - We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps). - The total number of function evaluations (NFE) == `steps`. - Given a fixed NFE == `steps`, the sampling procedure is: - - If `order` == 1: - - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM). - - If `order` == 2: - - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling. - - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2. - - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - - If `order` == 3: - - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling. - - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1. - - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1. - - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2. - - 'multistep': - Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`. - We initialize the first `order` values by lower order multistep solvers. - Given a fixed NFE == `steps`, the sampling procedure is: - Denote K = steps. - - If `order` == 1: - - We use K steps of DPM-Solver-1 (i.e. DDIM). - - If `order` == 2: - - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2. - - If `order` == 3: - - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3. - - 'singlestep_fixed': - Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3). 
- We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE. - - 'adaptive': - Adaptive step size DPM-Solver (i.e. "DPM-Solver-12" and "DPM-Solver-23" in the paper). - We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`. - You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs - (NFE) and the sample quality. - - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2. - - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3. - - ===================================================== - - Some advices for choosing the algorithm: - - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs: - Use singlestep DPM-Solver ("DPM-Solver-fast" in the paper) with `order = 3`. - e.g. - >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=False) - >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3, - skip_type='time_uniform', method='singlestep') - - For **guided sampling with large guidance scale** by DPMs: - Use multistep DPM-Solver with `predict_x0 = True` and `order = 2`. - e.g. - >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, predict_x0=True) - >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2, - skip_type='time_uniform', method='multistep') - - We support three types of `skip_type`: - - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images** - - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**. - - 'time_quadratic': quadratic time for the time steps. - - ===================================================== - Args: - x: A pytorch tensor. The initial value at time `t_start` - e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution. - steps: A `int`. The total number of function evaluations (NFE). - t_start: A `float`. The starting time of the sampling. - If `T` is None, we use self.noise_schedule.T (default is 1.0). - t_end: A `float`. The ending time of the sampling. - If `t_end` is None, we use 1. / self.noise_schedule.total_N. - e.g. if total_N == 1000, we have `t_end` == 1e-3. - For discrete-time DPMs: - - We recommend `t_end` == 1. / self.noise_schedule.total_N. - For continuous-time DPMs: - - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15. - order: A `int`. The order of DPM-Solver. - skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'. - method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'. - denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step. - Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1). - - This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and - score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID - for diffusion models sampling by diffusion SDEs for low-resolutional images - (such as CIFAR-10). However, we observed that such trick does not matter for - high-resolutional images. As it needs an additional NFE, we do not recommend - it for high-resolutional images. - lower_order_final: A `bool`. Whether to use lower order solvers at the final steps. 
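For orientation, the pieces of this (removed) module compose exactly as in the >>> examples above and in the call pattern of sampler.py further down. A sketch of the recommended guided-sampling path with a dummy noise predictor; the import path and the dummy model only make sense against the tree before this patch, and the conditioning tensors are placeholders:

import torch
from ldm.models.diffusion.dpm_solver.dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver

ns = NoiseScheduleVP('discrete', betas=torch.linspace(1e-4, 2e-2, 1000))
cond, uncond = torch.zeros(2, 77, 768), torch.zeros(2, 77, 768)   # placeholder conditioning
unet = lambda x, t, c: torch.zeros_like(x)                        # stand-in noise predictor

model_fn = model_wrapper(unet, ns, model_type="noise",
                         guidance_type="classifier-free",
                         condition=cond, unconditional_condition=uncond,
                         guidance_scale=7.5)
solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False)
x = solver.sample(torch.randn(2, 4, 64, 64), steps=20, skip_type="time_uniform",
                  method="multistep", order=2, lower_order_final=True)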
- Only valid for `method=multistep` and `steps < 15`. We empirically find that - this trick is a key to stabilizing the sampling by DPM-Solver with very few steps - (especially for steps <= 10). So we recommend to set it to be `True`. - solver_type: A `str`. The taylor expansion type for the solver. `dpm_solver` or `taylor`. We recommend `dpm_solver`. - atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. - rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'. - Returns: - x_end: A pytorch tensor. The approximated solution at time `t_end`. - - """ - t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end - t_T = self.noise_schedule.T if t_start is None else t_start - device = x.device - if method == 'adaptive': - with torch.no_grad(): - x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol, solver_type=solver_type) - elif method == 'multistep': - assert steps >= order - timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device) - assert timesteps.shape[0] - 1 == steps - with torch.no_grad(): - vec_t = timesteps[0].expand((x.shape[0])) - model_prev_list = [self.model_fn(x, vec_t)] - t_prev_list = [vec_t] - # Init the first `order` values by lower order multistep DPM-Solver. - for init_order in range(1, order): - vec_t = timesteps[init_order].expand(x.shape[0]) - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, init_order, solver_type=solver_type) - model_prev_list.append(self.model_fn(x, vec_t)) - t_prev_list.append(vec_t) - # Compute the remaining values by `order`-th order multistep DPM-Solver. - for step in range(order, steps + 1): - vec_t = timesteps[step].expand(x.shape[0]) - if lower_order_final and steps < 15: - step_order = min(order, steps + 1 - step) - else: - step_order = order - x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, vec_t, step_order, solver_type=solver_type) - for i in range(order - 1): - t_prev_list[i] = t_prev_list[i + 1] - model_prev_list[i] = model_prev_list[i + 1] - t_prev_list[-1] = vec_t - # We do not need to evaluate the final model value. 
- if step < steps: - model_prev_list[-1] = self.model_fn(x, vec_t) - elif method in ['singlestep', 'singlestep_fixed']: - if method == 'singlestep': - timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps, order=order, skip_type=skip_type, t_T=t_T, t_0=t_0, device=device) - elif method == 'singlestep_fixed': - K = steps // order - orders = [order,] * K - timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device) - for i, order in enumerate(orders): - t_T_inner, t_0_inner = timesteps_outer[i], timesteps_outer[i + 1] - timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=t_T_inner.item(), t_0=t_0_inner.item(), N=order, device=device) - lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner) - vec_s, vec_t = t_T_inner.tile(x.shape[0]), t_0_inner.tile(x.shape[0]) - h = lambda_inner[-1] - lambda_inner[0] - r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h - r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h - x = self.singlestep_dpm_solver_update(x, vec_s, vec_t, order, solver_type=solver_type, r1=r1, r2=r2) - if denoise_to_zero: - x = self.denoise_to_zero_fn(x, torch.ones((x.shape[0],)).to(device) * t_0) - return x - - - -############################################################# -# other utility functions -############################################################# - -def interpolate_fn(x, xp, yp): - """ - A piecewise linear function y = f(x), using xp and yp as keypoints. - We implement f(x) in a differentiable way (i.e. applicable for autograd). - The function f(x) is well-defined for all x-axis. (For x beyond the bounds of xp, we use the outmost points of xp to define the linear function.) - - Args: - x: PyTorch tensor with shape [N, C], where N is the batch size, C is the number of channels (we use C = 1 for DPM-Solver). - xp: PyTorch tensor with shape [C, K], where K is the number of keypoints. - yp: PyTorch tensor with shape [C, K]. - Returns: - The function values f(x), with shape [N, C]. - """ - N, K = x.shape[0], xp.shape[1] - all_x = torch.cat([x.unsqueeze(2), xp.unsqueeze(0).repeat((N, 1, 1))], dim=2) - sorted_all_x, x_indices = torch.sort(all_x, dim=2) - x_idx = torch.argmin(x_indices, dim=2) - cand_start_idx = x_idx - 1 - start_idx = torch.where( - torch.eq(x_idx, 0), - torch.tensor(1, device=x.device), - torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, - ), - ) - end_idx = torch.where(torch.eq(start_idx, cand_start_idx), start_idx + 2, start_idx + 1) - start_x = torch.gather(sorted_all_x, dim=2, index=start_idx.unsqueeze(2)).squeeze(2) - end_x = torch.gather(sorted_all_x, dim=2, index=end_idx.unsqueeze(2)).squeeze(2) - start_idx2 = torch.where( - torch.eq(x_idx, 0), - torch.tensor(0, device=x.device), - torch.where( - torch.eq(x_idx, K), torch.tensor(K - 2, device=x.device), cand_start_idx, - ), - ) - y_positions_expanded = yp.unsqueeze(0).expand(N, -1, -1) - start_y = torch.gather(y_positions_expanded, dim=2, index=start_idx2.unsqueeze(2)).squeeze(2) - end_y = torch.gather(y_positions_expanded, dim=2, index=(start_idx2 + 1).unsqueeze(2)).squeeze(2) - cand = start_y + (x - start_x) * (end_y - start_y) / (end_x - start_x) - return cand - - -def expand_dims(v, dims): - """ - Expand the tensor `v` to the dim `dims`. - - Args: - `v`: a PyTorch tensor with shape [N]. - `dim`: a `int`. - Returns: - a PyTorch tensor with shape [N, 1, 1, ..., 1] and the total dimension is `dims`. 
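The `expand_dims` helper documented here is pure shape bookkeeping: it right-pads a length-N vector with singleton dimensions so that per-sample scalars broadcast against (N, C, H, W) tensors. A standalone illustration of the same indexing trick:

import torch

v = torch.rand(8)                         # one scalar per batch element
v4 = v[(...,) + (None,) * 3]              # same effect as expand_dims(v, dims=4)
assert v4.shape == (8, 1, 1, 1)
x = torch.randn(8, 3, 32, 32)
assert (v4 * x).shape == x.shape          # broadcasts over channels and spatial dims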
- """ - return v[(...,) + (None,)*(dims - 1)] \ No newline at end of file diff --git a/ldm/models/diffusion/dpm_solver/sampler.py b/ldm/models/diffusion/dpm_solver/sampler.py deleted file mode 100644 index 2c42d6f9..00000000 --- a/ldm/models/diffusion/dpm_solver/sampler.py +++ /dev/null @@ -1,82 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch - -from .dpm_solver import NoiseScheduleVP, model_wrapper, DPM_Solver - - -class DPMSolverSampler(object): - def __init__(self, model, **kwargs): - super().__init__() - self.model = model - to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.device) - self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod)) - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - - # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}') - - device = self.model.betas.device - if x_T is None: - img = torch.randn(size, device=device) - else: - img = x_T - - ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod) - - model_fn = model_wrapper( - lambda x, t, c: self.model.apply_model(x, t, c), - ns, - model_type="noise", - guidance_type="classifier-free", - condition=conditioning, - unconditional_condition=unconditional_conditioning, - guidance_scale=unconditional_guidance_scale, - ) - - dpm_solver = DPM_Solver(model_fn, ns, predict_x0=True, thresholding=False) - x = dpm_solver.sample(img, steps=S, skip_type="time_uniform", method="multistep", order=2, lower_order_final=True) - - return x.to(device), None diff --git a/ldm/models/diffusion/plms.py b/ldm/models/diffusion/plms.py deleted file mode 100644 index 78eeb100..00000000 --- a/ldm/models/diffusion/plms.py +++ /dev/null @@ -1,236 +0,0 @@ -"""SAMPLING ONLY.""" - -import torch -import numpy as np -from tqdm import tqdm -from functools import partial - -from ldm.modules.diffusionmodules.util import make_ddim_sampling_parameters, make_ddim_timesteps, noise_like - - -class PLMSSampler(object): - def __init__(self, model, schedule="linear", **kwargs): - super().__init__() - self.model = model - self.ddpm_num_timesteps = model.num_timesteps - self.schedule = schedule - - def register_buffer(self, name, attr): - if type(attr) == torch.Tensor: - if attr.device != torch.device("cuda"): - attr = attr.to(torch.device("cuda")) - setattr(self, name, attr) - - def make_schedule(self, ddim_num_steps, ddim_discretize="uniform", ddim_eta=0., verbose=True): - if ddim_eta != 0: - raise ValueError('ddim_eta must be 0 
for PLMS') - self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps, - num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose) - alphas_cumprod = self.model.alphas_cumprod - assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep' - to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device) - - self.register_buffer('betas', to_torch(self.model.betas)) - self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) - self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev)) - - # calculations for diffusion q(x_t | x_{t-1}) and others - self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod.cpu()))) - self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu()))) - self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu()))) - self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1))) - - # ddim sampling parameters - ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(), - ddim_timesteps=self.ddim_timesteps, - eta=ddim_eta,verbose=verbose) - self.register_buffer('ddim_sigmas', ddim_sigmas) - self.register_buffer('ddim_alphas', ddim_alphas) - self.register_buffer('ddim_alphas_prev', ddim_alphas_prev) - self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas)) - sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt( - (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * ( - 1 - self.alphas_cumprod / self.alphas_cumprod_prev)) - self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps) - - @torch.no_grad() - def sample(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - cbs = conditioning[list(conditioning.keys())[0]].shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - - @torch.no_grad() - def plms_sampling(self, cond, shape, - x_T=None, ddim_use_original_steps=False, - callback=None, timesteps=None, quantize_denoised=False, - mask=None, x0=None, img_callback=None, log_every_t=100, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None,): - device = self.model.betas.device - b = shape[0] - if x_T is None: - img = torch.randn(shape, device=device) - else: - img = x_T - - if timesteps is None: - timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps - elif timesteps is not None and not ddim_use_original_steps: - subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1 - timesteps = self.ddim_timesteps[:subset_end] - - intermediates = {'x_inter': [img], 'pred_x0': [img]} - time_range = list(reversed(range(0,timesteps))) if ddim_use_original_steps else np.flip(timesteps) - total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0] - print(f"Running PLMS Sampling with {total_steps} timesteps") - - iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps) - old_eps = [] - - for i, step in enumerate(iterator): - index = total_steps - i - 1 - ts = torch.full((b,), step, device=device, dtype=torch.long) - ts_next = torch.full((b,), time_range[min(i + 1, len(time_range) - 1)], device=device, dtype=torch.long) - - if mask is not None: - assert x0 is not None - img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass? - img = img_orig * mask + (1. 
- mask) * img - - outs = self.p_sample_plms(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps, - quantize_denoised=quantize_denoised, temperature=temperature, - noise_dropout=noise_dropout, score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - old_eps=old_eps, t_next=ts_next) - img, pred_x0, e_t = outs - old_eps.append(e_t) - if len(old_eps) >= 4: - old_eps.pop(0) - if callback: callback(i) - if img_callback: img_callback(pred_x0, i) - - if index % log_every_t == 0 or index == total_steps - 1: - intermediates['x_inter'].append(img) - intermediates['pred_x0'].append(pred_x0) - - return img, intermediates - - @torch.no_grad() - def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None, old_eps=None, t_next=None): - b, *_, device = *x.shape, x.device - - def get_model_output(x, t): - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - return e_t - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - - def get_x_prev_and_pred_x0(e_t, index): - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - e_t = get_model_output(x, t) - if len(old_eps) == 0: - # Pseudo Improved Euler (2nd order) - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index) - e_t_next = get_model_output(x_prev, t_next) - e_t_prime = (e_t + e_t_next) / 2 - elif len(old_eps) == 1: - # 2nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (3 * e_t - old_eps[-1]) / 2 - elif len(old_eps) == 2: - # 3nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12 - elif len(old_eps) >= 3: - # 4nd order Pseudo Linear Multistep (Adams-Bashforth) - e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2] - 9 * old_eps[-3]) / 24 - - x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index) - - return x_prev, pred_x0, e_t diff --git a/ldm/modules/attention.py b/ldm/modules/attention.py deleted file mode 100644 index f4eff39c..00000000 --- a/ldm/modules/attention.py +++ /dev/null @@ -1,261 +0,0 @@ -from inspect import isfunction -import math -import torch -import torch.nn.functional as F -from torch import nn, einsum -from einops import rearrange, repeat - -from ldm.modules.diffusionmodules.util import checkpoint - - -def exists(val): - return val is not None - - -def uniq(arr): - return{el: True for el in arr}.keys() - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def max_neg_value(t): - return -torch.finfo(t.dtype).max - - -def init_(tensor): - dim = tensor.shape[-1] - std = 1 / math.sqrt(dim) - tensor.uniform_(-std, std) - return tensor - - -# feedforward -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. 
- """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def Normalize(in_channels): - return torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6, affine=True) - - -class LinearAttention(nn.Module): - def __init__(self, dim, heads=4, dim_head=32): - super().__init__() - self.heads = heads - hidden_dim = dim_head * heads - self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias = False) - self.to_out = nn.Conv2d(hidden_dim, dim, 1) - - def forward(self, x): - b, c, h, w = x.shape - qkv = self.to_qkv(x) - q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads = self.heads, qkv=3) - k = k.softmax(dim=-1) - context = torch.einsum('bhdn,bhen->bhde', k, v) - out = torch.einsum('bhde,bhdn->bhen', context, q) - out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w) - return self.to_out(out) - - -class SpatialSelfAttention(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = rearrange(q, 'b c h w -> b (h w) c') - k = rearrange(k, 'b c h w -> b c (h w)') - w_ = torch.einsum('bij,bjk->bik', q, k) - - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = rearrange(v, 'b c h w -> b c (h w)') - w_ = rearrange(w_, 'b i j -> b j i') - h_ = torch.einsum('bij,bjk->bik', v, w_) - h_ = rearrange(h_, 'b c (h w) -> b c h w', h=h) - h_ = self.proj_out(h_) - - return x+h_ - - -class CrossAttention(nn.Module): - def __init__(self, query_dim, context_dim=None, heads=8, dim_head=64, dropout=0.): - super().__init__() - inner_dim = dim_head * heads - context_dim = default(context_dim, query_dim) - - self.scale = dim_head ** -0.5 - self.heads = heads - - self.to_q = nn.Linear(query_dim, inner_dim, bias=False) - self.to_k = nn.Linear(context_dim, inner_dim, bias=False) - self.to_v = nn.Linear(context_dim, inner_dim, bias=False) - - self.to_out = nn.Sequential( - nn.Linear(inner_dim, query_dim), - nn.Dropout(dropout) - ) - - def forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if exists(mask): - mask = rearrange(mask, 'b ... 
-> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) - - -class BasicTransformerBlock(nn.Module): - def __init__(self, dim, n_heads, d_head, dropout=0., context_dim=None, gated_ff=True, checkpoint=True): - super().__init__() - self.attn1 = CrossAttention(query_dim=dim, heads=n_heads, dim_head=d_head, dropout=dropout) # is a self-attention - self.ff = FeedForward(dim, dropout=dropout, glu=gated_ff) - self.attn2 = CrossAttention(query_dim=dim, context_dim=context_dim, - heads=n_heads, dim_head=d_head, dropout=dropout) # is self-attn if context is none - self.norm1 = nn.LayerNorm(dim) - self.norm2 = nn.LayerNorm(dim) - self.norm3 = nn.LayerNorm(dim) - self.checkpoint = checkpoint - - def forward(self, x, context=None): - return checkpoint(self._forward, (x, context), self.parameters(), self.checkpoint) - - def _forward(self, x, context=None): - x = self.attn1(self.norm1(x)) + x - x = self.attn2(self.norm2(x), context=context) + x - x = self.ff(self.norm3(x)) + x - return x - - -class SpatialTransformer(nn.Module): - """ - Transformer block for image-like data. - First, project the input (aka embedding) - and reshape to b, t, d. - Then apply standard transformer action. - Finally, reshape to image - """ - def __init__(self, in_channels, n_heads, d_head, - depth=1, dropout=0., context_dim=None): - super().__init__() - self.in_channels = in_channels - inner_dim = n_heads * d_head - self.norm = Normalize(in_channels) - - self.proj_in = nn.Conv2d(in_channels, - inner_dim, - kernel_size=1, - stride=1, - padding=0) - - self.transformer_blocks = nn.ModuleList( - [BasicTransformerBlock(inner_dim, n_heads, d_head, dropout=dropout, context_dim=context_dim) - for d in range(depth)] - ) - - self.proj_out = zero_module(nn.Conv2d(inner_dim, - in_channels, - kernel_size=1, - stride=1, - padding=0)) - - def forward(self, x, context=None): - # note: if no context is given, cross-attention defaults to self-attention - b, c, h, w = x.shape - x_in = x - x = self.norm(x) - x = self.proj_in(x) - x = rearrange(x, 'b c h w -> b (h w) c') - for block in self.transformer_blocks: - x = block(x, context=context) - x = rearrange(x, 'b (h w) c -> b c h w', h=h, w=w) - x = self.proj_out(x) - return x + x_in \ No newline at end of file diff --git a/ldm/modules/diffusionmodules/__init__.py b/ldm/modules/diffusionmodules/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ldm/modules/diffusionmodules/model.py b/ldm/modules/diffusionmodules/model.py deleted file mode 100644 index 533e589a..00000000 --- a/ldm/modules/diffusionmodules/model.py +++ /dev/null @@ -1,835 +0,0 @@ -# pytorch_diffusion + derived encoder decoder -import math -import torch -import torch.nn as nn -import numpy as np -from einops import rearrange - -from ldm.util import instantiate_from_config -from ldm.modules.attention import LinearAttention - - -def get_timestep_embedding(timesteps, embedding_dim): - """ - This matches the implementation in Denoising Diffusion Probabilistic Models: - From Fairseq. - Build sinusoidal embeddings. - This matches the implementation in tensor2tensor, but differs slightly - from the description in Section 3.5 of "Attention Is All You Need". 
- """ - assert len(timesteps.shape) == 1 - - half_dim = embedding_dim // 2 - emb = math.log(10000) / (half_dim - 1) - emb = torch.exp(torch.arange(half_dim, dtype=torch.float32) * -emb) - emb = emb.to(device=timesteps.device) - emb = timesteps.float()[:, None] * emb[None, :] - emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1) - if embedding_dim % 2 == 1: # zero pad - emb = torch.nn.functional.pad(emb, (0,1,0,0)) - return emb - - -def nonlinearity(x): - # swish - return x*torch.sigmoid(x) - - -def Normalize(in_channels, num_groups=32): - return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True) - - -class Upsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest") - if self.with_conv: - x = self.conv(x) - return x - - -class Downsample(nn.Module): - def __init__(self, in_channels, with_conv): - super().__init__() - self.with_conv = with_conv - if self.with_conv: - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=3, - stride=2, - padding=0) - - def forward(self, x): - if self.with_conv: - pad = (0,1,0,1) - x = torch.nn.functional.pad(x, pad, mode="constant", value=0) - x = self.conv(x) - else: - x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2) - return x - - -class ResnetBlock(nn.Module): - def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False, - dropout, temb_channels=512): - super().__init__() - self.in_channels = in_channels - out_channels = in_channels if out_channels is None else out_channels - self.out_channels = out_channels - self.use_conv_shortcut = conv_shortcut - - self.norm1 = Normalize(in_channels) - self.conv1 = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if temb_channels > 0: - self.temb_proj = torch.nn.Linear(temb_channels, - out_channels) - self.norm2 = Normalize(out_channels) - self.dropout = torch.nn.Dropout(dropout) - self.conv2 = torch.nn.Conv2d(out_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - self.conv_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - else: - self.nin_shortcut = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=1, - stride=1, - padding=0) - - def forward(self, x, temb): - h = x - h = self.norm1(h) - h = nonlinearity(h) - h = self.conv1(h) - - if temb is not None: - h = h + self.temb_proj(nonlinearity(temb))[:,:,None,None] - - h = self.norm2(h) - h = nonlinearity(h) - h = self.dropout(h) - h = self.conv2(h) - - if self.in_channels != self.out_channels: - if self.use_conv_shortcut: - x = self.conv_shortcut(x) - else: - x = self.nin_shortcut(x) - - return x+h - - -class LinAttnBlock(LinearAttention): - """to match AttnBlock usage""" - def __init__(self, in_channels): - super().__init__(dim=in_channels, heads=1, dim_head=in_channels) - - -class AttnBlock(nn.Module): - def __init__(self, in_channels): - super().__init__() - self.in_channels = in_channels - - self.norm = Normalize(in_channels) - self.q = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.k = 
torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.v = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - self.proj_out = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=1, - stride=1, - padding=0) - - - def forward(self, x): - h_ = x - h_ = self.norm(h_) - q = self.q(h_) - k = self.k(h_) - v = self.v(h_) - - # compute attention - b,c,h,w = q.shape - q = q.reshape(b,c,h*w) - q = q.permute(0,2,1) # b,hw,c - k = k.reshape(b,c,h*w) # b,c,hw - w_ = torch.bmm(q,k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w_ = w_ * (int(c)**(-0.5)) - w_ = torch.nn.functional.softmax(w_, dim=2) - - # attend to values - v = v.reshape(b,c,h*w) - w_ = w_.permute(0,2,1) # b,hw,hw (first hw of k, second of q) - h_ = torch.bmm(v,w_) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - h_ = h_.reshape(b,c,h,w) - - h_ = self.proj_out(h_) - - return x+h_ - - -def make_attn(in_channels, attn_type="vanilla"): - assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown' - print(f"making attention of type '{attn_type}' with {in_channels} in_channels") - if attn_type == "vanilla": - return AttnBlock(in_channels) - elif attn_type == "none": - return nn.Identity(in_channels) - else: - return LinAttnBlock(in_channels) - - -class Model(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, use_timestep=True, use_linear_attn=False, attn_type="vanilla"): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = self.ch*4 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - self.use_timestep = use_timestep - if self.use_timestep: - # timestep embedding - self.temb = nn.Module() - self.temb.dense = nn.ModuleList([ - torch.nn.Linear(self.ch, - self.temb_ch), - torch.nn.Linear(self.temb_ch, - self.temb_ch), - ]) - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - skip_in = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - 
if i_block == self.num_res_blocks: - skip_in = ch*in_ch_mult[i_level] - block.append(ResnetBlock(in_channels=block_in+skip_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x, t=None, context=None): - #assert x.shape[2] == x.shape[3] == self.resolution - if context is not None: - # assume aligned context, cat along channel axis - x = torch.cat((x, context), dim=1) - if self.use_timestep: - # timestep embedding - assert t is not None - temb = get_timestep_embedding(t, self.ch) - temb = self.temb.dense[0](temb) - temb = nonlinearity(temb) - temb = self.temb.dense[1](temb) - else: - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block]( - torch.cat([h, hs.pop()], dim=1), temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - def get_last_layer(self): - return self.conv_out.weight - - -class Encoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, double_z=True, use_linear_attn=False, attn_type="vanilla", - **ignore_kwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - - # downsampling - self.conv_in = torch.nn.Conv2d(in_channels, - self.ch, - kernel_size=3, - stride=1, - padding=1) - - curr_res = resolution - in_ch_mult = (1,)+tuple(ch_mult) - self.in_ch_mult = in_ch_mult - self.down = nn.ModuleList() - for i_level in range(self.num_resolutions): - block = nn.ModuleList() - attn = nn.ModuleList() - block_in = ch*in_ch_mult[i_level] - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - down = nn.Module() - down.block = block - down.attn = attn - if i_level != self.num_resolutions-1: - down.downsample = Downsample(block_in, resamp_with_conv) - curr_res = curr_res // 2 - self.down.append(down) - - # middle - self.mid = nn.Module() - 
self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - 2*z_channels if double_z else z_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # timestep embedding - temb = None - - # downsampling - hs = [self.conv_in(x)] - for i_level in range(self.num_resolutions): - for i_block in range(self.num_res_blocks): - h = self.down[i_level].block[i_block](hs[-1], temb) - if len(self.down[i_level].attn) > 0: - h = self.down[i_level].attn[i_block](h) - hs.append(h) - if i_level != self.num_resolutions-1: - hs.append(self.down[i_level].downsample(hs[-1])) - - # middle - h = hs[-1] - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # end - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class Decoder(nn.Module): - def __init__(self, *, ch, out_ch, ch_mult=(1,2,4,8), num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels, - resolution, z_channels, give_pre_end=False, tanh_out=False, use_linear_attn=False, - attn_type="vanilla", **ignorekwargs): - super().__init__() - if use_linear_attn: attn_type = "linear" - self.ch = ch - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - self.resolution = resolution - self.in_channels = in_channels - self.give_pre_end = give_pre_end - self.tanh_out = tanh_out - - # compute in_ch_mult, block_in and curr_res at lowest res - in_ch_mult = (1,)+tuple(ch_mult) - block_in = ch*ch_mult[self.num_resolutions-1] - curr_res = resolution // 2**(self.num_resolutions-1) - self.z_shape = (1,z_channels,curr_res,curr_res) - print("Working with z of shape {} = {} dimensions.".format( - self.z_shape, np.prod(self.z_shape))) - - # z to block_in - self.conv_in = torch.nn.Conv2d(z_channels, - block_in, - kernel_size=3, - stride=1, - padding=1) - - # middle - self.mid = nn.Module() - self.mid.block_1 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - self.mid.attn_1 = make_attn(block_in, attn_type=attn_type) - self.mid.block_2 = ResnetBlock(in_channels=block_in, - out_channels=block_in, - temb_channels=self.temb_ch, - dropout=dropout) - - # upsampling - self.up = nn.ModuleList() - for i_level in reversed(range(self.num_resolutions)): - block = nn.ModuleList() - attn = nn.ModuleList() - block_out = ch*ch_mult[i_level] - for i_block in range(self.num_res_blocks+1): - block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - if curr_res in attn_resolutions: - attn.append(make_attn(block_in, attn_type=attn_type)) - up = nn.Module() - up.block = block - up.attn = attn - if i_level != 0: - up.upsample = Upsample(block_in, resamp_with_conv) - curr_res = curr_res * 2 - self.up.insert(0, up) # prepend to get consistent order - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_ch, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, z): - #assert z.shape[1:] == self.z_shape[1:] - self.last_z_shape = z.shape - - # timestep embedding - temb = None - - # z to block_in - h = 
self.conv_in(z) - - # middle - h = self.mid.block_1(h, temb) - h = self.mid.attn_1(h) - h = self.mid.block_2(h, temb) - - # upsampling - for i_level in reversed(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks+1): - h = self.up[i_level].block[i_block](h, temb) - if len(self.up[i_level].attn) > 0: - h = self.up[i_level].attn[i_block](h) - if i_level != 0: - h = self.up[i_level].upsample(h) - - # end - if self.give_pre_end: - return h - - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - if self.tanh_out: - h = torch.tanh(h) - return h - - -class SimpleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, *args, **kwargs): - super().__init__() - self.model = nn.ModuleList([nn.Conv2d(in_channels, in_channels, 1), - ResnetBlock(in_channels=in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=2 * in_channels, - out_channels=4 * in_channels, - temb_channels=0, dropout=0.0), - ResnetBlock(in_channels=4 * in_channels, - out_channels=2 * in_channels, - temb_channels=0, dropout=0.0), - nn.Conv2d(2*in_channels, in_channels, 1), - Upsample(in_channels, with_conv=True)]) - # end - self.norm_out = Normalize(in_channels) - self.conv_out = torch.nn.Conv2d(in_channels, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - for i, layer in enumerate(self.model): - if i in [1,2,3]: - x = layer(x, None) - else: - x = layer(x) - - h = self.norm_out(x) - h = nonlinearity(h) - x = self.conv_out(h) - return x - - -class UpsampleDecoder(nn.Module): - def __init__(self, in_channels, out_channels, ch, num_res_blocks, resolution, - ch_mult=(2,2), dropout=0.0): - super().__init__() - # upsampling - self.temb_ch = 0 - self.num_resolutions = len(ch_mult) - self.num_res_blocks = num_res_blocks - block_in = in_channels - curr_res = resolution // 2 ** (self.num_resolutions - 1) - self.res_blocks = nn.ModuleList() - self.upsample_blocks = nn.ModuleList() - for i_level in range(self.num_resolutions): - res_block = [] - block_out = ch * ch_mult[i_level] - for i_block in range(self.num_res_blocks + 1): - res_block.append(ResnetBlock(in_channels=block_in, - out_channels=block_out, - temb_channels=self.temb_ch, - dropout=dropout)) - block_in = block_out - self.res_blocks.append(nn.ModuleList(res_block)) - if i_level != self.num_resolutions - 1: - self.upsample_blocks.append(Upsample(block_in, True)) - curr_res = curr_res * 2 - - # end - self.norm_out = Normalize(block_in) - self.conv_out = torch.nn.Conv2d(block_in, - out_channels, - kernel_size=3, - stride=1, - padding=1) - - def forward(self, x): - # upsampling - h = x - for k, i_level in enumerate(range(self.num_resolutions)): - for i_block in range(self.num_res_blocks + 1): - h = self.res_blocks[i_level][i_block](h, None) - if i_level != self.num_resolutions - 1: - h = self.upsample_blocks[k](h) - h = self.norm_out(h) - h = nonlinearity(h) - h = self.conv_out(h) - return h - - -class LatentRescaler(nn.Module): - def __init__(self, factor, in_channels, mid_channels, out_channels, depth=2): - super().__init__() - # residual block, interpolate, residual block - self.factor = factor - self.conv_in = nn.Conv2d(in_channels, - mid_channels, - kernel_size=3, - stride=1, - padding=1) - self.res_block1 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, - out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - self.attn = AttnBlock(mid_channels) - self.res_block2 = nn.ModuleList([ResnetBlock(in_channels=mid_channels, 
- out_channels=mid_channels, - temb_channels=0, - dropout=0.0) for _ in range(depth)]) - - self.conv_out = nn.Conv2d(mid_channels, - out_channels, - kernel_size=1, - ) - - def forward(self, x): - x = self.conv_in(x) - for block in self.res_block1: - x = block(x, None) - x = torch.nn.functional.interpolate(x, size=(int(round(x.shape[2]*self.factor)), int(round(x.shape[3]*self.factor)))) - x = self.attn(x) - for block in self.res_block2: - x = block(x, None) - x = self.conv_out(x) - return x - - -class MergedRescaleEncoder(nn.Module): - def __init__(self, in_channels, ch, resolution, out_ch, num_res_blocks, - attn_resolutions, dropout=0.0, resamp_with_conv=True, - ch_mult=(1,2,4,8), rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - intermediate_chn = ch * ch_mult[-1] - self.encoder = Encoder(in_channels=in_channels, num_res_blocks=num_res_blocks, ch=ch, ch_mult=ch_mult, - z_channels=intermediate_chn, double_z=False, resolution=resolution, - attn_resolutions=attn_resolutions, dropout=dropout, resamp_with_conv=resamp_with_conv, - out_ch=None) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=intermediate_chn, - mid_channels=intermediate_chn, out_channels=out_ch, depth=rescale_module_depth) - - def forward(self, x): - x = self.encoder(x) - x = self.rescaler(x) - return x - - -class MergedRescaleDecoder(nn.Module): - def __init__(self, z_channels, out_ch, resolution, num_res_blocks, attn_resolutions, ch, ch_mult=(1,2,4,8), - dropout=0.0, resamp_with_conv=True, rescale_factor=1.0, rescale_module_depth=1): - super().__init__() - tmp_chn = z_channels*ch_mult[-1] - self.decoder = Decoder(out_ch=out_ch, z_channels=tmp_chn, attn_resolutions=attn_resolutions, dropout=dropout, - resamp_with_conv=resamp_with_conv, in_channels=None, num_res_blocks=num_res_blocks, - ch_mult=ch_mult, resolution=resolution, ch=ch) - self.rescaler = LatentRescaler(factor=rescale_factor, in_channels=z_channels, mid_channels=tmp_chn, - out_channels=tmp_chn, depth=rescale_module_depth) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Upsampler(nn.Module): - def __init__(self, in_size, out_size, in_channels, out_channels, ch_mult=2): - super().__init__() - assert out_size >= in_size - num_blocks = int(np.log2(out_size//in_size))+1 - factor_up = 1.+ (out_size % in_size) - print(f"Building {self.__class__.__name__} with in_size: {in_size} --> out_size {out_size} and factor {factor_up}") - self.rescaler = LatentRescaler(factor=factor_up, in_channels=in_channels, mid_channels=2*in_channels, - out_channels=in_channels) - self.decoder = Decoder(out_ch=out_channels, resolution=out_size, z_channels=in_channels, num_res_blocks=2, - attn_resolutions=[], in_channels=None, ch=in_channels, - ch_mult=[ch_mult for _ in range(num_blocks)]) - - def forward(self, x): - x = self.rescaler(x) - x = self.decoder(x) - return x - - -class Resize(nn.Module): - def __init__(self, in_channels=None, learned=False, mode="bilinear"): - super().__init__() - self.with_conv = learned - self.mode = mode - if self.with_conv: - print(f"Note: {self.__class__.__name} uses learned downsampling and will ignore the fixed {mode} mode") - raise NotImplementedError() - assert in_channels is not None - # no asymmetric padding in torch conv, must do it ourselves - self.conv = torch.nn.Conv2d(in_channels, - in_channels, - kernel_size=4, - stride=2, - padding=1) - - def forward(self, x, scale_factor=1.0): - if scale_factor==1.0: - return x - else: - x = torch.nn.functional.interpolate(x, 
mode=self.mode, align_corners=False, scale_factor=scale_factor) - return x - -class FirstStagePostProcessor(nn.Module): - - def __init__(self, ch_mult:list, in_channels, - pretrained_model:nn.Module=None, - reshape=False, - n_channels=None, - dropout=0., - pretrained_config=None): - super().__init__() - if pretrained_config is None: - assert pretrained_model is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.pretrained_model = pretrained_model - else: - assert pretrained_config is not None, 'Either "pretrained_model" or "pretrained_config" must not be None' - self.instantiate_pretrained(pretrained_config) - - self.do_reshape = reshape - - if n_channels is None: - n_channels = self.pretrained_model.encoder.ch - - self.proj_norm = Normalize(in_channels,num_groups=in_channels//2) - self.proj = nn.Conv2d(in_channels,n_channels,kernel_size=3, - stride=1,padding=1) - - blocks = [] - downs = [] - ch_in = n_channels - for m in ch_mult: - blocks.append(ResnetBlock(in_channels=ch_in,out_channels=m*n_channels,dropout=dropout)) - ch_in = m * n_channels - downs.append(Downsample(ch_in, with_conv=False)) - - self.model = nn.ModuleList(blocks) - self.downsampler = nn.ModuleList(downs) - - - def instantiate_pretrained(self, config): - model = instantiate_from_config(config) - self.pretrained_model = model.eval() - # self.pretrained_model.train = False - for param in self.pretrained_model.parameters(): - param.requires_grad = False - - - @torch.no_grad() - def encode_with_pretrained(self,x): - c = self.pretrained_model.encode(x) - if isinstance(c, DiagonalGaussianDistribution): - c = c.mode() - return c - - def forward(self,x): - z_fs = self.encode_with_pretrained(x) - z = self.proj_norm(z_fs) - z = self.proj(z) - z = nonlinearity(z) - - for submodel, downmodel in zip(self.model,self.downsampler): - z = submodel(z,temb=None) - z = downmodel(z) - - if self.do_reshape: - z = rearrange(z,'b c h w -> b (h w) c') - return z - diff --git a/ldm/modules/diffusionmodules/openaimodel.py b/ldm/modules/diffusionmodules/openaimodel.py deleted file mode 100644 index fcf95d1e..00000000 --- a/ldm/modules/diffusionmodules/openaimodel.py +++ /dev/null @@ -1,961 +0,0 @@ -from abc import abstractmethod -from functools import partial -import math -from typing import Iterable - -import numpy as np -import torch as th -import torch.nn as nn -import torch.nn.functional as F - -from ldm.modules.diffusionmodules.util import ( - checkpoint, - conv_nd, - linear, - avg_pool_nd, - zero_module, - normalization, - timestep_embedding, -) -from ldm.modules.attention import SpatialTransformer - - -# dummy replace -def convert_module_to_f16(x): - pass - -def convert_module_to_f32(x): - pass - - -## go -class AttentionPool2d(nn.Module): - """ - Adapted from CLIP: https://github.com/openai/CLIP/blob/main/clip/model.py - """ - - def __init__( - self, - spacial_dim: int, - embed_dim: int, - num_heads_channels: int, - output_dim: int = None, - ): - super().__init__() - self.positional_embedding = nn.Parameter(th.randn(embed_dim, spacial_dim ** 2 + 1) / embed_dim ** 0.5) - self.qkv_proj = conv_nd(1, embed_dim, 3 * embed_dim, 1) - self.c_proj = conv_nd(1, embed_dim, output_dim or embed_dim, 1) - self.num_heads = embed_dim // num_heads_channels - self.attention = QKVAttention(self.num_heads) - - def forward(self, x): - b, c, *_spatial = x.shape - x = x.reshape(b, c, -1) # NC(HW) - x = th.cat([x.mean(dim=-1, keepdim=True), x], dim=-1) # NC(HW+1) - x = x + self.positional_embedding[None, :, :].to(x.dtype) # 
NC(HW+1) - x = self.qkv_proj(x) - x = self.attention(x) - x = self.c_proj(x) - return x[:, :, 0] - - -class TimestepBlock(nn.Module): - """ - Any module where forward() takes timestep embeddings as a second argument. - """ - - @abstractmethod - def forward(self, x, emb): - """ - Apply the module to `x` given `emb` timestep embeddings. - """ - - -class TimestepEmbedSequential(nn.Sequential, TimestepBlock): - """ - A sequential module that passes timestep embeddings to the children that - support it as an extra input. - """ - - def forward(self, x, emb, context=None): - for layer in self: - if isinstance(layer, TimestepBlock): - x = layer(x, emb) - elif isinstance(layer, SpatialTransformer): - x = layer(x, context) - else: - x = layer(x) - return x - - -class Upsample(nn.Module): - """ - An upsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - upsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None, padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - if use_conv: - self.conv = conv_nd(dims, self.channels, self.out_channels, 3, padding=padding) - - def forward(self, x): - assert x.shape[1] == self.channels - if self.dims == 3: - x = F.interpolate( - x, (x.shape[2], x.shape[3] * 2, x.shape[4] * 2), mode="nearest" - ) - else: - x = F.interpolate(x, scale_factor=2, mode="nearest") - if self.use_conv: - x = self.conv(x) - return x - -class TransposedUpsample(nn.Module): - 'Learned 2x upsampling without padding' - def __init__(self, channels, out_channels=None, ks=5): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - - self.up = nn.ConvTranspose2d(self.channels,self.out_channels,kernel_size=ks,stride=2) - - def forward(self,x): - return self.up(x) - - -class Downsample(nn.Module): - """ - A downsampling layer with an optional convolution. - :param channels: channels in the inputs and outputs. - :param use_conv: a bool determining if a convolution is applied. - :param dims: determines if the signal is 1D, 2D, or 3D. If 3D, then - downsampling occurs in the inner-two dimensions. - """ - - def __init__(self, channels, use_conv, dims=2, out_channels=None,padding=1): - super().__init__() - self.channels = channels - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.dims = dims - stride = 2 if dims != 3 else (1, 2, 2) - if use_conv: - self.op = conv_nd( - dims, self.channels, self.out_channels, 3, stride=stride, padding=padding - ) - else: - assert self.channels == self.out_channels - self.op = avg_pool_nd(dims, kernel_size=stride, stride=stride) - - def forward(self, x): - assert x.shape[1] == self.channels - return self.op(x) - - -class ResBlock(TimestepBlock): - """ - A residual block that can optionally change the number of channels. - :param channels: the number of input channels. - :param emb_channels: the number of timestep embedding channels. - :param dropout: the rate of dropout. - :param out_channels: if specified, the number of out channels. - :param use_conv: if True and out_channels is specified, use a spatial - convolution instead of a smaller 1x1 convolution to change the - channels in the skip connection. - :param dims: determines if the signal is 1D, 2D, or 3D. 
- :param use_checkpoint: if True, use gradient checkpointing on this module. - :param up: if True, use this block for upsampling. - :param down: if True, use this block for downsampling. - """ - - def __init__( - self, - channels, - emb_channels, - dropout, - out_channels=None, - use_conv=False, - use_scale_shift_norm=False, - dims=2, - use_checkpoint=False, - up=False, - down=False, - ): - super().__init__() - self.channels = channels - self.emb_channels = emb_channels - self.dropout = dropout - self.out_channels = out_channels or channels - self.use_conv = use_conv - self.use_checkpoint = use_checkpoint - self.use_scale_shift_norm = use_scale_shift_norm - - self.in_layers = nn.Sequential( - normalization(channels), - nn.SiLU(), - conv_nd(dims, channels, self.out_channels, 3, padding=1), - ) - - self.updown = up or down - - if up: - self.h_upd = Upsample(channels, False, dims) - self.x_upd = Upsample(channels, False, dims) - elif down: - self.h_upd = Downsample(channels, False, dims) - self.x_upd = Downsample(channels, False, dims) - else: - self.h_upd = self.x_upd = nn.Identity() - - self.emb_layers = nn.Sequential( - nn.SiLU(), - linear( - emb_channels, - 2 * self.out_channels if use_scale_shift_norm else self.out_channels, - ), - ) - self.out_layers = nn.Sequential( - normalization(self.out_channels), - nn.SiLU(), - nn.Dropout(p=dropout), - zero_module( - conv_nd(dims, self.out_channels, self.out_channels, 3, padding=1) - ), - ) - - if self.out_channels == channels: - self.skip_connection = nn.Identity() - elif use_conv: - self.skip_connection = conv_nd( - dims, channels, self.out_channels, 3, padding=1 - ) - else: - self.skip_connection = conv_nd(dims, channels, self.out_channels, 1) - - def forward(self, x, emb): - """ - Apply the block to a Tensor, conditioned on a timestep embedding. - :param x: an [N x C x ...] Tensor of features. - :param emb: an [N x emb_channels] Tensor of timestep embeddings. - :return: an [N x C x ...] Tensor of outputs. - """ - return checkpoint( - self._forward, (x, emb), self.parameters(), self.use_checkpoint - ) - - - def _forward(self, x, emb): - if self.updown: - in_rest, in_conv = self.in_layers[:-1], self.in_layers[-1] - h = in_rest(x) - h = self.h_upd(h) - x = self.x_upd(x) - h = in_conv(h) - else: - h = self.in_layers(x) - emb_out = self.emb_layers(emb).type(h.dtype) - while len(emb_out.shape) < len(h.shape): - emb_out = emb_out[..., None] - if self.use_scale_shift_norm: - out_norm, out_rest = self.out_layers[0], self.out_layers[1:] - scale, shift = th.chunk(emb_out, 2, dim=1) - h = out_norm(h) * (1 + scale) + shift - h = out_rest(h) - else: - h = h + emb_out - h = self.out_layers(h) - return self.skip_connection(x) + h - - -class AttentionBlock(nn.Module): - """ - An attention block that allows spatial positions to attend to each other. - Originally ported from here, but adapted to the N-d case. - https://github.com/hojonathanho/diffusion/blob/1e0dceb3b3495bbe19116a5e1b3596cd0706c543/diffusion_tf/models/unet.py#L66. 
- """ - - def __init__( - self, - channels, - num_heads=1, - num_head_channels=-1, - use_checkpoint=False, - use_new_attention_order=False, - ): - super().__init__() - self.channels = channels - if num_head_channels == -1: - self.num_heads = num_heads - else: - assert ( - channels % num_head_channels == 0 - ), f"q,k,v channels {channels} is not divisible by num_head_channels {num_head_channels}" - self.num_heads = channels // num_head_channels - self.use_checkpoint = use_checkpoint - self.norm = normalization(channels) - self.qkv = conv_nd(1, channels, channels * 3, 1) - if use_new_attention_order: - # split qkv before split heads - self.attention = QKVAttention(self.num_heads) - else: - # split heads before split qkv - self.attention = QKVAttentionLegacy(self.num_heads) - - self.proj_out = zero_module(conv_nd(1, channels, channels, 1)) - - def forward(self, x): - return checkpoint(self._forward, (x,), self.parameters(), True) # TODO: check checkpoint usage, is True # TODO: fix the .half call!!! - #return pt_checkpoint(self._forward, x) # pytorch - - def _forward(self, x): - b, c, *spatial = x.shape - x = x.reshape(b, c, -1) - qkv = self.qkv(self.norm(x)) - h = self.attention(qkv) - h = self.proj_out(h) - return (x + h).reshape(b, c, *spatial) - - -def count_flops_attn(model, _x, y): - """ - A counter for the `thop` package to count the operations in an - attention operation. - Meant to be used like: - macs, params = thop.profile( - model, - inputs=(inputs, timestamps), - custom_ops={QKVAttention: QKVAttention.count_flops}, - ) - """ - b, c, *spatial = y[0].shape - num_spatial = int(np.prod(spatial)) - # We perform two matmuls with the same number of ops. - # The first computes the weight matrix, the second computes - # the combination of the value vectors. - matmul_ops = 2 * b * (num_spatial ** 2) * c - model.total_ops += th.DoubleTensor([matmul_ops]) - - -class QKVAttentionLegacy(nn.Module): - """ - A module which performs QKV attention. Matches legacy QKVAttention + input/ouput heads shaping - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (H * 3 * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. - """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.reshape(bs * self.n_heads, ch * 3, length).split(ch, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", q * scale, k * scale - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class QKVAttention(nn.Module): - """ - A module which performs QKV attention and splits in a different order. - """ - - def __init__(self, n_heads): - super().__init__() - self.n_heads = n_heads - - def forward(self, qkv): - """ - Apply QKV attention. - :param qkv: an [N x (3 * H * C) x T] tensor of Qs, Ks, and Vs. - :return: an [N x (H * C) x T] tensor after attention. 
- """ - bs, width, length = qkv.shape - assert width % (3 * self.n_heads) == 0 - ch = width // (3 * self.n_heads) - q, k, v = qkv.chunk(3, dim=1) - scale = 1 / math.sqrt(math.sqrt(ch)) - weight = th.einsum( - "bct,bcs->bts", - (q * scale).view(bs * self.n_heads, ch, length), - (k * scale).view(bs * self.n_heads, ch, length), - ) # More stable with f16 than dividing afterwards - weight = th.softmax(weight.float(), dim=-1).type(weight.dtype) - a = th.einsum("bts,bcs->bct", weight, v.reshape(bs * self.n_heads, ch, length)) - return a.reshape(bs, -1, length) - - @staticmethod - def count_flops(model, _x, y): - return count_flops_attn(model, _x, y) - - -class UNetModel(nn.Module): - """ - The full UNet model with attention and timestep embedding. - :param in_channels: channels in the input Tensor. - :param model_channels: base channel count for the model. - :param out_channels: channels in the output Tensor. - :param num_res_blocks: number of residual blocks per downsample. - :param attention_resolutions: a collection of downsample rates at which - attention will take place. May be a set, list, or tuple. - For example, if this contains 4, then at 4x downsampling, attention - will be used. - :param dropout: the dropout probability. - :param channel_mult: channel multiplier for each level of the UNet. - :param conv_resample: if True, use learned convolutions for upsampling and - downsampling. - :param dims: determines if the signal is 1D, 2D, or 3D. - :param num_classes: if specified (as an int), then this model will be - class-conditional with `num_classes` classes. - :param use_checkpoint: use gradient checkpointing to reduce memory usage. - :param num_heads: the number of attention heads in each attention layer. - :param num_heads_channels: if specified, ignore num_heads and instead use - a fixed channel width per attention head. - :param num_heads_upsample: works with num_heads to set a different number - of heads for upsampling. Deprecated. - :param use_scale_shift_norm: use a FiLM-like conditioning mechanism. - :param resblock_updown: use residual blocks for up/downsampling. - :param use_new_attention_order: use a different attention pattern for potentially - increased efficiency. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - num_classes=None, - use_checkpoint=False, - use_fp16=False, - num_heads=-1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - use_spatial_transformer=False, # custom transformer support - transformer_depth=1, # custom transformer support - context_dim=None, # custom transformer support - n_embed=None, # custom support for prediction of discrete ids into codebook of first stage vq model - legacy=True, - ): - super().__init__() - if use_spatial_transformer: - assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...' - - if context_dim is not None: - assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...' 
- from omegaconf.listconfig import ListConfig - if type(context_dim) == ListConfig: - context_dim = list(context_dim) - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - if num_heads == -1: - assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set' - - if num_head_channels == -1: - assert num_heads != -1, 'Either num_heads or num_head_channels has to be set' - - self.image_size = image_size - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.num_classes = num_classes - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - self.predict_codebook_ids = n_embed is not None - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - if self.num_classes is not None: - self.label_emb = nn.Embedding(num_classes, time_embed_dim) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - 
num_heads=num_heads, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - - self.output_blocks = nn.ModuleList([]) - for level, mult in list(enumerate(channel_mult))[::-1]: - for i in range(num_res_blocks + 1): - ich = input_block_chans.pop() - layers = [ - ResBlock( - ch + ich, - time_embed_dim, - dropout, - out_channels=model_channels * mult, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = model_channels * mult - if ds in attention_resolutions: - if num_head_channels == -1: - dim_head = ch // num_heads - else: - num_heads = ch // num_head_channels - dim_head = num_head_channels - if legacy: - #num_heads = 1 - dim_head = ch // num_heads if use_spatial_transformer else num_head_channels - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads_upsample, - num_head_channels=dim_head, - use_new_attention_order=use_new_attention_order, - ) if not use_spatial_transformer else SpatialTransformer( - ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim - ) - ) - if level and i == num_res_blocks: - out_ch = ch - layers.append( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - up=True, - ) - if resblock_updown - else Upsample(ch, conv_resample, dims=dims, out_channels=out_ch) - ) - ds //= 2 - self.output_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - zero_module(conv_nd(dims, model_channels, out_channels, 3, padding=1)), - ) - if self.predict_codebook_ids: - self.id_predictor = nn.Sequential( - normalization(ch), - conv_nd(dims, model_channels, n_embed, 1), - #nn.LogSoftmax(dim=1) # change to cross_entropy and produce non-normalized logits - ) - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - self.output_blocks.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - self.output_blocks.apply(convert_module_to_f32) - - def forward(self, x, timesteps=None, context=None, y=None,**kwargs): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :param context: conditioning plugged in via crossattn - :param y: an [N] Tensor of labels, if class-conditional. - :return: an [N x C x ...] Tensor of outputs. 
- """ - assert (y is not None) == ( - self.num_classes is not None - ), "must specify y if and only if the model is class-conditional" - hs = [] - t_emb = timestep_embedding(timesteps, self.model_channels, repeat_only=False) - emb = self.time_embed(t_emb) - - if self.num_classes is not None: - assert y.shape == (x.shape[0],) - emb = emb + self.label_emb(y) - - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb, context) - hs.append(h) - h = self.middle_block(h, emb, context) - for module in self.output_blocks: - h = th.cat([h, hs.pop()], dim=1) - h = module(h, emb, context) - h = h.type(x.dtype) - if self.predict_codebook_ids: - return self.id_predictor(h) - else: - return self.out(h) - - -class EncoderUNetModel(nn.Module): - """ - The half UNet model with attention and timestep embedding. - For usage, see UNet. - """ - - def __init__( - self, - image_size, - in_channels, - model_channels, - out_channels, - num_res_blocks, - attention_resolutions, - dropout=0, - channel_mult=(1, 2, 4, 8), - conv_resample=True, - dims=2, - use_checkpoint=False, - use_fp16=False, - num_heads=1, - num_head_channels=-1, - num_heads_upsample=-1, - use_scale_shift_norm=False, - resblock_updown=False, - use_new_attention_order=False, - pool="adaptive", - *args, - **kwargs - ): - super().__init__() - - if num_heads_upsample == -1: - num_heads_upsample = num_heads - - self.in_channels = in_channels - self.model_channels = model_channels - self.out_channels = out_channels - self.num_res_blocks = num_res_blocks - self.attention_resolutions = attention_resolutions - self.dropout = dropout - self.channel_mult = channel_mult - self.conv_resample = conv_resample - self.use_checkpoint = use_checkpoint - self.dtype = th.float16 if use_fp16 else th.float32 - self.num_heads = num_heads - self.num_head_channels = num_head_channels - self.num_heads_upsample = num_heads_upsample - - time_embed_dim = model_channels * 4 - self.time_embed = nn.Sequential( - linear(model_channels, time_embed_dim), - nn.SiLU(), - linear(time_embed_dim, time_embed_dim), - ) - - self.input_blocks = nn.ModuleList( - [ - TimestepEmbedSequential( - conv_nd(dims, in_channels, model_channels, 3, padding=1) - ) - ] - ) - self._feature_size = model_channels - input_block_chans = [model_channels] - ch = model_channels - ds = 1 - for level, mult in enumerate(channel_mult): - for _ in range(num_res_blocks): - layers = [ - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=mult * model_channels, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ) - ] - ch = mult * model_channels - if ds in attention_resolutions: - layers.append( - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ) - ) - self.input_blocks.append(TimestepEmbedSequential(*layers)) - self._feature_size += ch - input_block_chans.append(ch) - if level != len(channel_mult) - 1: - out_ch = ch - self.input_blocks.append( - TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - out_channels=out_ch, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - down=True, - ) - if resblock_updown - else Downsample( - ch, conv_resample, dims=dims, out_channels=out_ch - ) - ) - ) - ch = out_ch - input_block_chans.append(ch) - ds *= 2 - self._feature_size += ch - - self.middle_block = TimestepEmbedSequential( - ResBlock( - ch, - time_embed_dim, - dropout, - 
dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - AttentionBlock( - ch, - use_checkpoint=use_checkpoint, - num_heads=num_heads, - num_head_channels=num_head_channels, - use_new_attention_order=use_new_attention_order, - ), - ResBlock( - ch, - time_embed_dim, - dropout, - dims=dims, - use_checkpoint=use_checkpoint, - use_scale_shift_norm=use_scale_shift_norm, - ), - ) - self._feature_size += ch - self.pool = pool - if pool == "adaptive": - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - nn.AdaptiveAvgPool2d((1, 1)), - zero_module(conv_nd(dims, ch, out_channels, 1)), - nn.Flatten(), - ) - elif pool == "attention": - assert num_head_channels != -1 - self.out = nn.Sequential( - normalization(ch), - nn.SiLU(), - AttentionPool2d( - (image_size // ds), ch, num_head_channels, out_channels - ), - ) - elif pool == "spatial": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - nn.ReLU(), - nn.Linear(2048, self.out_channels), - ) - elif pool == "spatial_v2": - self.out = nn.Sequential( - nn.Linear(self._feature_size, 2048), - normalization(2048), - nn.SiLU(), - nn.Linear(2048, self.out_channels), - ) - else: - raise NotImplementedError(f"Unexpected {pool} pooling") - - def convert_to_fp16(self): - """ - Convert the torso of the model to float16. - """ - self.input_blocks.apply(convert_module_to_f16) - self.middle_block.apply(convert_module_to_f16) - - def convert_to_fp32(self): - """ - Convert the torso of the model to float32. - """ - self.input_blocks.apply(convert_module_to_f32) - self.middle_block.apply(convert_module_to_f32) - - def forward(self, x, timesteps): - """ - Apply the model to an input batch. - :param x: an [N x C x ...] Tensor of inputs. - :param timesteps: a 1-D batch of timesteps. - :return: an [N x K] Tensor of outputs. - """ - emb = self.time_embed(timestep_embedding(timesteps, self.model_channels)) - - results = [] - h = x.type(self.dtype) - for module in self.input_blocks: - h = module(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = self.middle_block(h, emb) - if self.pool.startswith("spatial"): - results.append(h.type(x.dtype).mean(dim=(2, 3))) - h = th.cat(results, axis=-1) - return self.out(h) - else: - h = h.type(x.dtype) - return self.out(h) - diff --git a/ldm/modules/diffusionmodules/util.py b/ldm/modules/diffusionmodules/util.py deleted file mode 100644 index a952e6c4..00000000 --- a/ldm/modules/diffusionmodules/util.py +++ /dev/null @@ -1,267 +0,0 @@ -# adopted from -# https://github.com/openai/improved-diffusion/blob/main/improved_diffusion/gaussian_diffusion.py -# and -# https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py -# and -# https://github.com/openai/guided-diffusion/blob/0ba878e517b276c45d1195eb29f6f5f72659a05b/guided_diffusion/nn.py -# -# thanks! 
- - -import os -import math -import torch -import torch.nn as nn -import numpy as np -from einops import repeat - -from ldm.util import instantiate_from_config - - -def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): - if schedule == "linear": - betas = ( - torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2 - ) - - elif schedule == "cosine": - timesteps = ( - torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s - ) - alphas = timesteps / (1 + cosine_s) * np.pi / 2 - alphas = torch.cos(alphas).pow(2) - alphas = alphas / alphas[0] - betas = 1 - alphas[1:] / alphas[:-1] - betas = np.clip(betas, a_min=0, a_max=0.999) - - elif schedule == "sqrt_linear": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) - elif schedule == "sqrt": - betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5 - else: - raise ValueError(f"schedule '{schedule}' unknown.") - return betas.numpy() - - -def make_ddim_timesteps(ddim_discr_method, num_ddim_timesteps, num_ddpm_timesteps, verbose=True): - if ddim_discr_method == 'uniform': - c = num_ddpm_timesteps // num_ddim_timesteps - ddim_timesteps = np.asarray(list(range(0, num_ddpm_timesteps, c))) - elif ddim_discr_method == 'quad': - ddim_timesteps = ((np.linspace(0, np.sqrt(num_ddpm_timesteps * .8), num_ddim_timesteps)) ** 2).astype(int) - else: - raise NotImplementedError(f'There is no ddim discretization method called "{ddim_discr_method}"') - - # assert ddim_timesteps.shape[0] == num_ddim_timesteps - # add one to get the final alpha values right (the ones from first scale to data during sampling) - steps_out = ddim_timesteps + 1 - if verbose: - print(f'Selected timesteps for ddim sampler: {steps_out}') - return steps_out - - -def make_ddim_sampling_parameters(alphacums, ddim_timesteps, eta, verbose=True): - # select alphas for computing the variance schedule - alphas = alphacums[ddim_timesteps] - alphas_prev = np.asarray([alphacums[0]] + alphacums[ddim_timesteps[:-1]].tolist()) - - # according the the formula provided in https://arxiv.org/abs/2010.02502 - sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev)) - if verbose: - print(f'Selected alphas for ddim sampler: a_t: {alphas}; a_(t-1): {alphas_prev}') - print(f'For the chosen value of eta, which is {eta}, ' - f'this results in the following sigma_t schedule for ddim sampler {sigmas}') - return sigmas, alphas, alphas_prev - - -def betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, - which defines the cumulative product of (1-beta) over time from t = [0,1]. - :param num_diffusion_timesteps: the number of betas to produce. - :param alpha_bar: a lambda that takes an argument t from 0 to 1 and - produces the cumulative product of (1-beta) up to that - part of the diffusion process. - :param max_beta: the maximum beta to use; use values lower than 1 to - prevent singularities. 
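The three helpers above (beta schedule, DDIM timestep selection, DDIM sigmas) chain together as follows; a standalone numpy sketch with illustrative settings (1000 DDPM steps, 50 uniform DDIM steps, eta=1.0):

    import numpy as np

    n_timestep, linear_start, linear_end, eta = 1000, 1e-4, 2e-2, 1.0

    # "linear" schedule: linspace in sqrt-beta space, then squared
    betas = np.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep) ** 2
    alphas_cumprod = np.cumprod(1.0 - betas)

    # uniform DDIM discretisation, shifted by one as in make_ddim_timesteps
    c = n_timestep // 50
    ddim_timesteps = np.asarray(list(range(0, n_timestep, c))) + 1

    alphas = alphas_cumprod[ddim_timesteps]
    alphas_prev = np.asarray([alphas_cumprod[0]] + alphas_cumprod[ddim_timesteps[:-1]].tolist())
    sigmas = eta * np.sqrt((1 - alphas_prev) / (1 - alphas) * (1 - alphas / alphas_prev))
    print(sigmas.shape)  # (50,)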
- """ - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return np.array(betas) - - -def extract_into_tensor(a, t, x_shape): - b, *_ = t.shape - out = a.gather(-1, t) - return out.reshape(b, *((1,) * (len(x_shape) - 1))) - - -def checkpoint(func, inputs, params, flag): - """ - Evaluate a function without caching intermediate activations, allowing for - reduced memory at the expense of extra compute in the backward pass. - :param func: the function to evaluate. - :param inputs: the argument sequence to pass to `func`. - :param params: a sequence of parameters `func` depends on but does not - explicitly take as arguments. - :param flag: if False, disable gradient checkpointing. - """ - if flag: - args = tuple(inputs) + tuple(params) - return CheckpointFunction.apply(func, len(inputs), *args) - else: - return func(*inputs) - - -class CheckpointFunction(torch.autograd.Function): - @staticmethod - def forward(ctx, run_function, length, *args): - ctx.run_function = run_function - ctx.input_tensors = list(args[:length]) - ctx.input_params = list(args[length:]) - - with torch.no_grad(): - output_tensors = ctx.run_function(*ctx.input_tensors) - return output_tensors - - @staticmethod - def backward(ctx, *output_grads): - ctx.input_tensors = [x.detach().requires_grad_(True) for x in ctx.input_tensors] - with torch.enable_grad(): - # Fixes a bug where the first op in run_function modifies the - # Tensor storage in place, which is not allowed for detach()'d - # Tensors. - shallow_copies = [x.view_as(x) for x in ctx.input_tensors] - output_tensors = ctx.run_function(*shallow_copies) - input_grads = torch.autograd.grad( - output_tensors, - ctx.input_tensors + ctx.input_params, - output_grads, - allow_unused=True, - ) - del ctx.input_tensors - del ctx.input_params - del output_tensors - return (None, None) + input_grads - - -def timestep_embedding(timesteps, dim, max_period=10000, repeat_only=False): - """ - Create sinusoidal timestep embeddings. - :param timesteps: a 1-D Tensor of N indices, one per batch element. - These may be fractional. - :param dim: the dimension of the output. - :param max_period: controls the minimum frequency of the embeddings. - :return: an [N x dim] Tensor of positional embeddings. - """ - if not repeat_only: - half = dim // 2 - freqs = torch.exp( - -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32) / half - ).to(device=timesteps.device) - args = timesteps[:, None].float() * freqs[None] - embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1) - if dim % 2: - embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1) - else: - embedding = repeat(timesteps, 'b -> b d', d=dim) - return embedding - - -def zero_module(module): - """ - Zero out the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().zero_() - return module - - -def scale_module(module, scale): - """ - Scale the parameters of a module and return it. - """ - for p in module.parameters(): - p.detach().mul_(scale) - return module - - -def mean_flat(tensor): - """ - Take the mean over all non-batch dimensions. - """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def normalization(channels): - """ - Make a standard normalization layer. - :param channels: number of input channels. - :return: an nn.Module for normalization. 
- """ - return GroupNorm32(32, channels) - - -# PyTorch 1.7 has SiLU, but we support PyTorch 1.5. -class SiLU(nn.Module): - def forward(self, x): - return x * torch.sigmoid(x) - - -class GroupNorm32(nn.GroupNorm): - def forward(self, x): - return super().forward(x.float()).type(x.dtype) - -def conv_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D convolution module. - """ - if dims == 1: - return nn.Conv1d(*args, **kwargs) - elif dims == 2: - return nn.Conv2d(*args, **kwargs) - elif dims == 3: - return nn.Conv3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -def linear(*args, **kwargs): - """ - Create a linear module. - """ - return nn.Linear(*args, **kwargs) - - -def avg_pool_nd(dims, *args, **kwargs): - """ - Create a 1D, 2D, or 3D average pooling module. - """ - if dims == 1: - return nn.AvgPool1d(*args, **kwargs) - elif dims == 2: - return nn.AvgPool2d(*args, **kwargs) - elif dims == 3: - return nn.AvgPool3d(*args, **kwargs) - raise ValueError(f"unsupported dimensions: {dims}") - - -class HybridConditioner(nn.Module): - - def __init__(self, c_concat_config, c_crossattn_config): - super().__init__() - self.concat_conditioner = instantiate_from_config(c_concat_config) - self.crossattn_conditioner = instantiate_from_config(c_crossattn_config) - - def forward(self, c_concat, c_crossattn): - c_concat = self.concat_conditioner(c_concat) - c_crossattn = self.crossattn_conditioner(c_crossattn) - return {'c_concat': [c_concat], 'c_crossattn': [c_crossattn]} - - -def noise_like(shape, device, repeat=False): - repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1))) - noise = lambda: torch.randn(shape, device=device) - return repeat_noise() if repeat else noise() \ No newline at end of file diff --git a/ldm/modules/distributions/__init__.py b/ldm/modules/distributions/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ldm/modules/distributions/distributions.py b/ldm/modules/distributions/distributions.py deleted file mode 100644 index f2b8ef90..00000000 --- a/ldm/modules/distributions/distributions.py +++ /dev/null @@ -1,92 +0,0 @@ -import torch -import numpy as np - - -class AbstractDistribution: - def sample(self): - raise NotImplementedError() - - def mode(self): - raise NotImplementedError() - - -class DiracDistribution(AbstractDistribution): - def __init__(self, value): - self.value = value - - def sample(self): - return self.value - - def mode(self): - return self.value - - -class DiagonalGaussianDistribution(object): - def __init__(self, parameters, deterministic=False): - self.parameters = parameters - self.mean, self.logvar = torch.chunk(parameters, 2, dim=1) - self.logvar = torch.clamp(self.logvar, -30.0, 20.0) - self.deterministic = deterministic - self.std = torch.exp(0.5 * self.logvar) - self.var = torch.exp(self.logvar) - if self.deterministic: - self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device) - - def sample(self): - x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device) - return x - - def kl(self, other=None): - if self.deterministic: - return torch.Tensor([0.]) - else: - if other is None: - return 0.5 * torch.sum(torch.pow(self.mean, 2) - + self.var - 1.0 - self.logvar, - dim=[1, 2, 3]) - else: - return 0.5 * torch.sum( - torch.pow(self.mean - other.mean, 2) / other.var - + self.var / other.var - 1.0 - self.logvar + other.logvar, - dim=[1, 2, 3]) - - def nll(self, sample, dims=[1,2,3]): - if 
self.deterministic: - return torch.Tensor([0.]) - logtwopi = np.log(2.0 * np.pi) - return 0.5 * torch.sum( - logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, - dim=dims) - - def mode(self): - return self.mean - - -def normal_kl(mean1, logvar1, mean2, logvar2): - """ - source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12 - Compute the KL divergence between two gaussians. - Shapes are automatically broadcasted, so batches can be compared to - scalars, among other use cases. - """ - tensor = None - for obj in (mean1, logvar1, mean2, logvar2): - if isinstance(obj, torch.Tensor): - tensor = obj - break - assert tensor is not None, "at least one argument must be a Tensor" - - # Force variances to be Tensors. Broadcasting helps convert scalars to - # Tensors, but it does not work for torch.exp(). - logvar1, logvar2 = [ - x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor) - for x in (logvar1, logvar2) - ] - - return 0.5 * ( - -1.0 - + logvar2 - - logvar1 - + torch.exp(logvar1 - logvar2) - + ((mean1 - mean2) ** 2) * torch.exp(-logvar2) - ) diff --git a/ldm/modules/ema.py b/ldm/modules/ema.py deleted file mode 100644 index c8c75af4..00000000 --- a/ldm/modules/ema.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -from torch import nn - - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates - else torch.tensor(-1,dtype=torch.int)) - - for name, p in model.named_parameters(): - if p.requires_grad: - #remove as '.'-character is not allowed in buffers - s_name = name.replace('.','') - self.m_name2s_name.update({name:s_name}) - self.register_buffer(s_name,p.clone().detach().data) - - self.collected_params = [] - - def forward(self,model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. 
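The LitEma update above warms the decay up as min(decay, (1 + n) / (10 + n)) and then moves each shadow buffer towards the live parameter by (1 - decay) of the gap. Written out for a single tensor (simplified sketch, not the buffer bookkeeping of the real class):

    import torch

    def ema_update(shadow: torch.Tensor, param: torch.Tensor, base_decay: float, num_updates: int) -> torch.Tensor:
        decay = min(base_decay, (1 + num_updates) / (10 + num_updates))  # warm-up
        return shadow - (1.0 - decay) * (shadow - param)

    shadow, param = torch.tensor(0.0), torch.tensor(1.0)
    for n in range(1, 6):
        shadow = ema_update(shadow, param, base_decay=0.9999, num_updates=n)
    print(shadow)  # ~0.998: the warm-up keeps the effective decay low early on, so the shadow catches up fast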
After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. - """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/ldm/modules/encoders/__init__.py b/ldm/modules/encoders/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/ldm/modules/encoders/modules.py b/ldm/modules/encoders/modules.py deleted file mode 100644 index ededbe43..00000000 --- a/ldm/modules/encoders/modules.py +++ /dev/null @@ -1,234 +0,0 @@ -import torch -import torch.nn as nn -from functools import partial -import clip -from einops import rearrange, repeat -from transformers import CLIPTokenizer, CLIPTextModel -import kornia - -from ldm.modules.x_transformer import Encoder, TransformerWrapper # TODO: can we directly rely on lucidrains code and simply add this as a reuirement? --> test - - -class AbstractEncoder(nn.Module): - def __init__(self): - super().__init__() - - def encode(self, *args, **kwargs): - raise NotImplementedError - - - -class ClassEmbedder(nn.Module): - def __init__(self, embed_dim, n_classes=1000, key='class'): - super().__init__() - self.key = key - self.embedding = nn.Embedding(n_classes, embed_dim) - - def forward(self, batch, key=None): - if key is None: - key = self.key - # this is for use in crossattn - c = batch[key][:, None] - c = self.embedding(c) - return c - - -class TransformerEmbedder(AbstractEncoder): - """Some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size, max_seq_len=77, device="cuda"): - super().__init__() - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer)) - - def forward(self, tokens): - tokens = tokens.to(self.device) # meh - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, x): - return self(x) - - -class BERTTokenizer(AbstractEncoder): - """ Uses a pretrained BERT tokenizer by huggingface. 
Vocab size: 30522 (?)""" - def __init__(self, device="cuda", vq_interface=True, max_length=77): - super().__init__() - from transformers import BertTokenizerFast # TODO: add to reuquirements - self.tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased") - self.device = device - self.vq_interface = vq_interface - self.max_length = max_length - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - return tokens - - @torch.no_grad() - def encode(self, text): - tokens = self(text) - if not self.vq_interface: - return tokens - return None, None, [None, None, tokens] - - def decode(self, text): - return text - - -class BERTEmbedder(AbstractEncoder): - """Uses the BERT tokenizr model and add some transformer encoder layers""" - def __init__(self, n_embed, n_layer, vocab_size=30522, max_seq_len=77, - device="cuda",use_tokenizer=True, embedding_dropout=0.0): - super().__init__() - self.use_tknz_fn = use_tokenizer - if self.use_tknz_fn: - self.tknz_fn = BERTTokenizer(vq_interface=False, max_length=max_seq_len) - self.device = device - self.transformer = TransformerWrapper(num_tokens=vocab_size, max_seq_len=max_seq_len, - attn_layers=Encoder(dim=n_embed, depth=n_layer), - emb_dropout=embedding_dropout) - - def forward(self, text): - if self.use_tknz_fn: - tokens = self.tknz_fn(text)#.to(self.device) - else: - tokens = text - z = self.transformer(tokens, return_embeddings=True) - return z - - def encode(self, text): - # output of length 77 - return self(text) - - -class SpatialRescaler(nn.Module): - def __init__(self, - n_stages=1, - method='bilinear', - multiplier=0.5, - in_channels=3, - out_channels=None, - bias=False): - super().__init__() - self.n_stages = n_stages - assert self.n_stages >= 0 - assert method in ['nearest','linear','bilinear','trilinear','bicubic','area'] - self.multiplier = multiplier - self.interpolator = partial(torch.nn.functional.interpolate, mode=method) - self.remap_output = out_channels is not None - if self.remap_output: - print(f'Spatial Rescaler mapping from {in_channels} to {out_channels} channels after resizing.') - self.channel_mapper = nn.Conv2d(in_channels,out_channels,1,bias=bias) - - def forward(self,x): - for stage in range(self.n_stages): - x = self.interpolator(x, scale_factor=self.multiplier) - - - if self.remap_output: - x = self.channel_mapper(x) - return x - - def encode(self, x): - return self(x) - -class FrozenCLIPEmbedder(AbstractEncoder): - """Uses the CLIP transformer encoder for text (from Hugging Face)""" - def __init__(self, version="openai/clip-vit-large-patch14", device="cuda", max_length=77): - super().__init__() - self.tokenizer = CLIPTokenizer.from_pretrained(version) - self.transformer = CLIPTextModel.from_pretrained(version) - self.device = device - self.max_length = max_length - self.freeze() - - def freeze(self): - self.transformer = self.transformer.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - batch_encoding = self.tokenizer(text, truncation=True, max_length=self.max_length, return_length=True, - return_overflowing_tokens=False, padding="max_length", return_tensors="pt") - tokens = batch_encoding["input_ids"].to(self.device) - outputs = self.transformer(input_ids=tokens) - - z = outputs.last_hidden_state - return z - - def encode(self, text): - return self(text) - 
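FrozenCLIPEmbedder above is the text conditioning path used for cross-attention: tokenize, run the CLIP text transformer, and take last_hidden_state. A usage sketch (assumes the "openai/clip-vit-large-patch14" weights are cached or downloadable):

    import torch
    from transformers import CLIPTokenizer, CLIPTextModel

    tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
    text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").eval()

    with torch.no_grad():
        batch = tokenizer(["a photograph of an astronaut riding a horse"],
                          truncation=True, max_length=77, padding="max_length",
                          return_tensors="pt")
        z = text_model(input_ids=batch["input_ids"]).last_hidden_state

    print(z.shape)  # torch.Size([1, 77, 768]) -- one 768-d embedding per token position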
- -class FrozenCLIPTextEmbedder(nn.Module): - """ - Uses the CLIP transformer encoder for text. - """ - def __init__(self, version='ViT-L/14', device="cuda", max_length=77, n_repeat=1, normalize=True): - super().__init__() - self.model, _ = clip.load(version, jit=False, device="cpu") - self.device = device - self.max_length = max_length - self.n_repeat = n_repeat - self.normalize = normalize - - def freeze(self): - self.model = self.model.eval() - for param in self.parameters(): - param.requires_grad = False - - def forward(self, text): - tokens = clip.tokenize(text).to(self.device) - z = self.model.encode_text(tokens) - if self.normalize: - z = z / torch.linalg.norm(z, dim=1, keepdim=True) - return z - - def encode(self, text): - z = self(text) - if z.ndim==2: - z = z[:, None, :] - z = repeat(z, 'b 1 d -> b k d', k=self.n_repeat) - return z - - -class FrozenClipImageEmbedder(nn.Module): - """ - Uses the CLIP image encoder. - """ - def __init__( - self, - model, - jit=False, - device='cuda' if torch.cuda.is_available() else 'cpu', - antialias=False, - ): - super().__init__() - self.model, _ = clip.load(name=model, device=device, jit=jit) - - self.antialias = antialias - - self.register_buffer('mean', torch.Tensor([0.48145466, 0.4578275, 0.40821073]), persistent=False) - self.register_buffer('std', torch.Tensor([0.26862954, 0.26130258, 0.27577711]), persistent=False) - - def preprocess(self, x): - # normalize to [0,1] - x = kornia.geometry.resize(x, (224, 224), - interpolation='bicubic',align_corners=True, - antialias=self.antialias) - x = (x + 1.) / 2. - # renormalize according to clip - x = kornia.enhance.normalize(x, self.mean, self.std) - return x - - def forward(self, x): - # x is assumed to be in range [-1,1] - return self.model.encode_image(self.preprocess(x)) - - -if __name__ == "__main__": - from ldm.util import count_params - model = FrozenCLIPEmbedder() - count_params(model, verbose=True) \ No newline at end of file diff --git a/ldm/modules/image_degradation/__init__.py b/ldm/modules/image_degradation/__init__.py deleted file mode 100644 index 7836cada..00000000 --- a/ldm/modules/image_degradation/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from ldm.modules.image_degradation.bsrgan import degradation_bsrgan_variant as degradation_fn_bsr -from ldm.modules.image_degradation.bsrgan_light import degradation_bsrgan_variant as degradation_fn_bsr_light diff --git a/ldm/modules/image_degradation/bsrgan.py b/ldm/modules/image_degradation/bsrgan.py deleted file mode 100644 index 32ef5616..00000000 --- a/ldm/modules/image_degradation/bsrgan.py +++ /dev/null @@ -1,730 +0,0 @@ -# -*- coding: utf-8 -*- -""" -# -------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] 
- - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. - Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = 
np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def 
classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. - threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=2 * random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', 2 * random.randint(2, 11) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. -# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. 
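add_sharpening() above is the USM procedure spelled out in its docstring (sharpened image, threshold mask, blurred mask, blend). A standalone run on a synthetic [0, 1] float image, using the same defaults (weight 0.5, threshold 10, odd blur radius):

    import numpy as np
    import cv2

    img = np.random.rand(64, 64, 3).astype(np.float32)

    radius = 51                                      # cv2.GaussianBlur needs an odd kernel size
    blur = cv2.GaussianBlur(img, (radius, radius), 0)
    residual = img - blur
    mask = (np.abs(residual) * 255 > 10).astype('float32')
    soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0)
    sharpened = np.clip(img + 0.5 * residual, 0, 1)
    out = soft_mask * sharpened + (1 - soft_mask) * img

    print(out.shape, out.min() >= 0, out.max() <= 1)  # (64, 64, 3) True True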
- D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. - vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. - noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(30, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
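add_JPEG_noise() above injects compression artefacts by round-tripping the image through an in-memory JPEG encode/decode at a random quality factor. The core of that round trip, sketched directly on a uint8 BGR image (the real function first converts its [0, 1] RGB float input with util.single2uint and cv2.cvtColor):

    import numpy as np
    import cv2

    img = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)   # uint8 BGR image
    ok, enc = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), 30])
    degraded = cv2.imdecode(enc, 1)
    print(ok, degraded.shape, degraded.dtype)  # True (64, 64, 3) uint8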
# mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] # nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - elif i == 1: - image = add_blur(image, sf=sf) - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] # nearest downsampling - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=2, noise_level2=25) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image":image} - return example - - -# TODO incase there is a pickle error one needs to replace a += x with a = a + x in add_speckle_noise etc... -def degradation_bsrgan_plus(img, sf=4, shuffle_prob=0.5, use_sharp=True, lq_patchsize=64, isp_model=None): - """ - This is an extended degradation model by combining - the degradation models of BSRGAN and Real-ESRGAN - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - use_shuffle: the degradation shuffle - use_sharp: sharpening the img - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] 
# mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - if use_sharp: - img = add_sharpening(img) - hq = img.copy() - - if random.random() < shuffle_prob: - shuffle_order = random.sample(range(13), 13) - else: - shuffle_order = list(range(13)) - # local shuffle for noise, JPEG is always the last one - shuffle_order[2:6] = random.sample(shuffle_order[2:6], len(range(2, 6))) - shuffle_order[9:13] = random.sample(shuffle_order[9:13], len(range(9, 13))) - - poisson_prob, speckle_prob, isp_prob = 0.1, 0.1, 0.1 - - for i in shuffle_order: - if i == 0: - img = add_blur(img, sf=sf) - elif i == 1: - img = add_resize(img, sf=sf) - elif i == 2: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 3: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 4: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 5: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - elif i == 6: - img = add_JPEG_noise(img) - elif i == 7: - img = add_blur(img, sf=sf) - elif i == 8: - img = add_resize(img, sf=sf) - elif i == 9: - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=25) - elif i == 10: - if random.random() < poisson_prob: - img = add_Poisson_noise(img) - elif i == 11: - if random.random() < speckle_prob: - img = add_speckle_noise(img) - elif i == 12: - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - else: - print('check the shuffle!') - - # resize to desired size - img = cv2.resize(img, (int(1 / sf * hq.shape[1]), int(1 / sf * hq.shape[0])), - interpolation=random.choice([1, 2, 3])) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf, lq_patchsize) - - return img, hq - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - print(img) - img = util.uint2single(img) - print(img) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_lq = deg_fn(img) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') - - diff --git a/ldm/modules/image_degradation/bsrgan_light.py b/ldm/modules/image_degradation/bsrgan_light.py deleted file mode 100644 index 9e1f8239..00000000 --- a/ldm/modules/image_degradation/bsrgan_light.py +++ /dev/null @@ -1,650 +0,0 @@ -# -*- coding: utf-8 -*- -import numpy as np -import cv2 -import torch - -from functools import partial -import random -from scipy import ndimage -import scipy -import scipy.stats as ss -from scipy.interpolate import interp2d -from scipy.linalg import orth -import albumentations - -import ldm.modules.image_degradation.utils_image as util - -""" -# 
-------------------------------------------- -# Super-Resolution -# -------------------------------------------- -# -# Kai Zhang (cskaizhang@gmail.com) -# https://github.com/cszn -# From 2019/03--2021/08 -# -------------------------------------------- -""" - - -def modcrop_np(img, sf): - ''' - Args: - img: numpy image, WxH or WxHxC - sf: scale factor - Return: - cropped image - ''' - w, h = img.shape[:2] - im = np.copy(img) - return im[:w - w % sf, :h - h % sf, ...] - - -""" -# -------------------------------------------- -# anisotropic Gaussian kernels -# -------------------------------------------- -""" - - -def analytic_kernel(k): - """Calculate the X4 kernel from the X2 kernel (for proof see appendix in paper)""" - k_size = k.shape[0] - # Calculate the big kernels size - big_k = np.zeros((3 * k_size - 2, 3 * k_size - 2)) - # Loop over the small kernel to fill the big one - for r in range(k_size): - for c in range(k_size): - big_k[2 * r:2 * r + k_size, 2 * c:2 * c + k_size] += k[r, c] * k - # Crop the edges of the big kernel to ignore very small values and increase run time of SR - crop = k_size // 2 - cropped_big_k = big_k[crop:-crop, crop:-crop] - # Normalize to 1 - return cropped_big_k / cropped_big_k.sum() - - -def anisotropic_Gaussian(ksize=15, theta=np.pi, l1=6, l2=6): - """ generate an anisotropic Gaussian kernel - Args: - ksize : e.g., 15, kernel size - theta : [0, pi], rotation angle range - l1 : [0.1,50], scaling of eigenvalues - l2 : [0.1,l1], scaling of eigenvalues - If l1 = l2, will get an isotropic Gaussian kernel. - Returns: - k : kernel - """ - - v = np.dot(np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]]), np.array([1., 0.])) - V = np.array([[v[0], v[1]], [v[1], -v[0]]]) - D = np.array([[l1, 0], [0, l2]]) - Sigma = np.dot(np.dot(V, D), np.linalg.inv(V)) - k = gm_blur_kernel(mean=[0, 0], cov=Sigma, size=ksize) - - return k - - -def gm_blur_kernel(mean, cov, size=15): - center = size / 2.0 + 0.5 - k = np.zeros([size, size]) - for y in range(size): - for x in range(size): - cy = y - center + 1 - cx = x - center + 1 - k[y, x] = ss.multivariate_normal.pdf([cx, cy], mean=mean, cov=cov) - - k = k / np.sum(k) - return k - - -def shift_pixel(x, sf, upper_left=True): - """shift pixel for super-resolution with different scale factors - Args: - x: WxHxC or WxH - sf: scale factor - upper_left: shift direction - """ - h, w = x.shape[:2] - shift = (sf - 1) * 0.5 - xv, yv = np.arange(0, w, 1.0), np.arange(0, h, 1.0) - if upper_left: - x1 = xv + shift - y1 = yv + shift - else: - x1 = xv - shift - y1 = yv - shift - - x1 = np.clip(x1, 0, w - 1) - y1 = np.clip(y1, 0, h - 1) - - if x.ndim == 2: - x = interp2d(xv, yv, x)(x1, y1) - if x.ndim == 3: - for i in range(x.shape[-1]): - x[:, :, i] = interp2d(xv, yv, x[:, :, i])(x1, y1) - - return x - - -def blur(x, k): - ''' - x: image, NxcxHxW - k: kernel, Nx1xhxw - ''' - n, c = x.shape[:2] - p1, p2 = (k.shape[-2] - 1) // 2, (k.shape[-1] - 1) // 2 - x = torch.nn.functional.pad(x, pad=(p1, p2, p1, p2), mode='replicate') - k = k.repeat(1, c, 1, 1) - k = k.view(-1, 1, k.shape[2], k.shape[3]) - x = x.view(1, -1, x.shape[2], x.shape[3]) - x = torch.nn.functional.conv2d(x, k, bias=None, stride=1, padding=0, groups=n * c) - x = x.view(n, c, x.shape[2], x.shape[3]) - - return x - - -def gen_kernel(k_size=np.array([15, 15]), scale_factor=np.array([4, 4]), min_var=0.6, max_var=10., noise_level=0): - """" - # modified version of https://github.com/assafshocher/BlindSR_dataset_generator - # Kai Zhang - # min_var = 0.175 * sf # 
variance of the gaussian kernel will be sampled between min_var and max_var - # max_var = 2.5 * sf - """ - # Set random eigen-vals (lambdas) and angle (theta) for COV matrix - lambda_1 = min_var + np.random.rand() * (max_var - min_var) - lambda_2 = min_var + np.random.rand() * (max_var - min_var) - theta = np.random.rand() * np.pi # random theta - noise = -noise_level + np.random.rand(*k_size) * noise_level * 2 - - # Set COV matrix using Lambdas and Theta - LAMBDA = np.diag([lambda_1, lambda_2]) - Q = np.array([[np.cos(theta), -np.sin(theta)], - [np.sin(theta), np.cos(theta)]]) - SIGMA = Q @ LAMBDA @ Q.T - INV_SIGMA = np.linalg.inv(SIGMA)[None, None, :, :] - - # Set expectation position (shifting kernel for aligned image) - MU = k_size // 2 - 0.5 * (scale_factor - 1) # - 0.5 * (scale_factor - k_size % 2) - MU = MU[None, None, :, None] - - # Create meshgrid for Gaussian - [X, Y] = np.meshgrid(range(k_size[0]), range(k_size[1])) - Z = np.stack([X, Y], 2)[:, :, :, None] - - # Calcualte Gaussian for every pixel of the kernel - ZZ = Z - MU - ZZ_t = ZZ.transpose(0, 1, 3, 2) - raw_kernel = np.exp(-0.5 * np.squeeze(ZZ_t @ INV_SIGMA @ ZZ)) * (1 + noise) - - # shift the kernel so it will be centered - # raw_kernel_centered = kernel_shift(raw_kernel, scale_factor) - - # Normalize the kernel and return - # kernel = raw_kernel_centered / np.sum(raw_kernel_centered) - kernel = raw_kernel / np.sum(raw_kernel) - return kernel - - -def fspecial_gaussian(hsize, sigma): - hsize = [hsize, hsize] - siz = [(hsize[0] - 1.0) / 2.0, (hsize[1] - 1.0) / 2.0] - std = sigma - [x, y] = np.meshgrid(np.arange(-siz[1], siz[1] + 1), np.arange(-siz[0], siz[0] + 1)) - arg = -(x * x + y * y) / (2 * std * std) - h = np.exp(arg) - h[h < scipy.finfo(float).eps * h.max()] = 0 - sumh = h.sum() - if sumh != 0: - h = h / sumh - return h - - -def fspecial_laplacian(alpha): - alpha = max([0, min([alpha, 1])]) - h1 = alpha / (alpha + 1) - h2 = (1 - alpha) / (alpha + 1) - h = [[h1, h2, h1], [h2, -4 / (alpha + 1), h2], [h1, h2, h1]] - h = np.array(h) - return h - - -def fspecial(filter_type, *args, **kwargs): - ''' - python code from: - https://github.com/ronaldosena/imagens-medicas-2/blob/40171a6c259edec7827a6693a93955de2bd39e76/Aulas/aula_2_-_uniform_filter/matlab_fspecial.py - ''' - if filter_type == 'gaussian': - return fspecial_gaussian(*args, **kwargs) - if filter_type == 'laplacian': - return fspecial_laplacian(*args, **kwargs) - - -""" -# -------------------------------------------- -# degradation models -# -------------------------------------------- -""" - - -def bicubic_degradation(x, sf=3): - ''' - Args: - x: HxWxC image, [0, 1] - sf: down-scale factor - Return: - bicubicly downsampled LR image - ''' - x = util.imresize_np(x, scale=1 / sf) - return x - - -def srmd_degradation(x, k, sf=3): - ''' blur + bicubic downsampling - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - Reference: - @inproceedings{zhang2018learning, - title={Learning a single convolutional super-resolution network for multiple degradations}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={3262--3271}, - year={2018} - } - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') # 'nearest' | 'mirror' - x = bicubic_degradation(x, sf=sf) - return x - - -def dpsr_degradation(x, k, sf=3): - ''' bicubic downsampling + blur - Args: - x: HxWxC image, [0, 1] - k: hxw, double - sf: down-scale factor 
- Return: - downsampled LR image - Reference: - @inproceedings{zhang2019deep, - title={Deep Plug-and-Play Super-Resolution for Arbitrary Blur Kernels}, - author={Zhang, Kai and Zuo, Wangmeng and Zhang, Lei}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, - pages={1671--1681}, - year={2019} - } - ''' - x = bicubic_degradation(x, sf=sf) - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - return x - - -def classical_degradation(x, k, sf=3): - ''' blur + downsampling - Args: - x: HxWxC image, [0, 1]/[0, 255] - k: hxw, double - sf: down-scale factor - Return: - downsampled LR image - ''' - x = ndimage.filters.convolve(x, np.expand_dims(k, axis=2), mode='wrap') - # x = filters.correlate(x, np.expand_dims(np.flip(k), axis=2)) - st = 0 - return x[st::sf, st::sf, ...] - - -def add_sharpening(img, weight=0.5, radius=50, threshold=10): - """USM sharpening. borrowed from real-ESRGAN - Input image: I; Blurry image: B. - 1. K = I + weight * (I - B) - 2. Mask = 1 if abs(I - B) > threshold, else: 0 - 3. Blur mask: - 4. Out = Mask * K + (1 - Mask) * I - Args: - img (Numpy array): Input image, HWC, BGR; float32, [0, 1]. - weight (float): Sharp weight. Default: 1. - radius (float): Kernel size of Gaussian blur. Default: 50. - threshold (int): - """ - if radius % 2 == 0: - radius += 1 - blur = cv2.GaussianBlur(img, (radius, radius), 0) - residual = img - blur - mask = np.abs(residual) * 255 > threshold - mask = mask.astype('float32') - soft_mask = cv2.GaussianBlur(mask, (radius, radius), 0) - - K = img + weight * residual - K = np.clip(K, 0, 1) - return soft_mask * K + (1 - soft_mask) * img - - -def add_blur(img, sf=4): - wd2 = 4.0 + sf - wd = 2.0 + 0.2 * sf - - wd2 = wd2/4 - wd = wd/4 - - if random.random() < 0.5: - l1 = wd2 * random.random() - l2 = wd2 * random.random() - k = anisotropic_Gaussian(ksize=random.randint(2, 11) + 3, theta=random.random() * np.pi, l1=l1, l2=l2) - else: - k = fspecial('gaussian', random.randint(2, 4) + 3, wd * random.random()) - img = ndimage.filters.convolve(img, np.expand_dims(k, axis=2), mode='mirror') - - return img - - -def add_resize(img, sf=4): - rnum = np.random.rand() - if rnum > 0.8: # up - sf1 = random.uniform(1, 2) - elif rnum < 0.7: # down - sf1 = random.uniform(0.5 / sf, 1) - else: - sf1 = 1.0 - img = cv2.resize(img, (int(sf1 * img.shape[1]), int(sf1 * img.shape[0])), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - return img - - -# def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): -# noise_level = random.randint(noise_level1, noise_level2) -# rnum = np.random.rand() -# if rnum > 0.6: # add color Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) -# elif rnum < 0.4: # add grayscale Gaussian noise -# img += np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) -# else: # add noise -# L = noise_level2 / 255. 
-# D = np.diag(np.random.rand(3)) -# U = orth(np.random.rand(3, 3)) -# conv = np.dot(np.dot(np.transpose(U), D), U) -# img += np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) -# img = np.clip(img, 0.0, 1.0) -# return img - -def add_Gaussian_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - rnum = np.random.rand() - if rnum > 0.6: # add color Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: # add grayscale Gaussian noise - img = img + np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: # add noise - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img = img + np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_speckle_noise(img, noise_level1=2, noise_level2=25): - noise_level = random.randint(noise_level1, noise_level2) - img = np.clip(img, 0.0, 1.0) - rnum = random.random() - if rnum > 0.6: - img += img * np.random.normal(0, noise_level / 255.0, img.shape).astype(np.float32) - elif rnum < 0.4: - img += img * np.random.normal(0, noise_level / 255.0, (*img.shape[:2], 1)).astype(np.float32) - else: - L = noise_level2 / 255. - D = np.diag(np.random.rand(3)) - U = orth(np.random.rand(3, 3)) - conv = np.dot(np.dot(np.transpose(U), D), U) - img += img * np.random.multivariate_normal([0, 0, 0], np.abs(L ** 2 * conv), img.shape[:2]).astype(np.float32) - img = np.clip(img, 0.0, 1.0) - return img - - -def add_Poisson_noise(img): - img = np.clip((img * 255.0).round(), 0, 255) / 255. - vals = 10 ** (2 * random.random() + 2.0) # [2, 4] - if random.random() < 0.5: - img = np.random.poisson(img * vals).astype(np.float32) / vals - else: - img_gray = np.dot(img[..., :3], [0.299, 0.587, 0.114]) - img_gray = np.clip((img_gray * 255.0).round(), 0, 255) / 255. 
- noise_gray = np.random.poisson(img_gray * vals).astype(np.float32) / vals - img_gray - img += noise_gray[:, :, np.newaxis] - img = np.clip(img, 0.0, 1.0) - return img - - -def add_JPEG_noise(img): - quality_factor = random.randint(80, 95) - img = cv2.cvtColor(util.single2uint(img), cv2.COLOR_RGB2BGR) - result, encimg = cv2.imencode('.jpg', img, [int(cv2.IMWRITE_JPEG_QUALITY), quality_factor]) - img = cv2.imdecode(encimg, 1) - img = cv2.cvtColor(util.uint2single(img), cv2.COLOR_BGR2RGB) - return img - - -def random_crop(lq, hq, sf=4, lq_patchsize=64): - h, w = lq.shape[:2] - rnd_h = random.randint(0, h - lq_patchsize) - rnd_w = random.randint(0, w - lq_patchsize) - lq = lq[rnd_h:rnd_h + lq_patchsize, rnd_w:rnd_w + lq_patchsize, :] - - rnd_h_H, rnd_w_H = int(rnd_h * sf), int(rnd_w * sf) - hq = hq[rnd_h_H:rnd_h_H + lq_patchsize * sf, rnd_w_H:rnd_w_H + lq_patchsize * sf, :] - return lq, hq - - -def degradation_bsrgan(img, sf=4, lq_patchsize=72, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - img: HXWXC, [0, 1], its size should be large than (lq_patchsizexsf)x(lq_patchsizexsf) - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = img.shape[:2] - img = img.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = img.shape[:2] - - if h < lq_patchsize * sf or w < lq_patchsize * sf: - raise ValueError(f'img size ({h1}X{w1}) is too small!') - - hq = img.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - img = cv2.resize(img, (int(1 / 2 * img.shape[1]), int(1 / 2 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - img = util.imresize_np(img, 1 / 2, True) - img = np.clip(img, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - img = add_blur(img, sf=sf) - - elif i == 1: - img = add_blur(img, sf=sf) - - elif i == 2: - a, b = img.shape[1], img.shape[0] - # downsample2 - if random.random() < 0.75: - sf1 = random.uniform(1, 2 * sf) - img = cv2.resize(img, (int(1 / sf1 * img.shape[1]), int(1 / sf1 * img.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - img = ndimage.filters.convolve(img, np.expand_dims(k_shifted, axis=2), mode='mirror') - img = img[0::sf, 0::sf, ...] 
# nearest downsampling - img = np.clip(img, 0.0, 1.0) - - elif i == 3: - # downsample3 - img = cv2.resize(img, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - img = np.clip(img, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - img = add_Gaussian_noise(img, noise_level1=2, noise_level2=8) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - img = add_JPEG_noise(img) - - elif i == 6: - # add processed camera sensor noise - if random.random() < isp_prob and isp_model is not None: - with torch.no_grad(): - img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - img = add_JPEG_noise(img) - - # random crop - img, hq = random_crop(img, hq, sf_ori, lq_patchsize) - - return img, hq - - -# todo no isp_model? -def degradation_bsrgan_variant(image, sf=4, isp_model=None): - """ - This is the degradation model of BSRGAN from the paper - "Designing a Practical Degradation Model for Deep Blind Image Super-Resolution" - ---------- - sf: scale factor - isp_model: camera ISP model - Returns - ------- - img: low-quality patch, size: lq_patchsizeXlq_patchsizeXC, range: [0, 1] - hq: corresponding high-quality patch, size: (lq_patchsizexsf)X(lq_patchsizexsf)XC, range: [0, 1] - """ - image = util.uint2single(image) - isp_prob, jpeg_prob, scale2_prob = 0.25, 0.9, 0.25 - sf_ori = sf - - h1, w1 = image.shape[:2] - image = image.copy()[:w1 - w1 % sf, :h1 - h1 % sf, ...] # mod crop - h, w = image.shape[:2] - - hq = image.copy() - - if sf == 4 and random.random() < scale2_prob: # downsample1 - if np.random.rand() < 0.5: - image = cv2.resize(image, (int(1 / 2 * image.shape[1]), int(1 / 2 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - image = util.imresize_np(image, 1 / 2, True) - image = np.clip(image, 0.0, 1.0) - sf = 2 - - shuffle_order = random.sample(range(7), 7) - idx1, idx2 = shuffle_order.index(2), shuffle_order.index(3) - if idx1 > idx2: # keep downsample3 last - shuffle_order[idx1], shuffle_order[idx2] = shuffle_order[idx2], shuffle_order[idx1] - - for i in shuffle_order: - - if i == 0: - image = add_blur(image, sf=sf) - - # elif i == 1: - # image = add_blur(image, sf=sf) - - if i == 0: - pass - - elif i == 2: - a, b = image.shape[1], image.shape[0] - # downsample2 - if random.random() < 0.8: - sf1 = random.uniform(1, 2 * sf) - image = cv2.resize(image, (int(1 / sf1 * image.shape[1]), int(1 / sf1 * image.shape[0])), - interpolation=random.choice([1, 2, 3])) - else: - k = fspecial('gaussian', 25, random.uniform(0.1, 0.6 * sf)) - k_shifted = shift_pixel(k, sf) - k_shifted = k_shifted / k_shifted.sum() # blur with shifted kernel - image = ndimage.filters.convolve(image, np.expand_dims(k_shifted, axis=2), mode='mirror') - image = image[0::sf, 0::sf, ...] 
# nearest downsampling - - image = np.clip(image, 0.0, 1.0) - - elif i == 3: - # downsample3 - image = cv2.resize(image, (int(1 / sf * a), int(1 / sf * b)), interpolation=random.choice([1, 2, 3])) - image = np.clip(image, 0.0, 1.0) - - elif i == 4: - # add Gaussian noise - image = add_Gaussian_noise(image, noise_level1=1, noise_level2=2) - - elif i == 5: - # add JPEG noise - if random.random() < jpeg_prob: - image = add_JPEG_noise(image) - # - # elif i == 6: - # # add processed camera sensor noise - # if random.random() < isp_prob and isp_model is not None: - # with torch.no_grad(): - # img, hq = isp_model.forward(img.copy(), hq) - - # add final JPEG compression noise - image = add_JPEG_noise(image) - image = util.single2uint(image) - example = {"image": image} - return example - - - - -if __name__ == '__main__': - print("hey") - img = util.imread_uint('utils/test.png', 3) - img = img[:448, :448] - h = img.shape[0] // 4 - print("resizing to", h) - sf = 4 - deg_fn = partial(degradation_bsrgan_variant, sf=sf) - for i in range(20): - print(i) - img_hq = img - img_lq = deg_fn(img)["image"] - img_hq, img_lq = util.uint2single(img_hq), util.uint2single(img_lq) - print(img_lq) - img_lq_bicubic = albumentations.SmallestMaxSize(max_size=h, interpolation=cv2.INTER_CUBIC)(image=img_hq)["image"] - print(img_lq.shape) - print("bicubic", img_lq_bicubic.shape) - print(img_hq.shape) - lq_nearest = cv2.resize(util.single2uint(img_lq), (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - lq_bicubic_nearest = cv2.resize(util.single2uint(img_lq_bicubic), - (int(sf * img_lq.shape[1]), int(sf * img_lq.shape[0])), - interpolation=0) - img_concat = np.concatenate([lq_bicubic_nearest, lq_nearest, util.single2uint(img_hq)], axis=1) - util.imsave(img_concat, str(i) + '.png') diff --git a/ldm/modules/image_degradation/utils/test.png b/ldm/modules/image_degradation/utils/test.png deleted file mode 100644 index 4249b43de0f22707758d13c240268a401642f6e6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 441072 zcmWh!c|6nqAO8$7B{n3LV`kK(93v(n=FF9&gWOr7x#ec=DLIy6$XOP(=y2x<5$5{3 zs+mc-V`-Qp{Pz3DAA5K__ISMae!rgQE7jW4_~_x2hXDXMYHEV90RS#N006atxj3JE zF4jW;AOJAMT(%1vnml1{bTxP?g+DiynQo9o!I6N_%E*vbgZuO|L|mjk7P zI+d=K`&W>AKZIh#!o$NOBX`NMJA*)>jW^|y3Q#;Aq4n&kr^~q#OBBtfvCT(8H#W{9o?KF0OXT!$_mv{Kc%5DquBFg3b@sO7_q?^dupWPXl z54e1i%uFqg$z=NZ`PI>IX={rkWUC^bXM^*czmHU$U0g`pQ7yUKjc+^zLamVJ`t&iC zhXDc@z;14{=4mUN9YVU<+VqJhq?`3MyZ|P+*|}Zzzq~wlF8)L?v){TxVRY055O3&vbrg{ zA{o<(b&h;RX>9lo!|;7Uqfqe5%F4|tQh4Ef-*!PDFMfB=nY|a|vb(S<<#G>;$qqX2 zIe;GfzRJ$OsO?f{*~dj#N(O_&niw&AvlF|Go5O4z(*ri6szhcjMxh^?P*8(MDie??6!N&){dv4x%IdQ+0(SPrz81#ezRI<%+xlBmx>e#T6 zUq7hrDyIByUXJI@r^JW(+`^n|0)2ph+o1p$0O!!J-dAZDp@>Hi=#!fPK;CSaCn+CZSTJ0g!<}JmE`;e5Cp(i=ACVn zB_^PtC~nSu#5ZmKw0!9DQ-eUj&+$%Uey#fQ60p2dp@#vyGPgUkqaQj<4;mnkq!R4< z>0nSsT}EGEo)t@b(3Uh8K9?OV;3idhuuhvts2cgzpt(RGK#DQZZ((n1ihdE6u>jy# zeGPt!1cma2s@ogNa|Qa_;wYcVy~Rb&)3N_T$+2w4TKG<0y~D(KvR1Cp1}_5BlREYl z?>K>@efNTET9Ev0!oIJP54PB})&n6njk2EAfA?iq^ozsjoRPZ$-Fuq%Az8T?dr&4J zSr9Ab0gvr8|hg#PRPNJDi*8$MoBXp|R<~5E&U6`0(0U>wh5lkAQ$IP>&=ijvyI# zQ)1@f@Xt9OJwA9KpS-+0CNMPdr&O>%+(=Ikh6VmLF$Zb2b=Ud@+PW8ZYagl1g}ck3 z_yG9_Kl_|+B1~=6)ls2bXKXK5JNPjBjjA}0S7O*=Ogq(lq#!VmHANHemFTXi_};?Q z;)N4_)pH^5h{?F~`FDrw$jAVPPa|wrY|I)M%-t6D)WJGgm+o7qdAQr_Dz6!G&DYip zJMQo>XoUW=gyV*V{1)TMb6I7)Zh1;=)M}Eu`w|bjoKo;jTG9o9ME-o(6?T!?o<;L0zbKwDO9L*ayGU~X@-c8024k|S-(`b>%6F?fQo489W-9&-+-!H-tS@S~D7)(emDeqNfUd4%5MoCwY7A%P;gVN*-QiV5V%)Acg zGI4HRwacrSgw3LE7!`Sbc)ETAXia=^S2;v 
z))-a~0oii--jja^REM6375ny*so~*UtJU{!bG-*n$%BIfZ%|dExn8xgqIDO?Lsy9r z&QlppLX5_K{@3$gKl@4%dcwS@cxT}aRy+P%x9E+)bWE0aDxrPQp3>7`nLbrvo6la` zToAe1gO5`)#yhICy_~=6*!d|zeE03?h&jlb*)xhn)0g9cg8cT9vyS}V#~#bgy6K1L z&HUMPe|@rdy6&A|T{c?Q+EHuak8sT!u3poB#5pLTVT(+?_bGRMJoRj5+!%7xh{!M-#`htckLRuE|qtUBv?XZ_Icn!_E8Tl7Qm*>P+V^zrwf zKqAk#9smzmf+ir+KYnAB5Lh2N=E3MoS^}3x>VwXP!UOzpa z&;r|HWM@gneU1hbEgSR>O)Je+@_mN;;rCNVhunj2<&k}jyEoP~mhge1>iCBps<6+f0bTc z_-gGCQHmwTuH2Org9K0dsE5CjO1diL zz~z@=<0NJ)8uzFEiGec&b-y~i{jAspXN8N>sZ4C_D_PcXQ^<68t8fGXn5Yw}-D%}V`+^XY3jA86Bqp9j zLZ(2zF3w{>iN^S@QVe(j6`8MVk|Vbk7#6qfCH1uePp zRAQA+tIM6KVZ3sx;$(KsKAoW7Szn?J%23jmRK{uKenUJrNVB+j=OUT?^^bz@TB$b- zKR)m2>+93_8b)=w^QJACNB2g2e7hFOqO+Go{Wl&E8EsOfj{m_k?0o;ABy|qdN{F-P zk@JE|DsJD1f>bU3i%?6L2DYQwrB(6lanL^=tSNw=3=OrT7IS;J$NbFGp1Q&@L33jz z3>d6V6skG)9PG~%sb`bygbgI8mOqEs*NMIND#jN95|L*rZ<5(w(}Ei8TM^cT?ec`y z)VMyzwMOcKOA3bj#!WpFGEfOLPRHVGDYZM3R~v47U~+ob=-l{kdPrbqY+V?XZbDK; zgbKdp#-*HxM3=O*2)^#TEqRUI>b3mZ8mpAByNyANd{_MxqX)CkF*1a}hCd?(ZDb8H zD$k8err>gNjmfbcllurEv-`X9Thn7wX9zA`hdH^19p!RYp2o_sY4`oiWoNCQ z;n+?IvtjKm+M`tUoZHOIUMD|Exh}(Fr0G)oqN*sw5CA}eK7tRxa{XRwMW%H4gM;b1GdAs)+{ z0sr}4=gwmepOsc$%Y!TzFywyA~X97RB)db8nQ=SG%4bDsSjzsq-@}-b^@&1a1sT71ys-#y+};I=-VE2bO_2T` zpJrb*)w3DPb)=gEx_NI_L03J9OM+pwx2tPRvhQT8m%tb4m8M^oWhCqF?@PoQ<9W=x zK4XfCo58K-tXdN3ac{l%CPyV9QcF-i7bU6R+_|iE^slXNjOKV2|6%;#f8cHYvpYwv zV`5|3hJYJw?x58Kd1vjK$}D2-YC`^B*NO+8{G1%Vc~Fv3U4DYGDLaQChTyd(Xsn>6 zMWhs6pW{}yaWFv&rS_g|cFji%nRg;5W7S!KXf<>Hb=H-0OFlof(nZgOQ)h>22(}v1 zb9#DuyAc9exYDHBa=ZrF@2uMT_dIjpZT~W@e<8%Bk${lRrEKr5*{Cp2dgmzL#Sg(= zd;9qVzkKoAJI-8JWv>Y?DH%MGVdxH_x9#uI`MywBDEoXghGBC}WuK8}Z@BN?ZT7ln z!cVn-jj@OcKg~V5;J(#tvHkRx-I5VUQ?zsW$e6^=fKjIV$6jrLsGdDv-fD_M=JV@P&RaX-IOH;^}l8AC6LlMKyek| zCS2=^Twp^7T?f+)Q~{10Wcv&*NnCkw`}Wdxs+Rg?*t#~@m>3@9@5Cfe?c(nmmirex zXr7m7axeIqq;vSlC{QH%!|%PlpdLOY98adM-gvFtQzB)1X~~@UZ!!8{Luc>uNxPM{ z;4?s*<|mI_T`sz;J0G34Jk!FdNcZk&{z$&>WY=9#0YV3X+}68pk6t>d7BCj(>HQkc;SrJ5m|qX+;pKD zbpgD)B6<#&c4ESI|E`5;KS>VV)I1wl0@8shmZ}8)vu(J!qhp{7pUdRiw~R@wgFtd^RgC>!H^14s7c-=gC{)e$L-bO8LESfkSFTVUK5QovsRJ7j=h3rUrnphfmLL@#!8^E!?` zdj5=y({`AHo?&U~^#|Kg<%{}O&!#VYXQ&;-|7qYVz>lDc8yN`hbL3k0PYL+3{5NdZ zV3B5=eJ$@|6!(rwU0%6-J9J?iA8!{&4|wJr{dtq0pZZ81>5%L4=Ci1ZNCr{P$Q;tPD}f3wwm(U}t>tx)cgm@#+^&$^H@_(6-pqIk(6#rIGub zPRgj5ytcO16yU!4pt)ntH)eZ`Dqn$LoIuN%p}Mbsvc>pfpb*8McFn)yx7-cgnCSG% zMng=RxT~aM@y}9OYZ**bYG>Y;ePRj3su2`(UbD!gn&^G%@ST4o%X)*MOn=G*4n@%M z^wZy~GL__jJ>M%mGBZbVItjCTxN&C;!3pXVU6SWFP4{msZdtzahcJJ~LV4uYsiWN| zUN+B|HObtU9d%*!!LK)z(LC&l6ikqN6^398%#=4E$x3MmL`iv|KuQ6jg=ym)8&nU9 z72odXuqNDxRu1W_B;114&t{k%molZnOwZf0SB`hM)z7Y5XRz-iJx1eX_M|L=n>rjm z>$NO`HZQvnX{W@o0R5rCrEfht#U^*31-9RTmtv(VEP|LAw%-iAr^bRstEcPg*RKjR zttoRu0-tYqx%a1M&Ay~jo0v_lsevN2Hy=Ds{rz4bOlf)B7R{S@U9WpbaRMqMmV|q( zfEGW+@&;3Ov7DKsXNSfQm=h&0U-s+-_;2q^p4K@E`@0ZoU-M2^&YkBaM;SH3y}^d^ zfaF_OD+B6`39E=8P#yuSL@CwU#P&u@>UIUUs;a7~8*Y*I0 z8xUGRK)L}f%Q0)~pcSW9*h>i36gpGkN8zpbmd=|`kf zrgw=g=*KNi4mKut-(n#1(`Sv+w7`6@tUfPzwuAbme$Fp+Fo3Jde$iJAL{1gJd|;DTfEg#>;JaSS?v7U9#mN41t0Sr zd=|nXNsuFKMBL^s8?qu}yT$UCF?XIJ{NwSVTFc@pE}d3vydR`em~74{B=w1hq9sN^-oA`@Z|d*LL|)S-fdLFL^!H_LCCAC;aZm)%Qlw$QH= zysg8HYYHO;i%i-jdXn4nC^SWEO?czv6DWH2?vv@PEX2sT%s5^-r>NsCnkhbsVyXN$y(8G2guY#-eHW;{*Y)^tAQUVLa=^5xz zX;tGmTh}{KHYXJvA5cM8@3;k9DB-?-{ptln8v?+-|8I znNG&+e55RfS#EgmH-4Ds4Yqah?*PB+s74Y_)Ku1ed_;q|?dwk(B~6FvY4Q6-KRk|w zrhkZj*j8+|2owvfUmt4bK05^GoN5+z*AS>J=Ucv)$lr@X2Tc+w8TiPal9PY_G4m=L zNAr}U=rKfvdPUZ7U?t`UQXZtJSU%vF&lA72Opw|dK$Q~s{`MXHv*TDAl>|;njf!7A z{v;;4X&bhL&!$ra#NH;Zi`_G{2K z3;jXX4=@%w>xs|A6Dn*ykA)uN`^g{Ov6Lz`D)!$pGZE#tKWT75+UpYidoMD 
zNQM;GeGDgtY!s?B-0;E$7A^ni0saE%7ywBK)-=X*>f&2{@k~QbAAWL$X$sCR!>POi zh2@edAC>|R{%uew+nTJ$tN;HKA&_^v_^_Txrfh%NxyX1G;}XfeyQieXEW{w z!qmlvkUN>#Lrr(Y-|A)!l*I|I@cnFjTe92v*IF>ry<&+4FRPPh({`wNN`{M$4iY41 zB=~1co;#|>X6E_>Y=4BtsFVG#HNB>r3N$0e=TkBn7qs7vdhXF*nc<026iZ0X4h3E`oTGIJD9)TOW&aMHB$}RrOE7TMLn!NV+Hq=ExGDckf=JBv@tP9Ir8$F zI_oJ21GOgVDotIv=ONs}R2QmE{9b%d=_%Ha_h&{?2ue|p99e{4 zUN-+fj?O)h>F@vJBMRNN+@&y7bEmmXa#teM+!x9vx4B<($t}6eU2->yn)`HNf5k|KX;s&i(Grg|?AOG&z%$UADk z3xx_J!3v)3jvcE(kh$ zZaf3RQ!LicNSGaoAq=)$N3QEs2c30Y5}G( z(z^H73229&1v=P>f}tXv%m&4fH(woYh)WL9-1yk2vC0!Z?CCCKUm@Y zpEdd4kNtwl)92p5^(rn<3Z6D|KBlSfI#@b=TnMrgfJ({f-`IfZwgh&m7*hipZCE~q z&=r@x+D{fUY3h{2gqkNGrEUcBrS_DC(+~ORN$2dCb5JS(iu4Y? zt0|_*<6wkW8gj{TB-kT3OhSCyw#+aQ&*?viU>d`ND2igwd6mciYfeeD*$v;rMcKu! zn-}DAS$Q;0^5}AfcI45-&y}Cyu=sfU?86y}7wn0VE&j{|!)n&3I~`$6_BBg#CBND2 zs>XO@(ect*ufkDt3-YC!3?NtgXp^;-(!bu_*_pIdn603Q8-f`42oMH+2yO)mKsfi#14M>q`TP~(m`SyEJc@;);Qf47PP zO@vQ@Cq2E|)m4G$C@;ePG@Z>#hg8Cs!r9%e%=$C-*$#g+Z8(qnP}+tGcZvxL7cGH` zBi6-0bfb6-GnYdm$pZdLx%@o*7EV7M?8TewG1IFA!=AxY+5w zrGMPbI#{AZ`{sWk7n^H>rV35UdKhIJ3*}oYG`<-@#=xkOk*Og~tP{^UhG`UVtlst< zdu02F+#VDe z#jB&M9-}g}s2B~Y@q9!l`l%xKB?zCQJ03Fx!Rfo(x4h}c3Ug8~GM+*yl+Kp~wq+}% zI2wGxJVpT>LMrOAC!H#oLMmuue5MV9B*E$MZD4W)HUPmm>~x^MYp;hg9i)YxE!)2S z@tY`w&oMH2C~|#|xACXZ^lE(p{$%uPu14C}8z8XSyawSbOE4U)h1?lYebpqXUiSLlW=){4nlSSW8tq-R1N6 z@&2-GTja)Q^Dqn6R$83QGJwl9WODR~DqTeQ(&VA5o3TUK@PG`43vSV(66Mpw^S@tgwiY%YUU@yhycuIX;D0xYoEe;lxc~ME9(``!ePhQh(Oy9$^0Q_41ZZO2k~Hfw-hR@3%B(!1{|sA zsH>%$($M#BL*+!_S52rmSH9P7_mIb7J#N7JAobOIzuqli#yi72p{{8{vZKl35$PNn zgkZib@o^;TosSNpMU!YDQ5b9j!WR)f9pssVPW4R{KY10F|6lAl=GD&#AsYjK89>Qz zQ-OTq1760dmhh)IK&uNae~s5>Gde#}dH~AF7kd$cWmt6)2ouQM+n;%JWZC}ja`?0n z6~rRo1`YrVezNu5BLOf|MoZTAB9oIk+wJ?4 ztLV=rLpOOqu1Nu%{?zVgd{;4ji|+N@!Kor?CTlxqDNx|1qbF6VeP>P<3%W@IL{2~9 z=RoIV{q%ouArVj+RFB1M(omo!0)_5my(c}8zI?p;#eDFE<5&)UZ6x`GBNXjUkEk85 zH@ceM*9h+g?>V5feZyO`>*N1;d(q;jUkG2{Rmt24g?q#N)okN#9f{=}o z5LQ`&yd2SfZO!!NY`jkLU0bu%9$-ZW)D^OT3o%fp{7Twzx?3~toRROjXDDY#OLJuF zxJUM&fTpy#k)w=w_mxh8lQPVRo6&mMwH)TZ}957vWRl*j?LVhfeX9XOODY+AO9gEP4uW z4>?afh^kJ6-O;z|E#H1?Q7nOa$mRHM{j|?Lf95N=8@d&#FrHDRl5$xN8AB-;d~bSH zrw=${s7T|k&3wN)vhV|uTtaDX+>$ui<5bA%)9pqs$Sb8A1TLMbSkb8Z$|lj_Ym=xl zo42#VM<|-!9I+_ z7`!7p&4&~bNwmk;1FW3#K}?DwEq-_#oRj8*K``G)J9~CRaC^Ts@DXrtw{A@Fmj$i( zTtHXA!9}{C($MByv~-(?U7p1yIf$Y#mY;+JHYO7xjniXTsvkbyn`+@Ne^aC_tt|_+7hT`P0)?sDpf05iFu(7pF&)C=v zpvkwmX|sulcs!yXEX$-0ng~thWbPS7QjxTL02LU-i49QI;!1Z{=&L|}K@9+Szwqg6 zLM{Hj`EUI59hm~+JfzvR{BG0o2OrhaSLvniSa{@(y4Hiy+6IFUo5!J@av-+BlXq&e z4QPm|0Hs~ir8p+&h<6lISSEAkE5($0LVI^1f3#TkN~;DCmzaS14}gQUsmMy{F7u?* z#(0u>?zxaM@|t5xqs0xe)$>M2z(gL0!?`08un22iq(`Vgnc;^KOgLxt#%qV1&jZQA zWz>!K{h&07f4}yN8lzm#@H6Y+XOTnS9Oz01rAhwPXpqRTzUUS$8Sv^ujFiqdmmrN` z{#Kp|`E#++2`UxZ{nI-vW8j**$`c`mO)+1b}nH&sI;xIW~ z&oK~i(W05m3?cK0oy%U@G^ktJS7+SD0dXveC{LLXXMMG_*VAKS0Kyqd-$$066^iW+ zae-=mw4%mbn#*+f>SU;$5&P<+f$8t2?xO)z)o+)m?k9SB+X8;CYo9el^hHn+&2pAo z#qu!&S;^v`&<4qz{)#c=I|v&CUy73F`FsdGa&zpcFXVDd@X8#fuoiRgS)Qa56C=6y zG$doY@u#ogTy2$9fakTLjTc8x$085tHyKX@JFHH+enaV^%RIQQ*o?hKemSEpugmnn zLWm8nIKV3B{`ri}pArZQY~CQQx*HPd))^_nDG)?##}8MB5!~HPtsBNaN2^>S(=~(|5moMJEOF zz_QMirB)nASf=`i zV8uUSP-JjqPoWqIy{hy&&zk}}PK818ppu&p5r}_={lcB^e};)EIF%iO_ji&WGa#!t z?v?V!_+DgnZSA6V@XC3-pav&4f^C|p0^ZaifrRQ@5o)= zqrHYB*d1lxwaBBhM_ZPYwnR0?znujw-ADVAmfr2|`TyfIyg@Mrox$X%0R34l#aA$< zgYRuz^jQ|+nEeC$+IFDo7%KBexTR8^n~!}g16m9vvVgYHyFPgkjMR-%G7`1HBBFqi zyXCQP$xLXTyQrT zgV-4Ln2Zg%d-U2>UT72$J?Jn@-L4P?=9)-Ze|fw z!=Vq2%x3UOq*SR}&r~i$|C8_-@is5KaUWS)CA1Qn)Ad8 z4o%agOw@a3{isst!ASa_F8(6ifN_V}*Tg0J*Sxc^Ps=pzOVTxoF(CTmwX8-_{Yn>K 
zj-avJ_9fkAT$g=N+ACZyI{xY8v`Vd%jOTHo;8bLP5>E@BK<}m!0_&NNwMDYn& zI@_~Tuu->+mGu#mGcV91Z86qO<(!FLlC*=9GTAL0!#3f5I2`0nTb<- zGV44VJSCY4JXqp}I*ZdYF!;Uk@nIKXlY1M)@e7=+0EVo8XvtLA-;KTkW}XhKmZ$5Q>M1S6ZZBMp}u1n zrnUJkO;>Fnr@NjDqch569NUyNXJ_eZ8Ax|Fme(^M16%;rLQp?$?;kvBFkM5E%U5%7 z(2^h-6hL8vs?(b|_t`PIhvE2nSL0~W@~A7VU7q4f8+jx9qHR;Tqp{v|KIAw|@7-QR z?ew&bjLT_J83H+1V0HM%LID1dcx70L+>qVwK7=9e7USuf!KRimqiKXu|KEIk*0#KIOZ-8H8wBSwCAPut`@bZR1 zMY1Cc3pP3E3|n8W`cBO%#@o=?S5S5g@b85#UQ{m}a>4dK^Q3Qqe}{)te5R5;$YU=b z-B4~msGn|$JURqSKahR6vO>dd4T3_Wo+Iyt~9)Sg}r6 zMN^h|#t+Lh4fg;Y17E&4^dbn5ZEa=!&z|px+EOnYGdFp zMCL=i8%dP|_vc9-KYMa9`m;)^5!Ll)#o3F2zlG7~xirg3p{v@i$s&Z)vO+A+{OcDT zV)7&{=R@)tAyZxr9gkrY#CdE?(xdH;kb~f4Qj82^04}glmi4=#ecuS7ce80uQ>-Mg zhC>7=mnH=thw7o#%j{wjxBN$?&iti3VY_-X9z-{Blhi-ODk*!=(|;p*ptH{u8w|q~ zp5|VmNmoE1u7kvvhSJ&&4pxxf*zXN$E~l+5&b)#-jQ|@TEjlXo-+SXfvd&jqay_N1 zB)Ogl^0PnX<8F+8hK@>-V>8RT_R6eJ+}>=s4NFIwAS@xG4p1qGXtA7Z(e~14Yn_oi znM?5<3)Dac=b8e8Yx_~8PXkKsoe%C$2~othYkS<+ce9piiW+oda9QI}CN>wE#mk@7 z^I1{7BoN#(9|EXlv?eqs+dW2M!(?gr!Te^y8#`-9*HqD@a?xZOyHl?scIkE9L0Q>l z>*FMOGA?dQIX)Z1VjMz20qrY-@EV8B&Ii_ORagZnl%dVW*x-k3&dp^S5Hsb6ZYkbg zOq~8ZIabi-xrbR&Ghs;-wG6zRKmX;P_kPL`?&irekiQ5NqdG9YiwV{`cBT59V*ss0 z9`J#>?b!06elnnLZeHG+_c^B7GmDt}H%fnVSfz0^bR-nHu?o>lIT5KGhA&$%o`B{< zK@~cy683NB?y7;~ZOW}a)>CMQ&yA_ ztN>@bzzs;@CgycvEqNF;G{||(RM_SgPzO%6K5m#^)^^38B>#dr`1O(G`1>_kp_Ou$ zJgyUz6GAOP6k!49BF9XOahZLKT}=*^N-YC)R5B8zy@Eu;0#7y(hfBO;PPhMn8;Pi87f}IYu*1RAs8lDCQm z&M{Pmd^i1ogaq26XtC{?KPb&?^AHa=e8{7xSy}Oh8gHAwx`-S!B^th~Ch=jPo*lp& z=Fg8?k0FV1BP!sF6!{2t+((O}OB=gC@q-ma8xXCX#yN=8zw3)a33@R-NRUYqg+uc& zyM(L5s*B;8$Ao`W>%ph9lF)(0J2ItNkki)hsv>$ zTv{U{`B!bpHiiIqN0%tH{ZRAjJWlFyCUy62*=@{1KHzvbxe(8+OrhMbU5_MveJX_y zT(;559yP9HJ(-Eup~eiiDCP(;jtPqh48FLbBi3oSR%)z0@iG03!y@y;gui($ZyB&Y zK22fWYT+xl>s){6@_m#OwzT+B_Jc~{gJ7Jv^Nkx<#;vYf2b_po3rXg*{`d5W_z*m+ zn9pN45Q^uD%7|qpTC>E$GYs+0Pn|sJ_2F|EiDOoGEDnCyZv9w)_YcqKh%duP;>y`G zRr$X(MJXGwh<)`gnrCM!C>0SqZebvql$kL)>891`Cqwcxzie)by*prC&6@JlX&ysV zgk;Ri4>~5Aa;M5G8u5C_O<6OvU2(k}Ye=JN*Gr()A+QVpsgx}*)2m+Kk;Kn~s`7lb zXjECeNzTj=2=t-<|&nGiy#l9-opF>TARtu_=FxCBATD0p)*>xS4h#zehsIedVWnTw{l87F|Bzd?|p zosSxplnXKp6Tf`_T^P85gz;m=$kv4^5D2@q7_vC?kJ+rocYvc;qFm!=fw&Vnv@`Bz zAj{0q2DIHrwe~5-kl6xNX8jfm7I{F_`otZ}3N&$N1=Vl1(K{2AVq5>h)JLl*fm zS+M_Ry0y7IGSI)4_X$2p-s066!#QLb-cQE*Q~D-(zHP~atz#DiN=64Ue-qm`YZUPC({sYozmUs+#w zOT7va^g2sv0upWY7qitgpX%^zlEpDn>$VX!ApDJeF2x!GO!xt6H1z1hS2WO2a59UK zQMN-m)(-KSug}?!ZPN06i<0HTr~BbS82!gFFEgUDd`?ZF=6IIQB|&bGdRU-O>OCh) zxzqI?wtxDs4;UOS>e7{__9Zr#Z(NcmS%|TTyk;R;n18@J`7a4%&x`-C?sHbCY{JQzWDvUT^ zoIlseEOTTh9Nl;>WBVd()QV=i{yw!%2K5HnSJUQ3FY~T~7$}QW(#AGpDJOoPE$=~3Yn%Syxi)O>8_Mqe8(ZU2jig>A+S`lj#tEeR0R5q!`&IvY0^|){mK7@spg`_(58AH+m?v_vnnI#<{C@Z?S+6;-!my= z`=-iziJk7xO;_(?^X|QHX#eCJ^e!zZZA)M%T@lpesFMR-nso|r#)YhiAm_`2BWe16nR};K(EPX z0F=q6_fj==5#uT8CA%eYnSQemA<~YBn{&+@-^8?v{zj%Los7HO53LGwC{vgXpdbR?$ z;uuitKoobx2NtvE+Yl`C)up5=uAJT~w^_net@Iu4wjKmUzF@t}WWxf1*JSnnx<#Ba z{d{3`)_Jh_?o75sxh7&k_vOXjrZglOF+QAb&*UruCq;bmP1NE^6^S;hAV5mwd0@H{ zauArsrT10#8pYOI@2{{Sw56r+l?`pAshqux7E$Kv=O79(>Q~Eln~1;4MQh>20Z3w) z`zFRiMOTdBaa!KeEB>lOLoU`}J_gmObX$jz!5Uwzz>v^kZ7j=~Z* z{Tc^0#LRC7e?7Sp+FE3M8S-o2&hqaSLB>L8>zN$M5qBbMHhyEatB32A7B#S7(2J@h z{XR3}+}-v`40iOUD8VUNo+eQouZK=;w6MSASX~<(+=lUoXPy0PWlu(W@Xy-?A~Q)X^` z!P<>R=99lKfNG zF%J33ZbU`eE(ZHKk2N2PJ9QN-h2>J49_S zt=Sa|wb#hO4Ir7E@JjSgygHT`gP=%t7Yx@IXU2weiJHs~40w()yIxHl;ASeUr5jBL z--lH(UD`AZ9f^3`S2+eCBg~?stop4#dEkc|7_v&J}-m{qEp;>26X_Aqv1Kz)aGG4R971M_Tf4XhLNJxI>mFmlHiV6%KieYSo zQshN~8ULJXLn1=`K#r>cB6R%9$5-;KzNyAS6S5;G4jhyHrET0pc4I4xMVX`O-Mr5z z?^Tk5l;V^Nbm+k;yBB4i8pmAKJ%CY~D}TSwjI-}WiAE{X1geJhh^!E{TmDUY-_qBo 
zm_Div;f9gY{9}^jfltu>7 z66A4pXr1-Q4MxM!oA!v2&7n?JLTq~R7W(C1XBJ(XFzdh*ThMw3 zEQZnooTqS2#`hj*S(LuwdoP?73p;i7>08CpkwnOGH-wmhl}+gBz*SzSXGnuv1i9mv z#@@dB$o)D>PR5RkcWAPUi>Q59vtC@l;P_8hB2Pj%X?wKtXBA<UM@#53umDS-;pz6>KbZlr)F~<-qC>3bZY8C z)I)*Gf#DZwESW?;x#iyxW^pN8Rt^G#;k2_Lxa#74((?3~vkBHF=&gFQ2!-14W|XGO zsluTgXv^2l{z@@mK(QIuH5s+bHlVe>_$u)-VOJu)EaiJ#hx`KDy-Q z@qK)-!1@O_Ce9BuHBf@X!Z z1S*c9237e)Dy4e1Z4F~6`_%P6f7g)4j-RcA{*3jHV)jRsy}ehx$^aj9#*vtn4f&;T z#`NKtDMp5R*v=h;ihjK%@3RdYv;~R&$8@CD>wjxqXLB~XRUHc-11>+?3?La(#_)y+ z@OJT}!}y~-k}C+t578g!s4D3f+v{9tfw-cs(yT#FxaAxw>+!`DPu2m$Q9o1o@EddiWJB3#%;T2D`zFW$jmQO+fq zdmf!De$9K-+DHP+93@Xq*TPIOLFy>{oo@NowI7n#JN6$ucFRK%#sxUF3pP#(i@R(F z2GS(zUghMaW<-69dD&Y_p%HkznPDP#2U($peFK7y=b)1bi-A^>lDRG8Ufe`5;(WD# zG_Mf>)#8%Z4utK9l*fxCFm+{d`FnJwjMBQ0iv+}zEu`e=R1jH0)PDg<<-+br7($;O| zcp$gA=4gu$7vCI5x)sIy4#)cUH7Z~?yuyW}%{Ve!=wADSv~%zD=`or6Zoky!%X0JY zC>2E1F1#7*S4jmz#L+5Ckp0ppKycEtLM`hP9SLE*+D>h}H7v7^E1!xPqD&DjmUjISTFJsOogT9C8h4$PbN zs9tDpTer{eSp;x-0W3o${NG=zlP2;d@=gOYEAv|`hdv)BR~l%I7_7PF?G7$SYoB9a zDK+`~jQ5m+Fo$u=KgcZGp=pq z-^s2}np`w1RZRs7yk#}Ni*a{{f?|990p1CnWMh`M=U(ciD>`13m=p+R@*gOhb_6%$~Agr+abTt;pFiMGm|yZ&P(+D2;8ag+Vr> zQMT2j@agjkLutT^B@Aj zQ2k+MEEMPS!7fFS4weDDj1WAT%k=g;&-_U*Wn+EW|`U_bY5yP^dCU^ zO7DF+vC8!9l%mLMe2U`ry9WG_4>XkV1>pb37*bfFehCsj{cg0y_k4{^i+d7CBY<-* zw|prtCM5wD(0bh6{=}^Ql8851yta2L2o_4Z?tQ;n$2L(XkKP>30HRUOSvIjd#lJ{t zClxOyKqX5)YuJrgR7H7PlWV%U0*0&6_Z99y6-@^Zoj&8Nqw>FW@Z-;* z(aZ>-53j$6P2OaJR{JAv2z7cjbI$|bL>JUlH>zt;gpi9aDzkP=A&g}DCxcz-lK=U* zuiK!Xr3v_}-_NfH$gW8a`Qk4@@6Qke1Fn7 zO}g-Fb^;SW)Jj4D`XzF^;As75NA|FPP81@C;7%e;Fcn(LV+mmthg-s)5OgD)4~P6BC26Ww;yJV>=md8KuF zhM$Jnb!^=?^EIekh+yn*fxs*^ED>Z;lFIAU^;UbY#j~Ax`P1f)pNgR1SCumd!)4Gh zd`^ftE2}i|qR>TJ;Bu2w!Zi*7eAj<^PI)ej@0*)O(}DHnG6ZM|+z&hOeL#}w}|7AjLpTxj~<8P zv7u`<4MDCXhW4Ywj*3lTA|<(8W44EI*ML_`dqgP53COHC}U zX@o=*SPv*-xGgl*2*no*Ng?ZL?sslg5zp^M@YS4&_vQ1z0%okAv#u1whV5`>eLX;R z%4WJHZ1;~S`^WORgW+x4Rh=u19-8tnl#3>uIUD`EJ#F@vEUH|$wJekdq>p4E5mdMDpA2JLMRI;Dvp!$ZzL+-OSL=fa@vWP21Vs4auj3 z4C-g1n;IMKvqP+V?~Vn|egL3NUBf=j+@_Jh!uPL7#xG10xWcZ09gXeH(%zM(n$`G; zd>@^U8CB2b?Y`U4{T*nHspXlSo#A8WmA}bc;a=~k_37hx_$!ioX-Mu$lBYJr>vn1w zTfJ`u$yjxK zp68@3tp)mpN}W9$xkIPy=O{OHG-k!#^G}spp8D!=id7qajbS@x{XTFys7s~oL#yKB z)`!yI`RrdbHLx|1_42;o8k*&xaxpDvTWHTq#pZtG)`PD4MonJJ+Pg+rmcFxq-@q#5P0qf2x>1Vr*`@Zh9Z8H z#V^rw8f5Q(>l9)_eDa(g_@0}y>tV6-GWv`mr4yX$jnkDGY4wN6OHJx8Ohi6GOAvO0 zs4)@92u2|hSrcgMkHCE*)@L%Uxkn1$(f+o)mD=moK*J!i3f_9unLO13Vx2f9-UVjn z>_FRjen_%WdXq7eAe^&8A9nmT`L|#|HfJ*+%F6uOB=fzV*zA^~rDqg1 zigDzbiQo&0zYbRgZpcZwQ&X^L5LFg97=O^Q`yamcZ2=dje{*y5y|LIl)3LnT%Z61( zj>*nNVlOGzmUv*krh9KkEDHUXBp6gwj9BLqCYK*-CiW@UL4e$G!rO3x&y+~0pAbk& z6dl8G1G6L$F~2Z@JU~e~NJVB-7g}nn8fC1jURPdv@ojm#*}j~3;r~6ULuH&OxF5mE z<&HFS?KeW7lt{Wig{Ah=1BFD(To+Cyw%UXfY=O38A=Wi--hk`^Er#M7rDLAdEEFe+ z+zx%fGcmY#xLgTL={PlSQ=LK-|o!DP3FqrQ~@y9fnqjTG9_hSunrJlDu zgs_(&ADxY2IdE5q+Sp-%xiOZd@b&x$sdn={iGiwcI<6s)SqnOm|x$s zCtVSoI5UHexJ!Pl!!!X~8%whhVe1dMP7~D5?-a)K6jS}ovG3(^e1-o1;qx@c6~53) zzE9+zguzv{wQmZ4-*lV~N_EO#c}AHc0V9-m#Lmy_!y$vxjF2LOJ_uA>lhvSi00s0| z8@qMYwUxT7wl7<1hzc8*%yr;D~IULvJ)vzHV ztR)i`87bt5p7Sd?Oqj&RdnIKX{UvqJ>DYK zACm9xS-K2~iV$p)$+Af`xygE31R5gt_ZK&NtfKv5l7l0IUKrLdoRIN-_EYfW zdD+rc)nk8?i?=~c+0Kqi^3x!%!CkdNaGJ*ypZkYTi+*$`)8HUmMH4dbELqnz^Pn~V zdd!VCk_0x;1^}ZG|5ZI*gTAuah0%WS!CL`6-VA2mpZMV9(hT`Q+zs4MTS`?qPH*L$ zr;X7kX9m9e8-rC+B->`lU0{PPirA+VTjd)!T)&>Zak(?*A~cwDH`;^sgYVU&3xN@w zMv;fh-lLc?wIp1$pW_rqtqMbPlxHp2cjc`GJ)~!Va_wenth9Q1#BI`~JC=y>+A=v` z8_fnWs3K&C*b07$3xJ_+PR$7YYvgVJhh2?&BBS8coN29;{nX?8g#)XHgalR1y52d?7M;$!ik0 zU(kD4u;L=iD?TY`?TPKD&gx*fKV_IY{fZyiwmk)IS;FeE{LuC zJ1FR-QYD}7C 
zF4ojwusv>m`o0bRy5c8nYh@RSFXmciDJP!&Mk{ToGOCXk6t{VXS!2e^Mgk$}hBd@!G?tKg&f%BM z*x7>)A&>sQDk@_ADr1fnAf~=d_Gy8m4D5wfDNZdfg*j z-+$?3spB!C+g6Xra*yQ6;2u{LWv)efO>jY4d0FJP z?@8X}?W^CB?%Q%07wt?bP@4D#uJs8Fsaywyag*KhA;_5Fph9M~xw41Wd&OsEa5&i1 z?JdXhitt}CED-UW6b{K`Qa*f1^s8gNla#x&e(?H?CxNfKN$5DYxtUjT6a{wmNLr(D zKWnwnj8(anRhZabK%A zJa31?EOfocAw0MQSDPR)-ShYBpRFilFU-Y4_!^%o9*a^HV!f`~p9Z^=QwwzvDoL}w zXTdFh9?As*#^>1;PPKOXE7!*PQ8<=wrd@%}m6D%)fWq81kW4<5=c224FGR)1lnyNf z>mBw4HS3z^2L_bKoCdzwDK~xFu~mbr${KF9>shkxO-GCU@f%m&mRfupek<~a{l;TS zTE|4;sy7Q@!9-e19Zgd?C}g#DTT3q+2n#DQAVcKcYSY*oyE1jSCf3F5|T@VORz+GSS2WcgoQuXFQr3;GNQuPwC4jCG@?2|nM_Y!hvhr+5GP zo8``T$JCX6FdLpQ=;P-G9-S7c4Yh`enm1<qTXPkuOW{aWa2 zgvMUM(u<1`e}?`ozPKDOA=atsR2aSN3j}9}LzRn>M?!}^3FYQ?v_Lx^)?eF}@dZm& zRg&7F;4#~)dXp0py0HL4>F$qWrlzMOgZGU;ET3)8{9)gIvdC<6S$SIRx(3~cj&U2v zrq@aJ?}i=EvM?MAETi7)xX6q*?s?Bsu*LSDnvr-82#*IeX|$#L?%vWHp_Nr&Fh*Jf z-&Fdi+Y#*=0~=I~P#Th-?Uv`5SQ)B%0eIY+B6b$Km>^>L~rk_tuM8=KAh;;}NILh;j@sLYZ|B2NEa zZO3ta-5cL!N$^^Kiju1BV)V8Mi}@8&9W7!ZXzGyAe)zUK!>+U9bPeY*ST|c#NqLC@ z9KPuKoP{2GTWfhnnY}*mVTS1JS-QpGZ)72z=fS0mnwrAxgOOi9o{G`?eVR#6B;e;T z3gRgR5mJV52x~&J-!gM3K1KRkhsWf86pwm|i}JENS({kk(cOM*K+!^QYc^4*>_7B<1L>3&E&9KW(o2WJHJe!=2VKP1MV5VcBq_m zTrhqylTwnzGx76$7q|G+0bkwRA$^P#sC z#(}RtvsIyLNS;MVdyG(HhzMk}x_46k%xpT^ec(eiaf>r*wJtpT@uGL&DKaUMm-;bKRW?oV>gkD@6 zYmurtuwk+F*BZt2zXzxuMp;=rAi5Uqw>7xHU?ydpu*to9Vh!~BDYI=rqs`z8Ppqit zakvt~3Rx}BbV}Kdq2TAmVYiKc zn!U^XxAwR0=O}r6(C@U5EiK41Xx+;tah%_}Dj+|5 zZ8Eb%P$vElw?u4gZM*q~r~zRSQz^6kIkLdeSV5Yc7A*-fHgo*D-(|17V)b)jJs7Ml z1)ID~U&iw};7T8wjn*uAR@BbhK){3}W&#Z3OMlf#89(Oa8sX`aCr>QyhMf_);+l9X z8V*;s11?Z`#%0E@up%Yvf^vzU`03WiZ%RPQ?jcCIF7yW_m7-kE8D`3qxzBmxdLq%>V~A{ztLi=`^>lU{JGL(dz0-TF)xg{y(=O+sAdOR|-52~l=j zH+y7frtDc{ucB-3nQ$rGYmba;UtIf!-|O@9XMYrV-}meFJkN6;M+0L7e}Z&FRIBdK zkmrv7PTThhxWCNu5ew1NSQ?m(tD{JHR~m78hevQ->y@Y%h@dPuYhyged10`NzpoD!s?jV{IeZu37E{EQMYf($1 zFNe-W!(Tqz5sfbXqfu6uRZxekF89jH;lp!YDqO!n436W6q_z4tyk(i|nHWZ7oEKQb}AyMsD7wys7 z-*#-4CN1P(Q(nZO_XH^;%Vs}N6!px`!w)sOKkuuM6*uThh9_6AH+0mtG&gT~EYz0* zY3+3xny1-!U*IbuMQPPNm3QvHQ$l1x8Dt2vrB9whQS~NuVaz( z-z~)1Bh7#$=mQwjx232_tVdM9pY!SS{qw@&^TmKAF72jQ^Tk&(Io<|8*7MrwEqTUW zDGV_1-Lpp=0jJNhxwsk+z23dDO>M=)je_`NRG&5W}S=(OQ?@N$pt@h|88)$Yh zUkOJ#EUm8cR9u7A3qv4ZlJpz~pdU!PzP@e-atC?G5SH+_Vr2jqanU>uO>nZ2~VTbG$Rboa- zS=J!vLcDbg-qZgzO%HwxCLHQ?VrdI^>@JJ>z4(~Tof#&x9+{v=-=@$}iWY4yH++a< zfQ{OulG0+LWJTy8{e3~|`Buw)zt%S!Y5<1&zoB^NZ#C{@KxS67YTx%lhFSS@<;U&q z?W|}Kc=y3ZeKUVl#9^D9%Yzh57IH0i@Ns(T6Tu~bT)|ee-Fl2+os8$GQbmZ|7kwKG z>-Evskj;I^t*3$HXK~uMal>~Pr&-B!ULgzv3_cfm)mI$eFrZiV{Trhdz3bBQ&(vGp zVPOAvKt?(^_A|reGM^GwH-27|eZ9)T#d~$2TJ@poHga}6)PYVqmxKMUfIwek|1hWi zt*=0iaCUxDu8B}0y+2zv9TPkgejCqbEcVxS_=_NA1g4TVH^KkM`F6-xX!4K{9Ypmv z7pBGZ9W@#eSdI~5{Pjo?ZZQ0oigIY#xOTHc*vbi^Y)EIJOHo;cXLKW$5yG?e_mhb#=7+#0^ms<#p1R6hGWiMuP|FY;+x8 zh6C+b6BPO^%I@k6hle1_H#u`;CE%2;fgS(&#_1Xa0$%Gavvee&@RZfm)I59ktY2Yp zSX@N#s%l|genN&vnRM0S3tLK=y-YfuW6>*q*roaTGs`r@S22)6q|IMxXm;r@7>*BZ zDwtnB8SP6BFKO}zV)}{ywj%e?s$?c16+B%2$6ahOUM-W;4-{(W>b`sYm?ZYQA8k#O z!^6XeclanfWV;g>zi@@&vwe4gR0hDym+;dcRH8%785>FG!P!T|Q{p)$z~85|^q%es zjM@Jk+gWJ9sl~tSf7a)@5cy7bG|C4v>FRK1FLNW#0$fsvenc1l{SK~v(i3RNpISQ@ zY~JXGWyF{D`V@CgsgR@bnwgD5Kv0*|ZX?e%nMZ#F8Ak;zf?Xx1I90nGJ>X!D>uSs) zTYi3VDCoPpTCgCRq?fXvxx(i%!bEj`>mVugjkS4Tx@<`*D_r?B|W0r=z&*y z;sAZSk;B_6*;nSGf6&vTqlOwcq6~|A-3<<&i7)Db;h^EvS?VCBP_#gJTpn0{YTI}C z?)>v4A7mDVfwq_k91i+DL0m+$*UOldkQsLdx7UJq?BAC7=haw$jAlD#GKOC#EH~1&a|GsHq7_ ztv-d>W=~inu^!8ewpVOqtDya~2h*F~?+_g;9(4JWQLVPp^#Xqx9`+4+u;5G`suy)> znf|lHo4f?D)CryTm)Yy66z;b=yMT*h;wP~N{_)Hez7Gl?1)pH=Pn=lymE8>UXAgf? 
z;XF9}W4UpbXdQiFhJEFw#Fpfs7gApXNOC!#(#FAo2WtLf9&BTSue65lJS^sJFIJ8S zWLGtyMpb$UOK9e4gyfiL2$?B9l@?I>o%j(bdKB&}WM+N(orW61Z1e1lA&i0jvtj(IxsOUS-#$EoWo*^#oct~*iB$;S2b_(+a>=+$?Jmsvx2FI!2Bc3ZDP|SOb%o&2+_>&Vvq%_I8m{vFk6*N+ zTW^cj8<;Jc&c1S-u>Tp@9N6E-kg$);xq{*-K4kE!^^~epvpMKZ<~Tnf=0&X1aGhI# z4#%3}UF~y^^J^g&GU^sV-N9tQg3G@NhUW8ADG`BEy&FNiB!QRfGq}d(M#?|3TjtH4 zzZs|?3W|!39vReh0asqWNFt|9rg2RxPtPu7sOt@^_KL0%J_w<3A*!RZwq$QPPM5hLE7& zyGh|7c5#0|xDZplXr{4QKXczD9*Hqx0WY|9AG=cSjv*f+%N}6K$34zv31PH~P#p3P z5VZ&(_+mrrkZzR7l2LI;FoV)DlTzK}rs9;}PdR+Gu@5Z1aL=!0mdjoU@uU3O+;ayz zr(CxbeIFqf*!3EmK$V$=MGs@c$hB>>VkSs!uy=??Bc3}*EYCZ0wi{Bn0+g|XSZ@LP z%cEw(+%GdM*g{sJB!^zKbyh2}hs|t+$-vr|8FJ(*;EpwexG(oBt!$+$!6lj|&?IX7 zbX;J9_3GRA_X*?#;4uvM=j^eWz7A96b)s^yTkc+=Qd2qV2w03e4YfEUy?qgG2??#r za5{A~Dr~!GJ&>1TUqY+nkeTnr&fWcb=&$G=2B){S>N^o*jYs!iHqc*Vlo9czaVj%M z-l#Qv+*vj-{Bk*OMh?zqB{>~5ekWtFKp4H4;mQ`%FrOi@7#Z=lA=7NiXw~Y2<*VP` zg5PB>J~Y(YUrM*Y)h${5jzUluOPf;_dI!iG&f@|OQjgyNg7_=)(UL1fv_bt;{Ww_b zvTYJcS>0aO%=9lyD{H6?Rnnx@(ss}K&?BdpeYF3xI9G=L8GkjOLDf2uQs+gePk?Y+a6W*Db#_iczPmbWM(uzA!@gXe+CQ=;|`7(VX^7R~DOe*sQ+s6niMP9M|->e4X((f(HU7#oEbh z3ViTxQRf&?LrUX;Ji(%s^}gq7rUm$6fsH zO{M|ksOJvr&+E%2RAWezr#KhCmgLDn*}b49N}jt;1_L* zN#y!B7tRpJP*6jxlT!BH*&R)n$&P8*dQ%(8ANE1;%Xc?GpDZU~FXae`mjppSOCNKc z*PIod540Q&e*D}^S@YNC>oOPFdZ0(?aPYk9yd>ZxU}tjnd_V+LU4cHM0XYUgz(kZu zSP5cKlyyN>J;21Ks;YzQ1fU%Ztm7>Lqz0={H{+Ag*G?4kggd$as_^$Ua<_@ntB;u6 z&wi>f82bdmWPp}>H469xZ^1ofPG}y+HD{ipZ7lk72F5M$` zRQC5tDk*W{(NavjZdflDD$oevvpcij!Dw9=3WI9EMj3N1Qw$%2Q=Lu30xZWPS7egx zESH7R;Uy*0&IR!G$`nG8l>1v5C*+7^=v<8V;XZ!WjG9?Iw!xC~u)($E{+PTFHL>v? z#*^@!w^&M5dg?v2i2+bdhsQssc%@yh(X)3D>Uz0gP{y!mhF2pr_Fmekz)$4cReqUQ z$$!Cw2JLDo(l<^j`@7pcLh`CJ*odmpzs*3mOjfeo(Fd!_?&+&nEp*GjR%@cb4fG9O)SFDTnJuC?R6`1Dy=q6?N728(*4<4nlHFuyJSJJmHYr zgvS1;Z$PAA4~{JFGqLBt&GdJe6x`(>#^~1qvjErvAKUUo7)&0GjqzhB_ClHp{oIRJ zvm0@nItu9)+N(y1M{rO)A3GXfeq|$*A7N&el4EdghrDox{QdwhOG7#%$n%jkxR2#TwJ zjf$Dj`?Ohne1w;7zL3&?r9b-c@6PMd4|}=A*I6>079obp)D9FZ^UKPON)eIpk5T{m zWUhn*KR}*=!^XzM;fxoT$k{Qz;)H33)>6m4Z=} z?#P-O0?Tu!lrR1!rms8H>V$048N4*cXFQ`3?JB(CZ>gC5^g^u#5*P%)Y&`9}hYdL1 zPaf%*P#V&1Rj9Qp^UEng8Fr z0c#Djr|+L-H^Re_8{MwTAVns;wA@8Ux+c#RM>=*~ccu}GEQ3GyFMQ_$tu4P-a#h#q ziSl9-<*`5GaH&PU9Su!x(6}m1y5w*9mGjl}e*s|;=ld;(T*tVY8Nohpg_{pS8`>A5 zel~q4Bg{d;bOHROZxD93@m9RP(M7~?l9#u?w=emC1HPum5XjS69#mrR%`IB&U!Rt76Pa<#Z|dOUx3F~+DCG! 
zigG+647GPaUaWXidwP49_7jH9Or@$60pRZvEQsL`1OFWWB-rBF8!YwCP`0Y{I_B^yM9`}9oQF@g_i`QlQ=Zh_EuNDvhFrq z1y0|gebtDs#mlbGG>&rvHeAS2=VvXmhfTcCCadr5&IELp*q2O=jkq9ZHxC-7Wv?p@ zyp+y2p@OiQ3?&CtO&D4sbO_@jJW{Kn-i>BjGF*23@&zMz~SzVQmp z*YVQ7isw*RtQYK|yVxCj1MYkAOUTT@s3t37rq+vt?Ikm{k-5$clB8Dt!X+N}BGSyE zpM||snH`hQFt|b|+qhl<|KEoTJYkS4sit;IU43k`$6MOmf1X+Vhn*&8XckF@tYmPV zxT7(^-Pm%vT2P@)7(Q%1G$l`C>uUSgrx&Q?l_8ey*x8w8&(o7n*=MQQ{-?bKlvd4` z4jJOG4!bp{%#Y!)Rpc6TTMGzWY58R_*jKpA}U{j~Omo{QzpHflf>jISU;<7HrjXJ}5 znVp?=Uu#wk)Ajm?Vj7FlmshrPC&O3-e=xJyK79OwMEBe|(YQPU{-G#^#Xwnk!^Blx z`aVYqRkayg3F=_-y@2D1C`?BO?jQr}y&yAI?#C=ipP&pgcti4my%q#FVxy$deV%UM zt+8Z}c$tET{*D1FI)}10dO~mw#;7d4FD}BH2tY)kEE>8B5!3_~(=%Qca+p^Vm%ITN*#&R-4O z>gwyydb5u%8J=#RsbVGgE+bw1T%-BX(`**L$6d)5Oga%jVkOltCX2Wz0s`c$@4=mr zA4+C7%AHl{=giDC%bqOuS_|j7Y9e1xoMX;ax3gyh?qT1rCZm-bz z|I=mp_o?MPE^Yx_R6yB!{p==q6WKe(T+`7sj_Y}aI#Pq&GFh%0@kXJni|3m)!>F$> zEW6A5?~f*fO4F1iyOl6)rQ=)sdAMp}pbEjSQubXmPNIQmVC$HpA|y}UgvSxMWkZ<$ ztL2s0pOp57UBOLnrbdwc&B^nc8?|6AzO>H!wGgyelsP#^QA zwq8T*BWlUP`9WG;(FJJt7mo!srolaUTW*gDcN zv$f3z<7?Kt8!qlT;;`m*%NySYLiP!?RE+7$cV_>G`S^1P*4H z2>;mOfy-uKSooJx_T&CtQ}eiU#psdaPa0IjO*Rtz&(5#^v zoMHiCRx$2vDa}iX=`q?}T=CVqD77zAcXA-u`@vLgy4KyBBljqW@^C>kOfJb6uAkjW zW>AG0Bp40^b!J_o64(8`*UP%yuht5acX4oK3VlcUK|=f%-z8|^giNSOLi%g#oq$&v zJ?R_>KYzui!(Qwsr_M6bxfDF>^$C9rs9^>+$Yk_D$ci-QjCdN}sN`^Yz@@5CpW61p zP3^kUGE8GB*FIoqEc}x|LEAQZpx>onMtd)-N`fK{8kFOQ2J6}npCU<2n0J=xpdTS~Ub%r0P5on=ruzaOv#?$BFN z$u;EDvCH=%#kA^KzKpGm;U5ASYg0H2HxS{jKW{Ej$Ce;@#aCbA{O{4;5mHUe9k3Deb$$`<{ju( zA2Ls{)*kKjRV&e+F9&QB{RRDLRU(9`-LQJC^ShL1GqZ;t-R%FxzkDA-CU3oK2kV$G z7R#$04K*b3`@1Cf>hN6pzM-yrU+)M`n7yI8GdkjtdAygLRwwrC0>ns500BJwJnYz9 zhAVIS{P5lFqdd^op@MWa@Lq6vzIe9va1sx4PW?c!&IV1kcsYV_{;9vr*YahSkV7&u zS>Djg*E=QIu4eLh4$6Pvr;U|M){~T()cbWhttxyE2BOVuBF+=sVS%A@y>(xRe$eI9 z;R5HDN%9Z$EVkA?MKXk~S=Z^M!vjxOD;dO-p=*mwXJqIZ;)%M?xS^+01 z%D-lk$!mEE-4qeRXzFAOwX*4*h@i`(yz;E=Uk1V*`7F@>Z`rB;EIKnNO;^7xy(a_> z5$iUZTy-}|>biNUp}<~9?iIuxL>Q}N!}~V@t%1=QEc#Vn#5aVKCrq$3uEm1O`>*JQ zrITYW8&_VpW8RKAW3+ak(H?%;*zA>xM~g`q=agaJBRR4BuZ2TRs%R|(LhIxn^FOk6 z##{^h-(ptQ&4+F63mhoyy_&U*$Dpu;b}hqssmyS)Bb3o<4e!WfZ$ByEER}5kAQJuV z;D<$!(j^knPg&Q+0H3M4t|z^wSJijmxf*%BKvxa&Kbe3JT*jV(y}Li@RT*oqGZtegT6gyPCphcT znG^;H`+IWw4g&`(a2?acL^XFzbQ$~QJeiwCp*B7Q=>MLFB<7?OR(E#Vas;YB5~G>J z?1$J){NKy`&(MhyvaF*$bM3jkC_~{#`IhqT6dg!rvT;6XRrcCCzVmj(>z!>sJkQ_C z@-iu1Eiq6rg_@JrXBtgHl}L4Z%5s*MSCE@4Fj?A!@P@~bBe2G|X!P7UI`UKmbxm2; zd#7V@3&LULL_ql%?H9%*X)AdF`AO5(|8O2F7l6jJ)Hk0T&+en}_-EK-8JC$x?|pC1 zi>6*i<))wCKH<4T$IH0nm4;gl30x3`v?~l2x6gRFg|C-@!ntGjdY3YseSvUHgX@Zq z_K2HY_MnCT@zO#^2YCFiTi3P($YwrO|C#siMhk)ApHIs!7vl%>f9Wg_brY{CTCvu; zZVqgrOc&s*w^JEmn!+<%+Y0>a^+yYqJXe z$39BrOs@gOTCc1vB2k+l0qPZ#MeiuYPyepI;o2DU5HsqPnt`g3kwMQ2zA-Z}Ez7a;HYZFCG0c!{jKQ2U z+Y;bnB{F&VV`ABc!jM%Lc8hX(f;Ju)OPl4*Hs9WNIF6z6Qp|q8eR7h&{-?5ZMf5WG zC$C+xFuid?0l_t1pAM`WAX!aJew2tB({k!pMP%gHxlV3+C?1Jz`qGzSKGMP8`Z3gT zTeZAX8hBG3<(X>gfMNCNR@_Z7+-LarPH7uVHOZR7)vZ^sc_Y_)+IGFDJ`lrrJ;4W3?*!z;&4w92 z_w-z(@_F9XXt-(Q9vcE{RURqB_w>m+Tm-JJwC|pXnzAo7Y_9@yi$NmB&ZOJ5vyThH z-m|l|X3rj$0VX!UsM3f9F*gM~E(6t986@^t&87Zq?PS6yz}FuLJ zKI9ytx9tIz=^Yer!l~G3pR^wFRojK-L*Ud{z|H$`(;QaT zw^%tY1GI6nS5|*K(N{j;cqzHB^0^w34A$SNlEC~OCE~-`W4ZDvLR@pnNVBL8@*?_w zER7@}3o!(cYGwjF0~7~xRMbJ442h(4PI?x8k1Fp{1mp_&^cx4`sxM%bJOBdW{>|b$ z*9;5?6=J_@@)Ia82OU0>dA3li$)EI*;ttJs=wZgQ`6SH?y4EZ7elD9q9_2}H@;BpG z+!ajn-w7Eof^Wu$%?j$=Qo#{B$oiuU!jJy^Mp65KE-j?as2l^cg)QOwe+T*AYPZY6 zOHEb`c}&-P4~qhuCU`mwJ%IyUqj$?9DmGdSw883$UmZ6#Ok%&x(T}|PTUnjITTe6k zl1PX=-Cgmuix5xZiGOW#oxi`gw2^4kZ+ImV_h!ClqV;7+RC+oG@w8H~aTINi;;(z2 zGB3Jk`r{@<*MUV9ifx#%F9I%lKv3GpuzbU%k(@!j;(Q*DC(#VswQ*otQrzoPaFt$5 
zC@-S`1?4#k3_-4~STT*7NbKA3btt%jqbN+_I~D6?Qhy-<|TPBG#v zS7M2q3sA7YdJUINPDZF-YNduaV8e_kAUp{smwLTkDeuhg3Z3JdU%wT7D2y6=bU$R^ z-HnX12>Xb)-}V=HWH#lVH}r7_C4a7r3n9mRdS|L4`A)Pj@+x;--$#ZIh7<`hn4GHY zSNamN28?4F-4SXpCS40D-%Sd93gUBqtJ6=P!$Y5xV(%20w)trc8a0*n;4Y7+GpDL( zTH;~WPaeMCaR|5JgjA_6J^?lo?3Lj)imsC4NZPA3M3&E^_S>brZf(z)R@s-xIgzAf zw|ZSXA|W;0@|$_E@Sk?D!0`l%ia7lbOyx4#n6+>JfBd6*YNKhhQWdkl`}-g9!#j1j zzkDB1xA9oU?LTnp=;cd3j zi%CWFY^+kSuX1X3wkhDe%^$PAe$UNaL6Kl);A#BKInnfVdiDex@Nb`?=3(#0J~W+d zGM5^~eBP`2IgxrXmQ5})W6j4qY->4cAr^{U)M~_yYiyQ=EUcyhkiTjgcF;gR3V9W7 zgIljod~Qi;(Kj-;Lj;&8j^bH14op75qgDe$$1%Wnq-4CdfZI(A^0I)+QPm~eO}?cnpIGgkO4}J zGcOCbTg5tIe=o~|ZC(?K8a(JGzbgAZJly;SprRsulQk}y{J>F@533Gp>+e|*BVZEx z1(6Ytc6MhtsM>dNGD+6kMe7+J?Kve$CZXd$hk^IP*cYn)gTtc_NX{U1HhbB6Taf@} zXTIWUuuGZeI@WPV9>&Mq#`*L7Yg5__Kg$dt3?gq_c?1>bMOsR=79?0j@p1XiqC18v z9A*p#;gA>GPs)~-qVP#8H+Y2ZPnr^gkneO#6CBF5}2SFWd`ZvAI-J?5wyJLhUL~t=*##V zp^TV9#2_wE7i+x!G=|$Nz8Z8pl0UUVUf#|3xaYt=^r6~^49Z`RcGwC(f1ZkCwB8*BgYW)`e{aCTm|9mj(YnwydkccVxpA~} zf|w8}e93>-p-J&rWvbj+8R3X=vxe_3`a*d;$8+8+R@x!%y|5o0{TtTfs{Tb4zTVa- z&?!5hFQi71PrAm~WBIyFi{z-J`so141|rjr2s{7jA;v zKRzx38s8RqYqSK9#PhU{TAugb%?2?d0e}Xe4*7@|`0Kku)_wBAQqL&yMJdh%CA^5j zAI4HvepR-u3n{)$m?*vtt?2X4!XBj8CtR6M(e(24e7<};>^td$=lKC2LIJUn-3jo_3A+pk;;XL;+3CdMv5lNv+n33 z*XVHBrUy5)>s~VXvfmm2o3+lwpFUhxPs+ZBmSfCfneMyKHJLEUfPdnWd58!a+N&3c*B*v^O#;TS!A0MV zoV=QhL4sE*&6}3l^iQqsuD!Z}dL_kaO~bbJICQlLRE%5YVR2dFL=u`JRO;0v#vSR> zSm{qDA@G5#Y-cFVcEBK+(!gudg&qs*9laN)jotw?4`i(y92I-~W}LBRn< zUi~hftAOY{dw54dCe9b};=;X6otp9~6K&h?zSlU6`ad&33`Yu&`yC&?#%Wx&ba)a{ z{8NiQ!$8MYrwn2>{T_wY*MSvT4c?8no=>8SVy7T8oQ-UWoDbykL4HaL!JO=ce+%8) z)hnfr(S&zLcSJcc!*cHGs;RKHmOZ=uT~dPdq^eG%`rcp4S|<~+BiUn$57$ewS`kAp z?UGp_ZMwtujcoTyi=m4NBh^;s|GK?NZGRV>{?RNfQB6D@_z>O4V5p-nUt;p_hYeSx znEP1L0nY`~;rEe0jM57uc|TGAlpY(!3Cn#CEQA9?Sn;X0zTcSk-Xo^8{9b`q*g8yN z-TUIhnef{Fs?~b~k9az3mqAHXitUun^&7fC!EM+`+NJKMi2dZS(r0A z5>A?lz+t}yzPPr+EWs@l59UX&i$Mlh0XG3R(^W7!2B6Su&pwqo}B$P^dVi)Zh1SJbm!Q zKZW3xc85ml-A%Z;1(NrL@U=_tU!;L!$!u3co%LyXu~yfVN=$*JRKJ`g4TpStAGNJD zg`>Gt&ax%Li;MJ^1|n=o=@d(LHV+0$>cV6qespM<~j&?M7KZMOM2;M?sXcAVS{IsVvFpdnqszMct@xCh8_a51u^} z*q!gV$n{S)I@^Eue-1(+veDMtU#bC5L!(zN`LwoNF6y-g68ifITJ39f812-A=w5@X zprJ~1hb*l?Ar8m5v$_K_Xx<+$=u2psuNnQbWug!J8mK;8*>CX_m?>$hvIYs3?B*?s zKf2dVa+Wv*`$#*-fM4|gC~dScpqBuXS3e z&bH5yE4a9uJ@`-{$s1P>K#Y=u1Up3z32e?$gt}V3U46@ch_&TMvhGcF`WL`kh@^0l zi?P*fHcv8|7xrEQe+b{mtns;(_hWK1{}LYXMc;sz$zeDZ1VN;5WxoJJ;=8iO(8%v9 z=A^pJa7*Y6ej>WxCthN`vhDisDaskUkoT1)tC(}N&QmmlUfW;x%6q*68RfSAq`IW zuon_(Wo-RS)K0XLCZ)yS{A5@lRn7+7uS~_LCw^TGx=K;k#-Qj~HD}*XYo^JRIOn@$ zq(KvE7HLUk)VzWzIVuw@E{^C0|~^BsPgOT z{`}`g)iLB{f){WmQsy}7J83P~m(Tbpmtj&2NkfMn1BSh7Aq~P=+Jl0X9EXS>!X*)3 z*#TF^GC&z2K^7ZnTF>!F`KZJRep6P5yQIMuw4dKT5HDw@raGB5h3gh;Tv`BX;=&hM zg4KqK2E5$h&c)&@20?Klv1-0hu}7a8g}jUjXwHkvMjGW_hA^Tp)SdJq6I%sqy%Nc!$G75~_)-Be#Jh0DtRowCrmuoV6uJjiv zXkcttKT_SIRNjR;ldBQ+JS2oNe?Fu9*Fa@0ZtAmi@qI)zUINAeOolJmNCuSrNykg8nHT z0&Sxsjb{T$BI4_M{ajNft1h}2`Xc`>Pl)T3+~Xuu%M8`~#R3AiVlz z;?-ov9?NiE*s*q^GE6L1r{(?|`16y#;X5ZwiteYSG$V3s(St#Y?2M46V`@3PJq^*Y zt38nxN9@lWcx1)<(|09s$XCjN84s4;`-TC;s0s zUk}^rl&MQi?41X0(agp0ZBA7RI+!=nXkBUvP`L&4u>+eensrq&D*W>s!mB#Qr$s zuP)l_$UR|hN;!h)hlM##2&A@qEp?p2**AcCugwSX%$W0zj>@g@?Wlm=mNI*4rnAw2 zb&IRE#A!gmJF;XqxSfBrJdo}ZKp+7qeM7_SzumqHRPXX=5((H*rWspK#|aB(K;m1r zGhSxknmO+$9^D{MZ@DX=`J24=*Op2qzYsa1w6-xxjkaKZoe_WNTv1`AQL2qGLT$Rv zz+{y-^w_M&xZd7Q_A-a|>Oy#l@^QE(IJhf+`+4jRfMpfcS-lEu{S|%~6Pm?Lj{l(Z z!rgCCKR9|4ykR$cT)~p%Xt_#THKOlhvdw_kCi(1*tUFR%TSet%8;<>w@zH-U?Z+^3 zTR)5s^Sy`X{eCnqL`CJrP?Q)3LB6^aSnt?@=xsgrKW#oX3b`gLSo zq8_BJHXgyQADK7SA*c1J&EaKJEXAi7R0*k`J<`T|8;yKW2lNDZ0d2$d45z@x6(Njp 
zpVje18HUkX?2sCQkd&GfIsL;W_ARzG0)laq(2C;~k9vw=wi9d=zE^WW=|aG`RsZLa z3{t%6 zjWqwskF9gMP|w&3c|murN^$?=t!VYq%J+W^_W@cCu1juGi=Xr8X+MEh5c9ko{z8#gM*Jhdl=f}zmvA7 z$4xOxL)fhDfd4j8vqaNe^~v}3AFKW?GOBDOB3rHdLup;7|Gi`5+tv#;7?+I4W$~F! znaBvNcWL_gbk>MnyQD_V{E6YMh>L}AWd7>7dNfdMRi=z+u=lf7g60}4mu-Z~y1vfb zhqzye%Q+N=%aOYE9__y#;bwVZ#eDMMye^;}J4d!SmJ#&RFnrs9ytjYWfy}OFZx3%^ zAkNOFQVY;{pfLgayG91hAWIJ_PsK%mxJ3XQp2oW7Gpt7o2<0p&eJ9%>lU98dvI|67 z{K*4&;C8r$`!@(C)JMJ3O@SFM93|aV&+mG@- zu9J>{kV#hiwcw>ENg4Rt6iJS5|3u$ zeGrSN!#Miz)38)Jd>HVWtK6Hw8Bpq(VmzIxZe1?Up;PSYuYOng>H>T8UU`o4MBa}= z(W=O_I*Z1YNdxB68mycsh!;P3>#w%Rb5Awx;{h z6l6)u7M%(6!+$lNKLktLJiy_x@uZY62fPYcv-gMHJgUSc*|mPyc%v z*a5}nY#E$QR9T$LoXvDN>|qX0k6QFZBWs#mUu@-?BY?rD|6lLZS}i5TAKZTvnnAVD zg0qKWW)fQJX6O7H*^NUi8KkjqWNVX8n=x+{6kjbb?4;B)>nq&MCU_}sbWf(Ajxq3r z{Sjv_!f+0fo>?@VuJzR{)RI@TkBcVRYOp>k;UNX9(GdQ;axS&6nBqylkrdN12(@E= z9s(hH;Ad^b2v12~-I?DxoA;NAee`RL$ROfuA?S6wyA=+qwEd&#C$Lp&C7T}B^uPb~ z6xjI#sKWX7*+1V3FEwSOshE!S^pbMoGI|qNsLs~*|`~2SLPJyqq z9H~P&!j=y27-2fDJ_|CZtbUwWW*r@wbG8o`<}SZ&jX9)>QB(VQA5qb+$DzrjHnGM5 zznbu!oR|=zgZyu*b@l{Dv=pY{7WQGL}p?mp1Lp{-Lw;@hLU&W!GlA#}b zcgbg(cOfj-;#w+&*MX6;se9k$G&f*<{>|83p_9Stg0iT&mIhYPYV=qm2!o8xm~BiH9QQ zO(%=P+K1$hWDCFj?d@+Fu`%g)iJ4qsK=-BEp--rCj^+g2YYj%VuJdm4HwO3$N7~DZ zf*5A5vfYr|W!A6!@~UOl18J`X1p&PJtk|B0MF*$DL*R~R=3+!*uQB^>&v-2?3-y^e z_<4I%2F?MO;ls{EJrgR@tO~-EF|D;P$R|Y(y*jL|lv!{Lva|h;23M%&HOpegqu~x^ zKwsqAQXJ6>W$}MnJPS&r0mQ2ei$^1eg(MsUgT@$Qxre zNY$P1q}vy0#zVLKR>I9wG=;G~ys)t;BMqrx5oh{9EYm#)L&EeB16Z~7O?n`moQ!QH zxJ3)fZH(!?9=02eH@<++Z*972*jusU z+C-nhPgqTYmR27}{ToVa=MwjkA8hxo6&3>%a3ii8HZ&*~T2Z z>8UWIq>wpC8rlrlz3aa`S|YRfL5&4u`*kI?xowwi{a+uH!@LaTvhTn(#47z_+w%WbGUP3SV{dRU z&I~1&&1o6^U3>LJ^g_5$#m_Pl*8fCyOU1pok@&GNLXwp^zP&vtofI}%&N8J?5JUX_ zCFp|bdfd-l%5=W#h(zho%IiVpD>2V1UdO&zyFp7C`DghVP3+HjDxKv=A&Z=!L*^n> zG~V+;=BOpA7NvM?3D(={ix3K3trt0sskL&1P!39Y_eXRW=-%r!2y>s(W8N#s;U$S_CsF{PWNYz%1ArU&LjfxT;Sxcesgvb zRWnOIWBdGTtlR$y^2zCn{;TgJ5bhWXezq(n1=02W0jJqGxmP*=Q>#zl)GNt*$m3pD zN>TvWE69sv(EBC{;k;3{ZncItOdSQ%F6Dje!L{#x(l}?*1Y)?VL-##`x!dqT@e5fiUZzSKb%p2>4Od<^HK39q0 z@ubn>k%o~}mL?D(jUEx^)CSg(g7w0hGSqCq@q$UhrQLmu#oU1iel1?h)T~yXILJ$@ zKVvwLw>XIkm=`g9!0RTEb|Ojdw+4q*z5%`%#dDz+;vvCI1qyjh{+CvDc%?U<)G)o} zt4Jpk{6K>r`kES0ocb}+%FqaPM~=J7Z?FtlD)rtvOcK5>Y4j~myPzQUqMP%NrPF%c zgnbF+%#X*u|NHaJRtFIh`awWu^zTnOqAG2ms1}NU{O>Q5lHm3CQ1*dJvxnhl|M_Vghx2w04gQa%^Ny$T|KIpAlcb|4 zp>T{GStn$VI3%-}%9e5L6DK6=cYhy`pTGQx z<9^?-_v^Z@=M}H4mJqofNKt;+^9=kdr@r|nnh|6cck7i<;xxTJJe`3pHvuUMY4ao8 zin|NhSo6H}f!c1*Y2^+S;h_0AlA*7Mh0m6nr{a~>Aml0rT%7C^nODECdo|s02E43I zoNh99yxsplQJq`&Pz*yDfmn&EeDIczVI$`Uef;cc&)Wr-*>YiV6X|QBqPr=1Q zY*q3TwWuy6z$edb$y3cU{6^H-XQn(oasI}7t?t*xYi_MLz}2_PVkdegF91q9usu=3ZWdI6e2EfH&{oPj})MLhIeZ`hb+^+@(9&c3lU8)jic>_{E>M_Yu*K7SG zyTu_aLbQA9s`P&c83|l7a=%xwjpEcFm1kmCFvJNempHqUi%yS9@eRw6u1304+~6 zMM6F7h|MNx65e@2cUeRj7PnR5%JKx-n}PT}s=#jqhOoXvU}{dg$mbd?SHH`V8%rvG zzH9m}%OcUo^DEQ2fHXJ{9&eGpN%f=0%}#mJO2fk5kI}A)TK;izUVx>5t6$g^5 ztE)p9W~YG|qwZ&Ueqjc18;tXcSRm}C)95jxg0jnfc5?Bp%v%g< z8i{~MJr^5h`b^+>1hRNSUso|NzwZ_-gnM3v_Zh7p6w~DE2_oAw6zRk1WYIqJx37le zmW*%IS5+MGYQ$JHI6q}$(wE>H>U;Hy$F{-Sf%B2-JrO~P8Ox%b-kj1i#Z?C@^`zy% z>`?&4qF^jd!^_ef5%_oMTEuhL9mGB_Q?~@O!rEb|VwKLrilHCL-9Rvk_H|H>&(fm3 z+s@uzxenfkD$Q<+*VcX)1*}pee9B$1BRt`6f$BGjejrtVy^MTg4~l&KKJZ!1pSFBS zp6shzfRWw3@kp6t^F3g23Y!WzylVXEg?&U{*i`XH@<}?~?tFx#K|cUXoNM(c8HdG6 z=#koW2D1;~FhAa2)*D{qxq}M4ZShYYagdnaJc{-d0I~A3#%|Xnm_B{>lHpgbYdOZH zVXExbqmxvVh9dMfI{P+=Mn20fOP>4F1kr_sAJ&!X)^5wEXL$1ol#q>1`3@jz zimv*~f2HWcwc1zSP;zz39dx=GVMzcv-{@TiLgkOTiqyRK9iLI$L%v7v@>hQ?EUP!Q z$R{oTJ`Jr`I1DYw1iETm0E{`XXb8_OWjzH>hmgJf;wS&}vXKF@UN9h;-;nzs$Q4kG 
zzFi$xB)z%(Sw~3@c=R0#9{DvcSl@f_S0n5GxCC(|(UH{gP|6?O0cZ)(8YA$E` zjC`yo+A+mu`@ponD+0bwsXx3xZ1GX!#ImgxZqhk7ou{`aS=13Q*+O^T95H}0Z#3-f z;TLxL*t=WS>|Jrjsyt*CTdojek>Xck%kwiopS`XkU!`R6eXAHEWkjmJE^I`)tH8C>ELK%VmNjXsf4=J&1I$p0pwPNI{${mF3m z29BcS;uR6$c?H$$Z+SKx|Gv<%b6o3-Tzvl+e)HjOA+}Zbb=XqSf6oMxy?<)ub6a8y5*gc^k%v30(IU)J~NEi zHGSXeiDfeT3{o9`Fk^DX#xNPVCTZw8>~Pq{orwQiCM0e5S0|I_dzn0oW zilh~`yz*TBR(iBNzXn*9r$&MHk*-$^h*z`ik5ry;J_C!m&K=mLuvGN=cF;pBK{<$k zUjn=N{n_a^J^;gn=D$lBmge?zPU))fhu-DU=HB&ZeOj6WMw2F-w7=c&muY5HU@6zEiljE%lzxy(yXruuzJm#=RYpDg8k@K@w z0Wdl8h}81Z`O+opP``kS50R+u1B3tVU;(qS0$`WVm0F%vNQJ-mN&Y#ulkaeOt*wWB z zes4Xwx~j!i_0K}+@#>Vy>6hiXlhRfI&#s)&7ECIkK;tBQk-i0Bgm(E>T>Em{8oS*B z9hsSz+S7#jxnd-Z%?~MCH8H=Gbk-+PATS%r52<~_v2M#K-LeTs?2Oz=&R18B<;Tol ziDs-ZfPMP*>GE*HZo90HyHPqe9+p{NkC~5>fXLDY3?5{Hb008Vk#SFX(i3GCOPw~j zRy=9q;-&m|QFM`$<>1#&92!ll8k;hpK#mW)acnD>0p1GlXgj$G@)-042wNu~7tX2* zFM3I&3fj3n5X@JEARF1xzAoZ15k1(TPeEF8eafsDv(RP)!0ZM1LBdhV2)p6 z7q`Z9q1>(3Dz@?f^S3P76&f@$E(Ms;fQpMrbDBu%HxyjM@?L$F8QjHwoLmu2(f=QM z$F)LcYHHbC<$X!(wId*0ogv?5d3#d-!_g=H;wa4R*5WF1OE{7;6F@Ez2q7i^K7-jj z?0fE7f#do9Kpc0IblQ~|tv?)%R$r)dsI{ck#Y)#D0iF@4rm=LF)~Kt})U>d9AY0eB zqdXZNNMbBljR9x!SPRY@$l@3UYTx?To+>a^c+y;WG;h|Lv~Dy(=o4ddv8%FtAoqtC zwC<~n4cciH>#31;Yp$$~V>i^^YN;G{37Xf`yzc0`{{F@q@Y2HgB5&E%*Kk56lCPQV zz%7Vzm^7a(nUK2cR#WeCPO4-c4EIN8P~`PU%apoR0LhpxB_4!)WxTYs--|P$oY^fo zjwt~@nQIb(C&Tiji#b!`fc|> znB?Tl8aW*(x|Th0OYMHY&egh`o)!M^z`;O4)GY;CX_o8}yCKez7?r--G&A|@JU?kb z82V14`A+Es0+aLm4aI%tX;0H}VLY}L5Rbu(D=#IlI-^b;RO|`pWZ9I?n8U;9st_yqWjoJX4j$@cYY z1c0n;cvJ3f;hMt+!sEBS_GJcU)Y-YGme;T%-GBNG@=VKLFRHBno+>On@;#Y5Kd3OM z_V7rnAySta7^JT8v#QZP4e5eQ(Hl&VEcHt%vMGm*v2t0`)%QX5eHb958*mbyHs{{K z`S>O<=rCSi1_LDp0kFRDJpj@^fZaJfY3Ke&)wI{LhS*dTwLV`A@#r}Hb-dJl{O84@ zh^f!EDXq{7gAG*R9nes+(W2ZNzH`z}{#H&c^A3acblT2fc}HD-&uhibqf3>nRCmZ* ziM#A^TO@g_J`?d1lO?obUXhb)VNW;P^yY7hlrL{%!gSL8;u@vF8kXy~aa6&4B1V-} zxD}_FZy0qrz(5jCf|$EJ;{IqXAoWo|N5C_Z@l)~Sz=*=jwHinQwXfG}nDImObva|m zt*3M!sIF6w1%&s%ncBxMz~OWzy|X2t=G?8#uME!~oK={LtAtVLSr+hNjcc<_$=!=eaqOV_j0aySEo&13 zq-B#7iwnp{ae6F8(&03gv$G`TF-Ul3iUBR%-LAz)=|oF zHgPT_N{{h2^*^UKoY69Z|5`-0S`LwogyA-k%;{H}>Thg<_6;_^ZVBu%&~kpA2Eo-Nbw6m2fFw4M}sv1-qcL~u_5_stG~9(70TP!PVV=5 z>GxfZE&Uqzt{wV+>4#J;P2?-=B*N|wEVl5hx$rF6`oa&C3^XT!c|U!eK-`yd7g1Y@ zxAYwS`(j>>ZPk%jv8{A_b@Ywsu#wvDFc}`i@*0<1WAOmdjtEunZa-$ysTO>!eIK>xY+Xr#nf(!`ru ziN<^f`=~mhgrfqmJp+suR7f?|b{$+N@e-s5`zU$#mQ69{Z-Yw8tjhkqi^Yqxg>zT5 z5O)B*Q*S!^5A@(n1K|fUQcF-+L+1%h+=zwkDcsxc+&9nleR7QED;72c4^PDlS{}xm zMkS1=FdB2c`+XqHCTQmh)n$Ot$g`#s;w;gc({lj}}{!dNf2NzKRZ58s~*i{A%I*Z#JX z4L1=9DO_(Pzl=yQ!%F^Dn-{9++vM}RT>05IFxHmW!F8d>ZY(4`x>M@|>N*iS5G zrS|P1b;gjRr_imaOL8>Qo3ky-vMfAR>vcGna>$y zL`w?ePrbRv8EVX~CM8hzWum!$#oM`(v1o|N75aLn!I@eIv1;zB!rWc;xND{J!`&44dYBdx!oOAmns0ShCixvG&sth9%+BX3ElM!+#!-%a0|K%DfR(r~ zX+s3kIu{rgcE{cbIBaE;kW)9lw>W|Tn06J`%N{bjb2$3FMFhEUEzDZ`wfX=2T0sBA zG!Mcx8Ojm_QTfAL{O9zvt;*GvEv>uf0Ni&JzC$+l^-n0npbT^UBY4 zk-rPfG9BoOfR}<_*!wq<+bQoj8y%3q4Cv_OZgR0@^Kxqs4Q`|-!hW22fLK4sk_c49va2FvO_L_-jE;dNpjeym zCn9dhn~6B7w)7m)Zxxw#R-q-*)Be(ogsa(ua*oafo>xp?tG4>R+G-S#qHa;mTX8_% z^E@nK^TO>!Lh{ah${T?XV}i}kj34o6@Ygk=M73Uv{d+i(Tv^&>LO~DkXXhnGM@B!W zO(>$QqX`HW)yatj&X89#D(L8kswlO*WR!P>Tk-b$I>%`-&6btUpQUjOkXdt_jfARq zmI`Lb5-9LdEHw6&9HaS`?63xA;|iy3)SGbB_8-*{+oZ!g70LK8JcZN3mqcSmLdb)1 zq+f}*#NkWlfTI>z7XUiI6aWicKHNV%1aJ_?qm3Om><8p-*lIt?X#T98ipCkMQx8T( zCuFbU!N-VP41I^*uXzRU6{YxfMe$@;sM+LA-WTUZ+)<8?pG(d>76S;HD}K_UigAH} z?qn(>cgtJsuj9GnLmv*h?`=;FfO|uk`k9bJSiQs+M5_eJUE*8={*HwXH2zLKcd7wXYB4wW?!;g2y-N6L4WTZ=CiKX! 
zJGjh)aGUB295bM;Bm~>nvHX_lozT`hAC(hT7M%_Gwd4s(YL9ca%%%nl$y2!+%H@2d z5{?91MUO8%t%kFdyPNRyZQu{2&-)B6dP`H)M`0y9clG{^7oPw5dcK_n31g2tJFl71 zQy-<&*6&s)L4M#S{6z7?=QR?7I+xUf zHx8(mO*S>s5k;=}DqD-0!WTP%r87ZJGG_+n$0_6t?N!=W?gT~7=Z`!Bg0(l&9n1hX zZ^PN!oK}an_3J*!#B0yI@(ddC>vHK9SZCpN%TniGxX)KhQ&l2|0OKg|J8{NUy2MV@ z^DW8h@xp(|Yp4P&mzEwsGbN6RfM=B2vcrtpYLJU4Z*0Z`-na}fEOtHm7RUzjSd_$j zRW9UZ9|1>Xj{;^I7gzW;=cQYMU?2hplORw44+^Mk$eajP*T4KQUIn8iLl%N2?172G2SCj>|*yM<6YCg(rK?zsEOO{c0;E&^ThV zg`g68v~#p@>aAS(zpRjf!IolTLO{QGyZuZOEBz;%Pj+QIJVn#SlLlci)5Q5;8|tUd zE|8=&w(l)Ps^3^`pN{slvNL_toz0$TNzqM&FE~Jd<2Df?x1K)ZVTN%aoJX<`15hWX z)hxejTQ&8w#-C|?VU5ppD45D&$a!K+O#%;A@8DQtRkhIDi@uW5J~PsreMU3N%)%xg z|0u83_dcF9v7epLY1bFD|1GQZBYPt0+3k;4*r{qNb{cINa-Ao-OAB{I_Q=s;JEg9p-D@xXH+byfP_9gUC-M0CH#s>ohM9%EV zF>R=06F|Jv17vD|-RE%)h0g&}e@Em6>*^^#(IWX2dj^*84qozltzgRy(75dE9p=)l z)o#=^bkhQNi<8Y=&0G>-Y?<%v}G-t?mVhWhF{Z?n}@zD}Vj%`Y3BY z*z_6b3i~9}2>g9iaVu~zEp(Lif_UH!K;W{(qNdQk6|eP))xrmhsw@Vx++l{Vhd9U- zS8WCJjYON>N{W)Z7Q}I{@ZI(C)&TOpL<@(Z%zgHieDW@NVkH>gVB%KM`fvqcnjToC z_K(+AVm6tgdPI>x4a}XDt}!r+CNl>OruVN`&sx*NbBnphFgh4?lBk z@$w+tc{3zwv-^Sm;1*l~Jx z*6l8XBpsigpMaNxn?p~ARYEQ*thLFcaUV0&j_ystI2c@p#^WsBgQt_Jll8lKz(*Hn zq=w5(Wu>l7kzgkSwcxjO&c%etD7Cfh9CV~QI#%x!f;&z7mMRGH*)yf#h4BZ$n_Uuj z4)5qP$ zlFq-?+}xDbuif5mID8OgZDa=6@%>gt{MdQ?`TX?sNAOF#sWtx%$29Nxosv1Js4oZJ zcq?(7^u~2VF7NX}gK)?lfN{TYx@p>-MhZFyT%tgQ`u{))sQioVTM>tU|M)5+?SM2! z%5s{aB2eX7vb=X3j3>H8bsflmo$6hf2l_w%g*Ko5rn$#_6o8WPu#W#>KC}^9j@vES zTRQ(>!6ar~eFbb)Jp~*fc>}!H0#HDA9ZijcnI5h@P=p`yzdGdNO2^I2$N&$kV=^LU zd=OoS1~l=rI#vb> zA0<&xsoWq{z_b$pb#OG1mdVC!9t2l8Y~b_O(`&kimCrDVOod_Z^T#Oc7vMUuAM!mcTS4 zxI@TeK&jDnd-l@7{p-N%C0C$|*&5V|Z(!Q}JW8|g!9Zd$N8&0NOlyJ4uvEpx>? zBcB_9-qe4Il4{4(757F=~x|A*>nHNS9EN{Fet^6VJpj8X|4oD87VJvSjnE1#1M}pXg z%)Go)Yq8d?qm-7VS3JdKEkFOZZ9ImkF`fJNI)U(0#1<3|;vR!Av<`f=-FmTlWbRJ8wDzGyIM-W#I{^3aah<1KL{9LI^5`ZWS#8SC75C-w#rbod zrSJ~!epty$#~qNR(}ekzbfwA4qq}NeYB|L zz%f@r@nv*8)Qnz}X$%VK2wMH+b6fdYnXdtG)@v0~|4AKohZUhqxS9_#4XYgddTV56 zaFfNXp#3)CNz8j0XAXIC65*Rsp0vrL>cAJKNiI+vY|Ra0JLjk87{>z#r?tK0gMbA2 zrITQJQax2lM>XhROMWVJcABltHgdSdvg9PliW#*J*tQqWcNWxoZ8^Z{Py%Itr*R=X z`1BLzWG;O68m|Tju+<6|pAtRJh}M6vL@cFV{7t<$xHzp2XbPbmm@Z2wh6h)<0;K}D z7c9pB5yAVe^2zoNfDGFMMrH>y?ehQSBSKuq6w~I}mb64i5XG^PTVwic4gkzeIY1Y}PtG<%S>Ls`#*~I%0_Xyd8vmevCBZ=q zx&&o&6UPGo@q*u_))y;)THa<=9dY=wwN%Do03y4>x}~oyK*1T3C{lf2uDMjge0ZYHSrR&IFu=%yJi4pw1hm zqSC${c)C`Jv58J#*XmTbcHqxQH3(v8$=12NQ&M?PUE&7*9XE~X4}UL|i7Ncv?vvMW zTA>p6;6v4n9ot}Na)J2Iisv(b_h4{lDymOMi>dh>EDu6f>xjjfn=g`a6F~*oVp6 zf?_gopRPpe?szwUx=B+v2P|pUes$v5SRSp-2b1SIsZqAy`ntakVUR;J8z-Arb5nP-deJxbpjoS1e*5PrPJnd~3)_5LW>GbJs#(NHq%vE}Jb}p}aSirSx3yx8{f4BcMU%L_e6$?qxXb zH(@>2H<0^0msBz6Nz{L9G+unhv=7}}TEtWrabJU-nOQu0!Lni;VPjPT&aHgU)U>py zf8@ylkJmYv94xf7vqiOi}j7;iYO`@v#jDuRCmFfR9oui!2m9xbLDXB zU^-+$ueA}_s~zsn?fG)`KhpzJQ(0T)cU6=o$7H{mn;Z$!bOPtW(Z+cH|BODl{^WUl z$k{(>P$*?YhBWRBfb@Kg$^sW(>7NTqUSj_8{&^M!u;zsVo63`2Noj?GQEPy}_p%W0 ziY4H|KG0PZclh7Gdnn3keV+eOK!6;Hn461G zU&Bc+RfgjY85hDi#j<={!#X-b4glVz=Q@6rqA2cj3xM9|$VlsvIeXAttqJ(XX-mp= zFnHp%ViFpc;a~>(u1h-Jyj#AQAL64GO}DmN5(tndT0|&*CUt)AL#olh0%1et?CbgH zLQv?buYAD-K&Ab;6A)!)G$Ij~hG-Y?vlT5qG}msn5(9euEC7eJVtcT({#%k1An36{ zaoQfcfqp?#Qu^JpOmZhs#g&hh#`QjnoqL|Lf2pQgArLwq{v)qe7uAtzO{o;ItKJG5 ztD#h$DwT)xeO~+T^T`qBTS89sa;vuNRC}SoRv-)9wIr5l-bgFv5#NbmSYZ~PY`JMY z^-Q}e!;5NIHpQLU8QXH&ZD;_tK~8NQAS}+dI;7A3Q)&LEpQy^l$}+*kqN|W&xH1`p zwwQelm8E55u}SPVqDb7!f^lBYiK=r)%9U~cxb=+yk7U1$w&`X9AUt_I6#t=*0sg)> zAiRHQIhS6tHKK`PL&wjk3yB+0*H2pcOzqf`K+8NXQReh3 z&4;>&eSdCAIrfLeaF9^(zgZ|)l8)^vto)0gRXTM)dndS==&$^K_Ga2xrsn5NW1RTb zw7Jh1Av{8LmSLoQ=IuO{*8o-e#P<2FZR1Ri%jMNz{Na3N(B9g~NS@5tpFHc+{j;M5 
z{Kff+$t~nm|248fYGJUeepgNMXmsqS8tm=({K}P+5j~NWFGz9wCmq%~8+t7+dJP0Q zIO_x04V>WENrS`|?m#))F+;XT)k*l*12rdlroQm2Pwagv@<_(&zq zhr^*FE?HZ8IGrml8w7hGFEbpP&kT>%C9{g^+P^84RTo(v`z(Gn84jt8{hk*6O&vD<5deqTHP0<wk zmJ8Ux0ry19j~PDN&}++cbHIz~>syBspn@rXT!LwMzvRdtZ&}>9**cRg6#3uxH0NA@ zxtAN>&G|ZRu~NH-1G6s2r-@&~59=1Rq2gEABYpAp4%z+zM**1YKIX1+*3}N8Cuet5 z%l)oK0clA)^3LDDeZP8tDv9p}2<<5Fr%bzP6mKc3#v?roQBoatYyULVJack9z``wK zFuoqt*#mVjjX_ptZSrp5-uYqucEXzS?xLNZVX;}$I?h`4v9Hl;47#+8PD4E~hd)8N zUA7SbZIM$jH;XR&92N05RgEbEn6wMOP91yW0~dGn)Z(~k+-|QIR5FZKBtu6iY`Q)P zCBm4M*o}LldTC1bkZ@qBrg zB_mUfS-|OP&!^NFyc2Wodha5Q{Bi|ZG%AZT=8JI{M>FZw7mam4uz3886tm6Y>8Viz zZL7%G<>r7rt6>(CZ105IEbyMNp6$<%BU`=HAk38*UQg|DMR9!NmV-M^fk=G6hDnGz zCz8n*OC1V1!my7^C71lIYYhnD@RFllfaTs|_VGJY&xF9N*akI-u9Ne|7rrh}g+EGP zpL}6IyRGVZku(44d5Mc=guwZeMOHP5p4H&@S#oh&GK*mz@3K9n!u+bE-C^!Nvvezo-9g)fUdxr6U^|5Zx$_avR$)LKnxZKRQ&C7aQ+T^J_HS93{3u5KaXxm&&+MbF?EV%)m@6_ zVT9Z`A^f;Vb|n z6LxVPmayTitgI5&Qcz&@&#N8Szg2BMF`p0TxdQb(bL&FBLl#*v0Gvi=?fMDgT!7{r|GdrFpRf#`1IRQvRv6o_XixQ{D%>w{4^dF4Ses2Cz|_^rf?> zAIxupD31)(C$BB2IO@YP5mN{%k@hRgvP6^pwD$Qhzl+~4W|`#n3uW57eNKFI+fDe-)w9B1VC zXv1F>*m1;Qu)`0&N`hiL$KAQr0o{Q$u<-EOwQG>tDxF(Y-Q@PLh2WFHDV5_LVB9C6 zPRIboD>S^_>edE^aadQ`I+-^(`$MVb!8|}Ug$Ae1028Z~IMgevod9H$K~+Q^n$N z03HDB%XQ>0HdM~J&xbGe$zC5wrT6-Sn~!7BbHreU(%;^A#qfW=flUkKYi-!>sGQEO z2jZ?~I#4VQg*st3tR6Al$7KdtfjjT8I!>E=V5dy*Q@$f%9RK<5Aj3q~`m6no-+~^E zz+Y$g*{)+RGlI|qtKNRU5O#u&k`_AUIBHe>!D}UoFN`&6%(V?~$9;L7;`ptVCt8b) zyh?bNY^4VJfLqJH+#ZzOu%RR=Nyn_(B$9|;$h}e{$6w5w zmB!%w!VpHA6_cKk=$%gW>FjTyNUcuCmUv03Q$PQo%sg>m8o*8x_eHRB{y8UQ%;k2e zGgq8BVAcXw&~Ilt9pZx{Mekd#4dRH=53bk)EF@U*g=JU%}xc1^AJM{oJ>7ON8-Lap)g3TJ}q< zT`B+N_JN%IbH%HvcS>P>k-8A2T*7oO4bQ{WHs7JX1kRDWX(g z#?Y%KOuN!OWp0%LY3P$dNubP74QCiP(bJR245!D$N}oq}J}Eg)s7< zDsW-riu0-DM1IL>0(r{uT9K2p0)}g-)|y8bgejxQiPrw-ne5$wFRX8_ z{$m}X#A_SbezL2OrG+yAE^|}=Ys0+=hydnr1(?G7HKH&mvsUYx<8}KV28K5tfymS$ zSc0OyebUFH2jT$05PkeNRQdf^-dJ3HZD#(Mtp15oX|{$y+_zv;;}WnTK`tcF5xS)) zED1p=0?GKzUhxdJ$7lx zD`Ah`idFIis;Z8NhT58_*dDW9ik{#RS>C;i&r+dzqL@nF?w43E~G_ zzR!=*tB-gjU4gE98U+QL3TC)0Q@3oA|C=qv7%JZLml5Yn7bn$sCwodW!o@hV6Fwvc zfGj<96Ewjf*DUN%n{ER5-*6c?lEzKI#bVKe8v&BIA??5bUzv`$SS(|3H#C0wjG2Iyt?<&l~A?p^y1PAc* zqwaDq1SOrJaCnV71};R^oR`kvw^YYrChM*HiWW4+5mYHl9mHCJuJpNNz}lB4B=k{h z!oPpu1iI3rTl~&RK8i8Nm*t)N7+t;Ta^>{gPz-fJ;geVR_^5Y?tY|0E2YG(!^T-W5 z(cUyfWG7d4c4R}KdGr8}%XPKUpyz89kNot~uT<@C{Mo!`4QOF|f7>#nL9n&flWupe zV2`i;_gI{91!9!Og|MksaFG^G9q%>$Kz4gwsNS->S@m#j)z(oE@KRtPKwoZ*^^ym| ztV|UO(A=neN}5hNgAkq%=r#{K{Mnjche}P<_SoEpyae@u4HKaBn)^RH6|KK!CH7aB zCEV5>|6y_Q=P$cGAhJdrY#N(QvD5hMm+wn3Ez8zZ=)ly5JVUKW?R1*PweyD2D8g^l zHEzH|1b*7Wt~w-F!Y=gVh2vsj77!o8;pSbOu_*9bkeGvX-PWqPg&vfY-{x`O_1Wv= zVBCzVVvp>~Ha9oCO+(v7zuf(2$Tqc#Tk-wVVwKZjTn6P>YDe(Wr7VQDXglHojpIJu z69%U>L*P&9#UOTVxSQV&zHN`spqQJq3Pd!Ob>*P&GLJJAtcT=^Tb;8I`=v^re;VmS zSvY@)x?_!aGzE|lx*o`!_uQM#xmWu_H-ayF$Go9iD1RpP0FxltF*@6Rna_MQ+xADhqF4H^vX*oZP ze1>3aou?gVThXi z+lFh1hI$WY;>Z(S$`%#sZ{CM=h?8>W1Vlf+y zh~G2gwKT&7xsSZ66@cq7+o`}}G#>40?OKjb;=%P3?MLI;(F5qTH1@01&s7;C;iHFt z=-H?S`$#(M{SJWX{C{6)n+}z2|79>AwK4#I9j+D8*}AT#C0CkFsIPQ&6YXm%ylcrC z91ySwX4&?}w?$l>A8$14E~iOSW6Yojy~@vAX`CDZ(Vn6jORpIVYq6X0KZ#G%fg)Fo zwYJO)3Jc*qRG*`skC?jxig*DjsNl{`0)h88@IJkUib)7PIuDU-$~Wkion4s25wrY~ zxhN8S8GKpLYrOP^K)@wwxy+Fg>W)nr&p=e+xKc^xc^tT1$op{l0+!COh2aO{S4M|* z;p%tce;yP9g&p&kql7xI8eamz;{1>*p{Je!M~k-vA}o=o60=$0sR#!hxMbkje*^|O(f8Pu%zTon%HeLZ1(RRqBi z7YdNipblm-mfkjM9H^!9^La|h`Q*Y>RCjzWVmZ=hVzhrCyV!Zt0p-+i~Z2^0whUzy`D zn>AOUA+;^~BJF~}SKzxiQ5!$22ySwf=B!mfwqH*2pso`v#EB+vFCTVsN_P`_h zc>vYF&i*_ltc7ZWcwCFGb1kn30WDXOfjDf?Fq-8U)^gSgVrTt0SyE(cmW42JJT&d= 
zU||OG#rL>_clLDaz;+DK;txfT=@5&|V%%Ne*gu#LHDrujmR;lJJ_K0aL_Yz$YeKwT z^Cz_gxCk<|PLo0Io;2kX6Xeaj@#JR&=$nh*PlJj{&ZZ1+8rmZEw;inqHIa#CVwyS)Sd;&HSl6>OUWgi$gIi>|V;GB_LDlQ$1nT-kJ zck<4HmHc%~n{4fqG}eCoNGEy^MNvC3MWL0>zc5YIizbS_^S&E5S8{Q+{5GhF3YymX zSA({mii(p{GcqNTg_jvBLGv+61CsPepE`4V{T0 z6enqhX8Hzh)iTzsqkc}ssYY}6J1zz7Ih=&E*f~n={(HLd_jDGqiT{VU%@u@b!7}`i zYd`-paB<7p0)P$-P?93kvQ#omQ1yQ=w7`Shw9`*FHpZpGAL5gFLqkJda7a=PI=fqT zCCCaSw8~}A_Y3wIa`-!?dk%<@!s;Rb?WeX8$;2n5CB&U9JD8t>tZvJO9G^gGk0IY6_9UxR^Em49m(`ZOtyZiK+fl^DZ7 zX#+~wyEgJE68Z|H^x1N#7t~O)_f8|&0J^;O3SBC<_NGmK^=j}|qJ#IRfn&jK2ka=L~98)-23It@< z@vY)Bq2vh{Llz01E@$?1=bWsqgCVM;6D7$;a~V-58|FX%|A6j~3=1c?v)Gj+3@}*e z-8Uy4`Sf8t=7W(^6DF$pm{jj=cS>k~gIm|w{RUDy_ zH0ptQ50atv<|(Z%(Bym64# znjgRmaKqlPONwGZ_PW#4ZypOWu&b>jPeT|j%985mpI~(;MD0daq)%qM!H;-o?0WsU zgBd~}(~|kiL)YeR>EPVH@UPw&uszy z@)1edp5^$U>-W@7R<0}S*}=Kj)z+}jD$L^gwH7yCMmF;ARUtUg{?f4VyFZ$}wC3Y3 zsqssH7lsh!9n?V^7@pBtIvSr7Z_`ItfqOzLV~#C+zx>vhwM#}1t|%+$KZ%v1wZBbO zBh>PiHsf0-EiO;0x`;`W%Hr)j+z`$u@yre6_*BC+D^7^_!*~C;^I{1y$AUsD*CHCO zJTn??6k0+gR9J#PU~yH(dd3dGS|Pv3m%SgXb_;J+oi)5V*9r*Dz)`;od;?`Z_dFCE zZpqhwDareZU5$*yhzxuG;I_hthXvf53UbF%#lnDHjsoLH;)9fKzzgNAkHt~;ng~-4 zIpR~sMgReHi)_!tLLi!B{y3POdfZeZ2?HOP#TF&>D(|6K>GEDMprlL^(mWgUy!FF< zs-X2?a|C2j&!k@R| zsZpvT->y>gH+6K7@r5amK?CaZzK3vFaVIZHF{Qgrq(DMqWd)^tBjBf_cDKYzKH7=B zR53+P&d3Qteo34+gE{ zC(W;>fv3hS-|ktTewn#Av<~$YDClvau61k)W;D zo6ukHp4o}@UixUA|7av~;Ic!b+`RuSs=ACf)lYRr`;(l+w6E{DqlAjJ$n|%A+YZ=| zK8YthwB^6ywLacg$y72PO(5NDe9Z<8(!)|5TpnXHs61!aGu7m9EZX>L0-LS@kXY zmq6xZyfow?4Ko!y^xcXpf56u!lM@Z~RVyRO8^+dbNz3f^LdC(^uhC`wAF8$r&jz_8 zj)txC3%K#Teo=z%HecCBYs8-mLH+<#EW7bYSX2LaT|!(Z{&3jOD$}^wu(SJ|=T|qF zpAqke+7COK3#h(pb+&!}QvL3<;EQ5*`z&Cr?;i&2Hb_zTjIOZ|$->yyIzv{cAKZsf zlKHR%Q@gIbN@1o0kcm9B9boujq3~Y+DB!3&O$8wIWk#0>Lm)xZz1cu>uD=EF&N*kl zlS&F|&uDq@9ry=)lXGY?+>#LeIF?DBSy%2J9Dp|GSNi~P*a|HQkh(ko$oPKAuw*SQ z{s)KiL_f2s$;{~XL+<|0B=ssRAQ83X7;B4;5V)2k^%Nk!;Rv@mp9Z_6+BrQ9<5skK z?G!?m)|xB)gc5PFY7o(V85Aqa6r<8f;F61F4OKPbmKPsQ_(shY>S1YzAjW0ky%Q zo-$0`Emc_K!uN-qN^C5pUiDMe#<)psbgQ$e%bv>ZOTOok1~Eo2M0+QN+>8H@qw|iZ zdjI42u}9(@sZPi^Nyv;c65^2Tj+J8*#j$5b93>9PDA}Ww-9h%~7@1|2ajYE4cFc}F zf1mrifA_eLhr{=KKA-pd^?E+(Anpp0{O&4NIq9|?P1koGx4t@-i4}HBUNO1;bij1#G*mRqkoW9go~Lrg^XJTvraSXTyY*;~f2(@CeoT8_XZ#AAa*OahER-0< z_#af?#C&~NDrnOK&QMiF8mo zMBOD?WYv9B8sn=!RiKOtMP;{#f2noC6BxT7!v=TC!yq(iX$h%mR1@x$w zVKsCO)IwNl8cfZ8{hA*Typ0e)v$va=U&rWrGY5T|5KrCAKAAd^kY7OkC>P}qA1R%x zBmxb#S3Q`il#%+Aiwlhn{eo-h5RzrY1`zahb{z!!$4LHremByK{1-%SBo72CX95{| z0GysSCaWO4HeC>*z!Nw7$nC{($iZ^Ag#RHhI~5Y&poAP&fq9nbyF_4X-P`cU{`WIC zyYDgW#_wrCv6pRaSr3-}nfl*;>X5lm?0QI}_MYhQFC8z808w`$*lcdBO@?sh6HScg zxLofdv*S8D^A{!`QV31xUb8ljcoY-?7?3H^?!`b_j@s#Im)< zvtP+6D15@xnF!#|C)m`~?3)8Yu40m>znV^KY<99wR-0yEu_sGA5!_~we;#n{69Fi3eWeSqO!vkp^@t=`n zZ7rKx9RViL3&YL7&67zq4B2L>0Y9aSG@USnca`Gh)n@9CGmU*~(o?`EEfBfzJm4<^ z8)rq4g#4KSqr}d*!^sad7t{=}~q}i`)%HKkxH?_X4x>+Ch?YKJ|ozEWe|B*;i<(13|Z2#Yl~j-rz$*VUwHr zs${o3+stBS_WCsrmN)Q}1+XI~J*S$)^ZooRlEz{g0O( zvKu$My>G65Mu^gM(nTgnLT{*E8`Ok2cKvWHgk z?P{i%EX^+3&2EU+-rt*^nF}1#Z2$p&k%ieYD%r5oI^K`F5jdUyp1Vj;nzJC+Pw{4? 
[GIT binary patch data omitted — base85-encoded blob, not reproducible as text]
zttT14@|xBTny7_d7@R8xmp8I7zG}&c*@pb;K<63F>XLpBVE^lUA}LlwmHFtDJRq=` z4t7rEX`PPNPm)b9qpNXQLIq&Gbg-ba~``h8A-NO(@fd z=*<#~YgW{$CXY5LS*$oFl&BgN`Z_Fq9EqagSeZB$)Lg(!`^k=NYqM&9-WCwzsj9ft0tcIaQS_ z;b(jH_1L#vts&ciCs&{Q4$}Al;Ss3MP6GNDS5Ee7PqsG4ZlQ;+hi|Tdn93@ul%cpc z`MYCY>G!p#eCrryk7?-0(NQmD__av_O8{br9M6rst(SE_%p!u(R~Telw7 zdz1k&Lb=V8u_od)Qv97|R*ugL1W5Nf^}!Q{sY85hS_5XZFRdBD-@q+pR$;3TlJk-& zWs55nq<@?zf8u(>9~!We*PhODXE*UfeukM3mH{qv zEqtRRqNPjy^>Fgt=_VygP%guH<y?Xr#$V2b~y2TJnuQ`du79VS}ttz z#fKF?V@`8dqytEz>;2kfh-IX+>j>QZJyAv52YK?hV@P~5j^AMD8c8lh;&_`>m2?0w zxMDeQB!6tj$4aM(#Yy+-2M1jA2^t9%!yKK^SeF{AauQ0sB;%bj$HfjSRQ!{6ADOZOj=1RAzFz)p z}2gM0=${12e^hg~J+our46)`^|2BH0CBItEF~Kr@nxd)hT;Ny#9l zKJp`4ZHSa0TS@d1FBX?&lDSNeyd)u!Z#bspPeZtDPxK(r)Gun#HAtc` z>T~+Y&_y`J*AQ2kmNJ3J9ACU{CjOQwFpFw_+mQ7+6bCG`)DfSTx@j#ID3HpbntQB z?7YU%v6#{`Z{-MX5n_W9W*L-j_r;VF9vMgNKOT!=?@5*oZWd65cWS~fo-F8F`i-qr zOOgP9gp8o|_JVjFSnSE<$PUW7mk0(d$HP(PQEblyYg~VMPJiRhgUyNGc8F8|(adO<;?y&c7lcp-^cz4o& zO!Wv5!c4s$kI51(;mE8dKV?u62iX+4zd@hh7?NJaybXHguOwiC%u*tpPn#;;B=e&| z4~`@CJl>A#;@av*l+F0hEl#i ze9qqdyL3#3UgS84qoz}*KmDs;z zYVUXdS?(Fes~?m+ zxYCpUL6B@c>0 zh}^=<22ygqhsJgX-z6)3uBn`<+omtd2~ zswpRTNeLA}`l`*upoyv+oXzx-F~Qr%2&{t;d-pca_lJI@l8Y9Lp+?Dra<+P&i*5(={vDPE4kZX7vg~QJ&64skO$OVT zE+;ShtaT?$fi0TnKRK?;?^raQE_;t7(HLr*p7SL*S=h+wGF3jD;zomeB&%R z2N9nFA=)C>%m#v72D7lj^`E;G+NIzuP`-w@A3yM!v@CODFH4owmv_)Vh^)oR=Sul8 zyT<#7DXVJ0HH3tO9$a{)S&1X%{Y*I3hmQ1L%6k<26zd^TP0eTKvzC%J99Zo2&ZX_k zz;P5BEcAjH6ZL83$~P9+!!;lM3W-P6b+O|qPWqb!XztfW!|8ydH#K7L=;_5OtO)dFW-Sl<88Q-W$yLDg ze92FBedo}t^Vqq0F6`teU_h2)45`~_@7&NS{@DYUd&uLOm&Mi*aU#qxw+q$-#s3br z`O&klkGJ|aBKEANcO1`Y+`n|`QkK&zij$=|yNT`O_LFaj{~x*fVmiyq%hJ~yf=#!j ztXq*Tac={Ng+^uPULu@r#$*i#>Scw$+1qsnpP^q2%%xON+wG42KE#m#lQDch^aLcT z_UvfdWZ}%$4~Zai$05rp*Pvpct$ILa^=W*d5-=nSN@XW9Uyb`5M?%hCF`bhY-a}HC zd^!gQ7LT+mfYD#?uot-L>9d1IsQ=%J6(e6Kg)|Z#*4lY5wSqhFHwR}D9<$iiSLy+j z?ghTyNds->6HmM@BMFg1 z%BgZFYI4XqQi&-MGG|7F!Ynz=`Bcd%hsfCwG3PnX`B2Hk7#Uj;l2|N_mUH;MKHs1J zT$jrn_I@3n_kBO^;;l)(^eh|Cn$~H0eR}1yYv(NPwkOk&1KcORl%!{t5?YsEhv&*T z%amkW%~!J*#Fr~-@;ldf#p#lp9Ln-p<_LNBGWCn@a=%_h>#JQh-q`J-QT0VN zsniPAL~>A=Z&x4)Gp_P;H7d8?xAUwl737b=Ii5^aeI;2ctQn0(iSk5_#1|fTp16A< z)__^ck)e2)@LDbU+uuvI?>Qjv%MUlE*R)*_(T|hkd<+FNCkZNXX{$eS&$^*Gv-rg- z09=Ulr#DFNE)UbG;En`3aMr6@blf>^%>G9G)TL-;_?^97$u1B##^t1wRilQ zwPNu&54$&qV(DDV?M1A#UiN(DyqtYozY9`8xT3cYkrz~xXq^hux7@TZo&yK_CI>1b zw^@y3?=t%GWOtfMnlnTd%N{_%JG?PJCiN`F;vJ|d$5UMT4rRfO6PDnHGmOA@8d`B01UY1w)*10L1v?SeZC3nJiAuod?2}6Xh48Wv(yjxoOJnw6 zb)L%T7LCa-8fJTQ{utFx_C-{(u|k#^EXElaJD{bVVcWhv*R`)*+uNE4E3FiG%ySnp zQ5rlr9SyVkXkZHub2122DfE zGSfHpSVlV{;S&8PoH_~U8&naz`WF<{Q!YxQ{fSLXcTZcJW?{eLz2D*~w{Re=9OHb6 zu!ucN?F2R3n`wY~DnoOFb5jkx|7{()|(!9<>dM zV&Zc9EGbW;LDQ#yTwxsW8JP4{Gh=zk8izIe7=nJnVcJOUqvT;g8wj@Zq^ ztq^=VF(Y;ywjXR`Pmd2~SR2C&uodYr7=UJpBC9J()x%%JF{q!Y!U9!>nyCIUjc6PE(38RFFPjqce0r2{}d3I?Ag1 zp<$^n$Cj;Hji4np)~d`e!d{!9w;M71SSco*fAuSA5&lPv5oCiKM@ zQ5T03&T|$)j+9{Ark&+ooN2nJYPMvS-BD_B@zn?H$e~f5YeA7G_H%|ep0SEz?8ngL z6oZva+;`p85M~v=Jk@D8`?B~`p$E6`wpH$&;zw`VBKuI+|K7pCBgN{t=#Uzv&)_du zm>|R|@+?kT2p7_vD#@xNC?DHKsm{1qhG>1yE->a|$;TmLl45#5i`-V!TCwi0@>Gh> z;ouwwDqrLZs4{lk-v@N`J;wewJ#$ZK_z{i0q5Y`ks@=ca^Wq18YqN-FA+PWtmbtO< z=x1Qz*Ya=gOk!hUSBV{ z2iJ!>@k+zLo#9@@1`8^1K3 zA7>#tgqUU?hgtt{aEFyuzjGXS|D;SYsAfj^B3^m3@}DMxdJfZhua|6UE`A-#khhVb zW_+^wbU=sTe6FkGN%R9%wO> zuK>pENjgHllo8vEQr)@5Sd`9cMg?@7R=KB;#h07c`%dHnsDh6z8qvHCPy zycX{~K8s1}Ch(7>AQS3dN-7L>e$r=oz0z)7>!>!ou)lvTtGJVy?m`Mq8nB_p-Zk3$ zx4>+W6XlhcLL%(X(J+!c3jE5|j~{;KyJ2OV8+YOTPDvHi^OT}y)?S2rZt`>UJ9N>S zn##W`yUA^pO*7e{W(G=wa=vGcDq=5M}lAfO8#>TqB4 
z>VE{z@svrKXi#JCA9FD!Jx5JRXF>~aMIQo+Ol)@AF$28ir!?>CxxvboWTQl}jUjY_ipB3qoYk>wKw3;M#+%$0r`R;O ztL8H896X_*F`U(WP+I5#=etnx$!mD8%pUOD)Do0;frt9 zNNpY#7s$w7d=G(V^S{)O1E>6Ym}`9NkLw2uikrN=_*F*4!4X=QvTO^t%eci&*BCzC zSzZY>8i$3SER9Gm8@b+W9QbchsOMi_?Nfj#F^$|MZi-(AfzF^N(*8zZ&)#C5cNp1S zywV2k@=>&`@kXoTsGouzpW(oGQ%qgvAZxV0{_a`+)V@=|l_Y-VOrdY3PYoVo*QB6Y z#3NZZKEJd-pEBDOd63_gKto!3szoUgY|M{xT*#l^`PSuK!z5@*y;-4}hCwHq4*fef zJT>{?%cSHSe6Ssu%nJIMTBe%P--@eb*K;+&j1Ac#shaN-(Tj#iM6tYKh>PB5=+ryG z+J(ojkSGgFha~LUg>}@`>o3H^8_j>VZ%$sY+c{?SrJ>BN5nI|V{_!(hQ}x|TutEKR z7bd50-So`euvVPeUnlk#ZaLxi=jh&~EMjHwB(#=3$O;Zg)~C%eq_QRLCsBSlaOLrt z$+EL_8pAtK*E^gewenpODIL~|_d3CmBkYi{9e{B+O{_IEL`ttdaiFWvU5!7&Z?sZ8?keU zjPNyM!;D!c)n{bfmf?8Kkr7;Q(Yk%{#j>b+X^E0J%vC<_rBaNDQH9b?%b$GC2%!nT z<%b_Nsm(>GE1>@2M;h#P`uFOI--hy0#CADm@k!Q~C2|h5=G&*gQ&M91Add^M#knNA ziQ+n(8La7337r>a(ddUm+fw1OQgM&(EQNvZF*T}c?3dK`{oB8EteXR}CLvq*J&vxs zO0c0g-a7A#@KdP#b@xpRj^ZiL>Xcveo~bPMEF`@YV>GPenm-Jc)Va=$AFs#U;ENx9 z$#RzvUEn13`U_c$*Ff`xdUUIFp;B4);|@h{`4oj$&CJtg#AcF#IXGAjo?zH}vg;8f zF0NTC-Fi#W(e}E=KM!4*u<|ScBu>wAC5wm~ZOm zj>{adJb1$PmwemfqIjJ}`?IA)^EKb#9czkb z4(qzkJwCxFPlx*smVQ7>M&mp(UWo11_`?&iJ1BO=faol`YH5912cs0kS_*?t^;_nfj zx9Q5J#30*6rLvFG&t@L-$0stoS9WmXVN;@v0b#c9$foRL#{AEm%YJJ1%8ID6GnK(m zZATtW1s#0vd(n|6giM&fY)9jJoP?C`183HsI$&)ObMaSTV93L+q_#{u!$%!BidM3V zf|(~k>VFpBXlViI-Yfh$iyrZ7OG{gS{S$qr8DRD1g|*ItEnBP%O1obJsy&IyoDvfG zCg)+RGuAjbb9nf_`;`&2E$o}!;SYA?=F?LDjq<|QeQjwe;@2}QGTk^O{ZZCc*sDey z?5T!ONbwuqy+(@N^YNdy_dMAlE<=}oWe4Y$p>6v?_23(Tpd26MSpB=@-B_u+|@Dm|m5{;xlA|AmOlgM zt$N-%?jS{9?bSqjJ*LgG40Y9HF%&o13U8|}&A3eH$T1t4V3`F_iiKU<%8r#`fbfcDo6>d0vsY6r6TDSUnDXaC3OJL%N zl1DpG-12Mi8XB?i<@TG%q`*h_KWg4n5K@y-R9Za6DRDW)x~(%)s`5wIc@X=Sg5L(# zj?#2rD?%RAq+cc^I!%&QszbVXAM5%=X2FR;h!8$z`%EIu4R^!n1J+k?*&yF#}sS9atVVpp}|CQUrqBwp|Bul}Y7 z-upc}Lf2Q3IjSP?Blpai>sRPzAA`wF-GWx|Si5<%mruCklw1eWDtVs!DLj44{jLQ? z_K~kD%vYNcRX)S7UoW+sd-I2E7P0NeulyGE)zg5BV*J8nh32DdbK|>nk15^gVusQ5 z{@FaAC*KU?>PZ(37Cn2z*gd@L?tEtVsjK+0R(AX6ge?KTC`@%LSPVAwsVQ|ohrJ(| zNJt;XwL#G*;J@+ys`+UVh~GIGb$fFdApkKnfU34yzZFoC@2T{&FD#ocQOs!Yv{3$Kxsk16^M4B zB3aczf##9x*crqh(Ce$6ulSJ7#*dXnk09DO#RGKh@J13bNTiKJ0Gm$m0KzX#P4ECnwZrBTO`tULD|Vh!>`RyO_xGE zFFjAIGElu-EZ6rlCWc!;3SJ@m1aiaXzE!fHdv4@b-~qs<&51j9Ml5xS_pkkD@BwS? 
zy2fm0#2%LO$@BU4Gbv@Uioh4R36gCQAwkup%UXqOad)PRO$`ETT=x_KIOAZe=ihZ; zn0@)S*tz0QnD!%?P(CNxuNT0Tp%F-9n7;MOSAvP2;ekAVp;5HL%7Gze@x9-FtfMPo z)}E?TIVY=h@(CA5E{|2!rK6I-NzV)9M|yZJFYsnUPYyRVJ-m; zB@RJQ%9=#yL(^vPFV}bKR6Y5wrre4)caJw2>u_>7Oj#R2t)l6LGwFGwC|lNVlUjy>s0&>>qKK32{H0Sj2}zXz5PE~lF^v4%f?DK z=PD1~nfDsdpAb1$oPUvSlcScNb7C3H95t#ah7=KHQZOV@Nt4E7R-=zI8BNmbS;S30RCE(lb#krl1sis$)#-nI-`P?=(7UX^ndaBY+|vwmV^Ml&B;$+w|WG z@Jav&I)5BBDErGA-RZe-kTaXzTlX%!9$t_I47JXolTU(ZdCkazrht=h}mRU+&bgs+~wRgrUeA({(@Eek442CC3>j6Two%yZ_jaIbqE>)^dbyaq1yD zrXBz|3{z!G#}Oh=85nQ9`3@Ow)Mti0tBD7TvvC5>UPCv=fLchggDIR~E24Ld#*ml( zZWFWMQg!i}BvTrA@#Z1?_>kzEQoCH^v^q6|$yZlLyld;6{@t!)S*>NwMk{ZsYTh~egYqJx-Mb<7L~Xi+4>#jwo-R0OS`83+<@UMrVGqTEGk;Rs4_ zyv{?3?Y_AQH_ZOj^}QCNTW3$tEf_L(=ieJaMqupT6S;+S*+$3Tx*Y5k=?&P{6)zXW zpJKfd%nNit?jmz3%+IfmVlqGUey2&EC}4j7XB@X#cv#To0+TXwefrMga9%e9gC8FK zZ#hPG03={N;|kBxfTqW9QkEi^SADYG{k}kK@pg>G=Fq-`9;~ju%>XxJ4i7+Oy{z~m zjypUsG?4i?^5E}d|Cz3cy&ZsHy{@eZOzIm#AUjnXxi>XG-@EuN@ekNy|ILZ-ukLML z|F;QlT@a@lu`=N7+}hf@l>FvTFpn(}BM`)3NrFraf(L%4=OF4e1YVA}amk@yPKJ!4 z!*4^et==1?Hn$`wA>D9%@@|{XwbLVo*CG&$&}hOD@y;V9b^?C+S^BSx_y5p(!k8~= zJDQWiYY$xww@6Y#krT~N@H3v_e_)u7(6AE_!-_0oj>ywMf}P9D(LhHlkJU)!woKiZ0{V}V_{)hCeg5)$sc*j?(K;|`}H)+elc4)z#&9FcqU zdmyytVp757S3|J5ZzflwWOc&7^RPOq9i_+4qS2>ZZ1|D`&94t@9cq;Hzvhl~ON8p* z+6>v$hf0^WKKf%v?^4vH%JUg|Ru~$eE5S5wIL2^uT&u-q7kP*w>q<&;1-q`$SwCyN zpgJK>nkhvo$Ry#l*38b@E7-m>;r^CLkDbf&j#T2la&G=cmwT46t->gVR+kq zz&{7|Mh(SP8fU>pI0kGE{!C0!y}Macef3qbP49v^>rtVqgvoDucjqKiqbxqNoqTm~Z8@TEsNV}% zJLg4wfual4tpF=bRyDa5JOq96HlpWVrZlay`l!kX`7?T@j-E}vcd*m%cAElU0Nd>X znU*3As8`78&puiQoHJcvYhdUsa~{-@gm3k{vEg z>mD|^0xzuWK<$cH_jI73bIk{1yvY7QKMQ~C$WzDFo#10h-%}+;`+i>MaDT?g{p}A} zPGQ}cEH_D)xN;A>(r^D|sENw-%2tobW;*N~O)X1seyq7h`7HRfJ)j^9m{vD6GXNRD z;~Mhuz@@(}EgCbscEkl`&jkO9=W(69wT?Xf^ME_gE1~?iw*eUR2*Q?48pemXfrvJA zzN>8kp(bcB;t!OA=6Abp4*f(ybHT;YCPTRO)~lGBAL%d4JtlF;i5}qlLuQ-NUQLF| z6N}Y5f(r;f7D~x+kL71_K*$App7kZ#)(}eC#-h@7oH6nAN>=F%(Q8+)Vlh3t`yI@M zVyz%4sr1&HUDIW0oTDPT78!DY;n$Ak+}SG;-o+ig1~G6KJ()v23^5!)+U<6O`AdkKym6A3PjJ(t*jji?*!6in`9EM$hN9XX#=U`E}C{{E73>)9%iVtXc z{NoxMGo=Dbt~}czHOCU=qtD9Cd03+R>~9(P*|v8d3^VqBgPnOuAPg+|n-kXe)Epk> zlcH}-c(A|Zvl)GP5nCz1^9f9>IO_&9E8Qco^`zHZ-==UA(39Rc`S0+y68ZY)w%3p1 zPx2W+WsUXFZH8OzZQ&xh6vyBBYRvX(Xzma0%0Jay7B^Fe?>b(*Z9Z>9sAK zSd8;5Iv=0pXH5N@#Cu_Tez#{o>)>x8$my-EQ@t@|*0nQQNOJ|=&6F~7MYW)vb;8Kej?^V zZ}%6-FL!+Q3JtpI(`DFZ*=U{hr0s8EmnK6$HC|$K5`%k32H4knP>G1PWgO~R%qJ^( zyxAd$pup!X)u?h@ZB67kU6)jDP~!%sU-RVo_Uc>>r=)lC{8+4;NSS`iPZYI#cX_2s zqjs|SXw66|i3r}f{lt%Qo*&_{#ogJOG=$+SI=1)NORUeJX5K|+Od=|eJr%|!C8@-( zN!yIoGx4zfK5@Gcd3%$0vwxmk3-;QHsw}eh)vuADcI&%a=hw(>WfP%u^O&iXosJHn znO%$ub&@xHmKUFifs>n=$|)|=mG5RP9sqpB4`8|6)=Nm~ccJ1@vxjF1Y6T+?yZD5W z6Un))3d|;w`M~6~n1VpAQ=&0oTR)^ZtU9(gQ?yLxr&r`9pf@SOf!FS(OIB5mrvKUY zH)+!{+MQng(>Fc32`d+Cjrf`G2mLcWTr|teR92)v1!3Ue0)?!`4Lv1n+EJ+xdd+OZ zn`-=zYJMvjP?8iIWq`NP|1tgb>(8Vzy4HgN+V%vo!@0-hB+{#5-T&Ir7kGtI|8B&6 z1hCDmOkcpcLd;k~*qY?jaBJ;`5M#4!5UUWZ7O&h59*vwz1>PL_By}uKEZ7V{! 
zS}7{qkEFSPp=78PoAXlA8V%l1_0>ezhUDc+%r{e_Xm0$4=$8{B?vrf6Fx?1p8 zHf~~GVEdnsUZ$bHmh{?78GK@mYCQHZ5xTsE#l7f`f|d@R5|z?ZgKj@fKZcdo&N`>B zd)yP+cFtbyR*VaxrjanZeYvkS6T@l7cVfpge_972s>x-az$KtXgBS#gxuZCW#wYz( zfYf-f%6l-X%9GY}Eb$UHy7f~|i#eaH)XN*OY}b#AA;JyUw}(z-RXnY^h{&Mr<|{_ii`PvVjEHwIe)b!8IkH}i}y)l%Si*4FlX|K|1myURNZ#4 zO;)<>SGE4jrLD7SeX?u44Z@=+VNYb{?HkWMX`m+ZTUtTr$oQWL_Qy zOggBW+Q<%HK<{s$`X?Q2>~Kg z(39{aO|}bdB(lJ~XZ4}aY06>qz%U&cPE}^M6S7a13i&P&gH;L7f#cpq?*0I@;+19W zgUMb^T5dB+w-HFhWx)kI-HL5d2IOW)aTNE-iVVwjjp4@j@Ra`J5Wc}j)V#i0B3Iry zmMX8}7+_>9Ub+`o{;rksh%FF>_Zx8i7&o8^&=$0=w7qnGF37cLAb9iW0ljZA*m-fy z_eP1GX6Z*f=*i-lNz8)#CL%fJ79YECgQOWtKre3mY2Dlz&l=fEc%p=#yrvOHdpQYs z$gXB$!Z~9192X4GMjL5vwu`}(hI7=e{`Q+)mAO?46^u}bf#%Y=o5aRrbpOes6r561 zjwj#d?p|g<=+#Sx@|}+wF>p_LH_IS4k-%vtzpUTIEI9lQKE2rkfEcbtGq+3PuOmc` zRyyUkDVH&WU)_7&{nfQeOx)N1&GFXGc1!Qcfd0)O`p2!jz3C# zdVJ=o`sKqZsNS!!BadS@~@_|z>IVWAzM zM(!+-$x|)&dLo!u@tAKKyAMdV(KRYTjX>}s_M6`GO#-j>zZ?Sb-d=INRizx!Q|;oM zE!T)NE$#V5EmV9&bEIyNdgQ>!;2?U0tyS&qU~XH&1^?Tg`Mmk)=1*G!RaGV~>NKuv z2dOpFJK`^>%@shxeY48ui5ZLhe_R@SGD-_pX7K<*3T)sAu z`N9GzkN^5B;0mq1h`Q7dW+$|4b7&{FoaqE};jZyfq6DDCaHXGtLMxwq0`T?IrFG=dyF>GorK}cHqL-J-ZTVg0IwEP1}aI34F1mllRU-AWuksz{NkvwS#k6^gtLm39}dHdm$kP70FqCQ~0R6 zzH{(hXFe91`dUHvt+pRS8xxO1{X%!Y_Yr$nPOBUcd;a}*^^dGHTs@QwP0oTY_+Kle zNH4-&5@XV0EYPSb*BuOT+kaOd*Bt;-#y=c7$YptKnGx!^brdZ{BL#h`JQpE=bZ7`t&MI4ap)dQs9BfNFRy*G$uKbE*#vheNC9AXCXlMNO(gZ@w_(kPNE*BX_#=0>3J)B-r103f#3e9E4oP zuLdT5vYiUZSCh$tE3*o!`8dk!qhtZT3IE_!*NW&{0{V$BpoApQOe93 z#zD!kGfG_8@wT`nTJgj3`LUQgW_v~0WRW)=|K!Zw;tZ;ZKd#X~s#CI~JLL{$)q zZ=BRo%&JefF#@rAvEQ6OyBkXqqhRCgG~KgrkqFBMgSf}xHcta<9lLEQ>$Q=2s*}ci zun&BA)X}G~Xts=;ble$T>PGlf3yR?&0dZ!NOs5&Yg6PUG`HIkjIp4M3H6mRP+w6@6 z1$lvi_ad2qLEJTcntgqQMR#s&YjL5EPDvYR179 zo#6@`T2kO>6}vH z={vuttr=|Jm|5TjJb*zM89v_)>ZX7p8-Po8sNq!3Z)J-2W)47xC6?E)ln9OG6Z{^b zRY-OF^w~;padG28&n(@y?@E6N{?;YZ%0@WHwbDOSM#Rtd-@{WH(2V&t;XB8lQh%lF zMR*Splv9)lttIJ+KCGkaNq!21zgw$(F5HrDw_W37oZUMzKPIeRHug!9iUKqZ>Sp-$ zrlj+#nl!FrV@E^VS0Rb`sJE6GrvrpzupXIi$xz?Q+x0)CLK-xRJaX%18DImafu0jA zU2EM;o~WD*Hj3rbzdPR93_>~mrqn4~utCjaa!HXfK~&!rFp&WcLgAu-dXe)3Dc>0! zz4D8QF}ZGa?(yf905ICe;G5~84_Df8G%&10j0aQj@{sbwb(}YhtT~LQ!0j7g5j;4M z@}-!Ohf>S!pworrsokf;KXTO$aZNid;^^Jmi*$fO5zObFmn$kMla*`X8FH*8@)s_`UI zRVjq;7xzurvO6W|Y;omj?z(b+6)(85x##xZx%t9_m08Em*3i8j`Kwo6>hQ&BK6@LJ zp?j|J_LsZD5JN{J1!x~QsF1Qe(z zQ_Xr)8`Gn_j+{czS(Mnq-=#jE4A@-n@K&TTQy4w9@I+}TWlG4b2z~hEUv5%cxA;!8 zAL)n?2FVAp;jo0btB>SCqV6!m4i<T`MFNiWCZ!^^>SE%I+xAMPq_ z1?RKqeD(ZD!Ahq2D0qFjNV#^Xl}*s;-`C4aE%m6L2vO-`*~>XOwLJ+VvL9=nKH4-9 zU`rqV=f%y%b;&|0B+{(M4Rjov0-{bF9d2~;TvTPc(X*yfe|w&>a}Y$Qd}*feS!%n&(HCrtRD)~)o0DYzU48u_ zcly!lOsKa}g@ch~G-6QHr3T$I8{4b|!>w>Yu|F}4kW*!;vX3rx1^@1N6nWXe(jNgrDfRBL>8m_;I}tOfXx5D509kRqe8%9`qz}X^~lYjt_kT5gY7lu zE~ly4$yXq*#J`=Yo#_tWeb6#x46Fj=pd7fYW@&8yEoV>;RgD)Y`sj%NMQq4~^Hnf* z)tj1Dg2uE>kqBfB3oGO$zPFC~ZUF&*(Dr^u_2w5Xa8;jbj!U~Q3%y!cg3p9UQ~N?^ zRcrjL`5)1sA z%B4TRrQx<4l=LON;h5J7`yw!IKj7B4S=2vlu66U6aUID5H)C&_%yC>=Z#?icBzgWL z;<wv< zpBuRX{0j%n$er29y}a4Zu)UC

C;kIRhg~gM!vAOC`Sn4{$JCXdp2~=e39X=Y38A zFV6&940n{oQ-vTtSGIFgs;KMSSN4>CJz_li4~a+;e#ZV|xl&)k<((EiAGZL5JbCx> z_zf4#Mn;IZFG*}3a}*{MG#juo%KPq$uHfsYn@$|>uF$L3*h&!Bb-&pqo0|5MIfdh^ z>u;XT=Dgh3ppZrO*je3Y-gEp4ni{uKynBMP3=v9&rb4QJR}>@Hepl5d;U(Y~FIn8E zv)`+%iH_uO+%o^ygc)^}BBX6c1lpl0RfnaoD#~kTAh=ySZ2R)ayt*nYzcZVq zWFyBtHv_>=)6&IsmSJuAsB}aAC@>-(E*+2PIcf~t7hHalx^ubaH4{=}qdYN*uupvh z4GIp_=goj7pt;!eRk4g&L8%<2^NAv-Rg+Rozt~+dK5ntSm{%LQ_99S8A!U=duqO;W ztG&6t$0tYGX{U?2eL(NguRy(7kh9p|noogZu^#!B9MxZ^XVg%{jQ*uG__1r3X(Sr( zg%ox%htlAxF{&yBabPvgnqLz%SCC%UJ4iX$7e83tofXmB+`PBHP>pYj%Ub#;lv6j_m^73m1BGQo?D?QgmewfgSJEZ%`*czPwj zSSkC*qO|2fb1^t>vxFP)8FPUNN>@RIU zntFoWDNdS|0&;i%HW(4%tzQ;$s~`fp`nvV@!SGwI$%v{n=@xqk$vJW{~WPC8d0q2S^7u04=O?L@AU8S^3X0cFzVD zB^V`^`{455n5fRIZh1|s2bSDbR7)5<*|nVBfp&17-Z64XMBgBvZ(GGg6 zbkUUl^DaXTa-`2dWLL--p#dw<-V~^bpalm|q;j!%Ky2l~Tj>S#T<6y7A-LyzPg+6l zn{==3?vX-kh31&SFxAp((|&MP&F-`9AA-8kzdrhz*KGM~>Az~^{UB|y4D8w=?|9^7 z_3W*5FuHfAz;X^Z)Rb0Wg`mi2hZNYn+uwKUMMWnAIzTTePcI+!KkdH^Pz%e-UdWc| z!=mH-kDq+%lIqbpE(`RaxX-F6Aj`9UdQTYRv(FSGs`y`k1lck**iC?J}l!>gMojS*Np=v@wVBOeveL!;&RH&U7`12 zwJ|AUdPnYpo5G?-I7pQB@)mJcddq@lIzyh-cU0M-DZ{r%KnwEkXWSrxP(u+$I-tco zKMFS_dqN>U!sjs~PN@-pdo868(`ySq4z+>zpXUWbuEz+BctVqDzt$O9j zdP3|3n*HN1%=%2#i90A_=_P9!J-Vpn<09FTW|aRIEHx4dTq=_RDwBI<&Kf;(7M#aFf$aiZ2(bH#~|KkZD`Urohz zd-xwiAfFifl140h0Uq1T)W7D0$R{YovhShj{IO&MxxVboVAiO#hy>O3wuk5pN4{ z5>3>CifvgTB*q6;&}{9+WC}>8n!hcu(W^Xq!U=Lacz?cUpMJ2>vsVkuXc~cm5Jg^% zj&6FcdJyO^Cj`-+s>u+x=X+zxOPvbDEJeW63EL&s>dn8jP;l&08(LpmBKPbOBM-p0 zp{74neflxl>0@~>+uKVjhpxEX>+TL+`%bWBRfbeB_J&c^fB@TDI(J`x%S^Zh;eHJ- z2gaWA>tdD79tClbH^-KPkWJ041}xF?e$M9dl`{Sp70Vmu!F9>cI##M771P|$uytd~ z@qBu0!qR3aQTRM1X!=vUm%69q*MI)kzu;IPCCibL6@?*ymSdWt+5hkz6ycV}@%l_X z+!ygcCztk|WeNuxOl|HXsM)f5BwGk??6*Xpm~08Q$jiTK&>3!>iBQ7cew|p1+D&;Q zlBVagg}g)(VfEs8@=5>lpPl_M~}uT zy;kd(j*4Ncw*U~$uV0s>sU27=z{Z~2-+zIXABR(0m(#x5y!qNEq}ZO9cL?HsSKQnYbw2d}9BzT<3iqQBK7>WziK0^zJN^gcohK6eVijld@0JqF} zH|>1VRGeCLno)(Da7^uN_}<1!-t;2_&3qFflr7fdCYaT_)@(0NTa%k`NVX@~6^liQ z`*1^Wv|96x_tAgSykgNT80Mb)`p*;Mg0EISh)8=%>8^@uRU^+#^BSF?R}=2Byls$+ z9$!DLLPLx*FD1Lw{ObB|vKctLCm&tchSh=S^!wu}#0A!@m2HaPH$ihlC26Mz;+>1D z>c&p|hho~a7U*I16%EI+X}l9wy4jdug&yX>lCpjQc2%DMk0q zr3UBzrz@?3t(n-{8%|nhmhsLngoOqkT^+VwK{EIyN>xYv^08}6b8Ef*DMani*}v1j zr|xx!?k*;K>9c%*!y^D=?%yhC4*TcUdFo#0nfCnc{f+rHoz zkSy{mDCJU#5-yorlu?zW0pSjGYAgkf*>`g3!4p~k{HnT$zeA3T-rY-nDad;kf`4qt zDcF+Ynsl@Vz(4eg(T0dmxueZV^|C^aG-6N%nV3}zYNwu*mASaxlwui`t(uA}1Njl- zrardsD=R4Ib0m*sG}ggIDDegQyqm|v6UHQ}5qaOs23$28TJMJGvjAAf;uvVN|wA%6LoP!-|&wBVKc((a+ zJ#u|X^7zt|JWqfr`6L^Jg4Tyg8L{Uef#-w%=-3hdY%ITgg3W`1#V-`+2xrg@4So#lj-szJUpt?)d!p_T z6X&P?@KPM02{~M3Wj2%uzF2&AT(d>rwNKb+U}y{tj`;*(RN78AH7gK9C@U+otaWp! 
z&A$r;_=t?tneFk=xC=sfcJB|^R&4h7c3h0RtQyw6Mh#%S=PxJ7v}JMS9cfAU2>h|q z4^Bp5UfldrLrgQWDte+9lI+Sg!~b4;q-@?CVePzVCk#7G$fY6@l&7=XoO*IKI&vT72v-)?^GZ{Lw> zjBmX3LkQm-+SZrk(-rl@yE_A9kfzMY?G=rle-kahFtWeD z+v7-ZYdPwg2wE_2j|=zhMNl_q4nv#<@IU0-Y{{bLSAMax4C_j|vb>YO7mKXWPjLx4 zGwQMV>@DQOd$tSjmx<=}or_*5lIm8ix@l85u!A7eEWh4bc8JU^yyuF{W;GY z6Av~QUI+n<32;&%elr+lKDttjXE&-#6X$J^O4!Jd5cu0*HzgRD_k%CG`>W_!{v&UY zH-_JV-aJ>`=FSmf;<0$Y=5Q)dUe|lyBK%|tq6!(@&%tOSEO_hB0+TjRJ_`Y0)vsDf z^JMaI$P6gSI2M~e);VO#dy8%L9j^l~Yc5S0AVzP_wa@ZMUM>ay<)9pJR5PU=m&8l{ zMG5783wO_x>4U{!eMI|#d%JADONDJ}|qX*b_rh@Z5jLtGs*?JDaZIJg)>x191V*QN!m*;FC*HyLR0 zH|KM`{*R+`k7xRQ|M-Y}9*IrF=}KmA*JJj&kh`@XK%>-oHYt&07f0?9QY_VdCB4U^}Z1F^y; zu!R^6=J&7{rRRZx>#CIqVH9kVdV`hC}cHqS0@r8s!VX^RU20 zDA|Rq&V;X%7#l!H%7`!_KiQhv>V>0b8a~Y{7L<9i!V+7r@P4GfopDa2IMJSls#v|O zE3X3lq3-VybT)7Ek4&fIPp;fr%ccrpPeTgpd$fsokqn%^nE}yfU4%{EOjP$W9=bM@t>+^?- zou+Kl-s)Nt*MfGoktStst^ICNwfO;dGwR7zNpI1pN^MQy-@VDck!ejHp5JSh-3j|s zj|Bo~KI4m(B48aeS9tcHZJ_*Y?sLF?%`UZgH%JnVr3t z!B70ec+>Y~Uv)aD1}ZD!)_BjlrdbGt=eeZ5H|^HbM#qS;G{x;=@4w_Yfxm6F8!~vf z>aBk5+-r4uwM(oRlvwB7wCH{Z)mQRo!9IrDNMJYHJ2)ud+m(0C0TD62*@}0uu`&-7 zm5$+ks*_UDHpwF{YtMX^>ct&5id%%LO&Mp-edHc!m=AEsU6OBidzY-4mHY zE;mpCS4QfXU4KL=Lmkp^%ny3h3&4P2pwmgHbWF^9Ie|+|$gxRODUjZ@RYaOVo`i^^ zKM3jAQw2pw)5F9ZWyI-h5Za`B?3M_$+sd{=@xp0N4+f0yS--X@O`hY9c9#I3Y}Mys zOmya%R!Df2XSM;n$^c<)h2-Q8d&LHkvEn=av+|+aXgIWaZ^h&2UuP(eTvhZR;TB4q zahAnKz4M7i7O2tZgYGV(&buE$0NR4o-no6>ztJ9{>r3;p@%#>4%Y*;6f9ch>wA>s0 zv(m9L#|Vx|UZ1ONPvm;X%J-RMS|+Kr%v{|KH^l(~~5)r_PTQTT+mn@6ni7UGZh)q9+xrKWrz@ z{o6}jp8`m&jYw4-sy3p9gE>Bd zq~`c}Sh`qV$)d!{JSD&Uqrl7`zu}uMwCYn^r+ijl7SB22>W_Rc(kvR>MC5-T$DYvE zFHAtk5BiU)eAVcEV~Fswop$|f*%Ww7ya1(7u1h#`T1b)CS~=1dK)C}4Z?<4zXRV^KbPq(6~khcq&IC+j7nq^!fvX3{C1S@5|eFq+N=wt%sQRykZ6eN z?0?rU+CEu?_Pr;xn4Q!o=*b9@0Yq9QLyQGxtbP}}@%k?XH~)k;M785-MEI7a@$<)oL0 zop=;EKaxsyvfjDnQz9un&(dT*QZ<|kVpLA7% zlZaer=b|@U$8iDECh&}y2NP%)j6QByNbk*^)+N0sGOT9-QfpSXrA15;KVO72k1J3v z`dJPFa-qBX-2vA&n#!l#PabUwR1jj!zgh$KPtkO9%&I*!S?Y7=zPap*n2M2_+>^5P9~`bVLlr zFGh;d_kn=C{|s}I&5!E3hCqPilsc~oNw6Wg+`@58+@&|-Y)A9b!HhkTaMv+-t!+?> zDL59;4GP^FXlc5ml^VMLCQh91x=mK6{Z5|&4z~rxe7Cy?bkI=TdVybX(z&U(`$Y}Q z{U(#OD#<@Hd<^&uN(WkE#p%D(^JKh&-z9=6Fmic(d|WLqxmTQ|Tp&C;a#_%}-P(k- z^rCb|i)Dj6+A{;%3SW9kz7|Q}6{u54c(QN20Kn|lV96xYbX^;8t*tZl+!7|#zsPKq zGXneXi#%-rOPO#Uidq~TZ7@Wy$ggC*FBYb#R>BmP&pvSiWq{}q@F{Z1tUGsum01S` zvJKH8t{a~A^T4N!Gd?#~)P81udFetmwJ53F)yUuxp|G!4vv_3mz43cP+0?Qye2{p| zBj?I06_#sUfG^PC5Qf2yq?~}s0G>(lx8bhK(JIuQqIW#crXClj6?4H{4_xHitIB1s zv$4m9=|}aIw3=Nt=iz~HX1I<1Hu?zlJ_a$`Q=0_5BBG2GGas+#Ox^NwT9Aw zlS0IgY8Rb?-chtcxA@#eNNZOWo8>Z+Wzidx@~QT*#5WwG zTjNBx&^_lk`xXpy%@OJ;vloa?sx75ql?M{AXh zF0Mx^V~KHd&_(}S1s~l8UAJ(>->E0uh_hMIP?$r-S-W-Zte59TCAVVswaxN|*NxrY zJ+Qg8{PXu1_;K?hi`7Y8`zqj_9S0kG(yIi`*)c-@N z(*d$fGh>$H5u4aT*|#n5sc|!|8-9)o12suctqi(4wTG7-QZo}h%>$M5~_=KAxUYt?DB;hCC}fWcmsZfUOfJK?wdaM#?b*N3~+$tJ~P* zkz#opYisgy;6nh3N1Huy7=wzDFhEnUr#<;0axbvy{V^7lWFE6lgJ2~LW*Sqlo`N0u zaA(~4sz){S&ZH>erU=ipE5W=~3=;ND9i^Q?r}dwzNj&vt=voe!fY_~Tbe{nzjn zfRUPQNa=5v7JdGv6o8RE;UzK0iKWn{M#*?{r3=#4r_f=fo1K_e9CrD~%ucIqoqO9Y zn6rd8Y8I^6@0hAog(-mPn zM7&w$Ul`L{aYyYiHl~|1#oDNt__7H>XXlX=3w^9kgiX&*&?_(VMGi>VphRnP&llEP z{lATlmO9ef$V~kZ&eQzI4JR~A@NjXZt){(Yj|ukbLdYAnhUe+*e4K(PuBaM9*-o0r zQ`@7o_w6r%vD&zIM_8)8M_Xm|w4kCrAL0R5)ESrELnF^p?SD3G;kQwr^3}D14!^4$ zCb#^C-6{Hx;gE{A`|rs$3cFVH#SqD}C3^aqfrz28$^7hX|4|I9iI=;Dum;LB4@jUk&1 z=rz56vzP0~-AA8-o(~nncr3{dc^avEMzaEnmf75Xf_zXRpYAf2L6sNw((FOp|0pg`~C0A$g4ANqB22%Uw*@u zCN?w|A0215Y!P1(;YGV;QXXqodlGB0uDyQc{0 z)?nw+TOK}$*I&za5yED`*s~pyd~AJoEK@Qn+}Ml&Q^iJ*4S64sakiStbFN{No>0QrAyGxNFTV@K~-;Qc7cOIWx 
zfgoJ&SyM()kck)`!)F>_t1VCo+mdiHW~RxG=T!`c7VHiq-Jm$8Y@k(d*>v+AqhwtS zxXgwwon2|i6if#dF`w&n$&_%s^)SG>nV7{DmSAIczQrrN8^2b61NAsn8%46}uv{vx zM2JdWp>K%rIc|K7l-LM9A*w*?-FY0JDk0j6G@+;!{+f{#PVVtGuf_o!zk8DdizIen{t2)<+3sOjjS`Mw2$4ExOii zfwju}4(==*F4PhCqJFR%W15of{sZ|%>MK7J_cKusnPzuKeO@x@Hdsaa*skw*BM|fw z#|ONE&#ogYec%C^77h+m_f1UZMv5Eq-PoLSn5B>Nen^y3$}u~p@bJr27{2|=b?yK7 z!s`rVo}@4oGpTw}w;$0$=;`m?TO^^(1V4Fq=b71GbR1;fbvk}xFZ+O$dey#7&-Lb) zIx4y+K>hr+{WSpH+?eJ(1CsVwvC7gV< z{##sFwd7tU#ZR!X-Z+1fU#D<$aj{!~atcL;IZ(o@-K;Lt`?#823~ZA8UonIlv9x@u zgEc=)D>JUQ9Pdf1idX;~KrdV0y@US#STRa6;ZDiTt~=qDQDo(lCB^%Atm4 zjhzvS&8v1hwRbGKq!$jE2#lTmOdY{I$seu+T{7zK_O?|D?1Nhxado0GxUDJ#l-%-$ ztNN!u zXemSW44|@b)uAE3TDM5A*8*y?%s@&-l}hsC@3{#Z`~B*dx+nIku`fsjC=HzM`Cuc+{A_E3JNKR0b@V4rFug+jv-J(c9m6x~rF)AA zXhcU~fdEy&k%p}C_5E@F3P7T~PUU*c0P4U0=T>Am%` z6DQ6kEgcR-4k8`E2FzMKLER%TAKE(bLmI}^G}q|A9#BIw0qYD$0f2BsbP}qOW9Wy&%bt57z zJ}c7IWdRB`5zU)&8P04I1^FW6mD*_pt+C|~Nd-NS>q_svsr3I^1a-bCl&?KCALV`h zSYstCWF)&Lg3Eo9i_JU;H3CtL5h-Rl;EIdAW)o~V+aQhv|E{ds@wfL_oG zV(@kcE57%tWFoZL9_gj)(uHPTR6OU!J@+TS6!R+wkuL+#&k8v@NIoB;7V3A+B-x5H zvf2M&Wxl56h*YB}N2#C1+#XE8bhAW~{Y>;J?|UVQ!LZ*_xhiup34}QpjnM72(ay+$ zs^Nhwk&>O6$piAald4H|?r}5AW=A)DWx1|d;%x2(5R@&aMcCY#w}DCBa%b--?{FHI zO+&r9CeuVby-j}xcQ1k+U^inYEeh_=f|kM=m{mCOo}S^ODA0P;3fifarQ~0jY7H_^ zU$}I;!YV=2+x*ioI`PKPR>qazbQjD_R27Di;g$55Xro+!n+9_N$8#p?p@0r>}3WT76yv^0MgZ#k6rF^sn1!{m_G3eVy!AV>2amX3rq1i&qpO#4FAX zbBc7`6&yqhWyZ?E>N_y@aIPmSI5;>XUo4Lq@>Nw#jUfzgN+?SHy6ntoyoBXVQXnCX z(hS8R+}i@3L+Xt5bDd?w(tTee4^~LlEwg>nY0LkfXf!o9$IN^WpL&rs?{EDJIe-y( zjUxYmHYyDJ*b=ISi}rgwwK$pK#xw0z%z_RlCu)p7UaY>+<65l~^Cw6oV!DJsHH30z zh(eiBi!gk*bo+Uv6r|D(hx;h=#{2}D zEQnG`i=`U=)yue!^4;2U8||l5N>@~!{P2mG%?JS71K6AN3lPzZu*-QGom8vUM(3m} z*s3OrtY&`;w1oLIx_n6D9z;1NTA!u@v=Zxo{8H&R6&&dMLMc7ct$8-i=KuQAad)PS zrO=?REThg3JK4~*<(_&2_1(QXEd<0K6D=X~(LNX9>qyuelH%eseJKJfww4Ekpfl&- zcsjb6cuz<@)K<;_5nv02g=TNXRMphffZi|R8B8)yfjZoyU`#>I+dNSdbWE)tsa1w` z0R!mn+4}!IdZ5Vpte0R9C96(PPb5}Qg?z@<=_yXHqT$4V!HPAtxHkDpw-+et`{q?c zGDVFqhqLrWAM+Osxg;D36Os>7hdn7@VsgUI<`m`|dS58{87ZL1fmrOYeHMi_wBc)t zeC0Yj0_Scdx0&<%PaDe~eZt7(UzSD`nE#OZz*JDH;sz0g7I6yluG;kv^Xsrcc($nA zyaYWcgs7L#0}_(-!dWi1CmHFGFqTiLkh}+yqGe(7!x$V9F%x=p6tus5?dls=xaY-) zL3P^Gd>3w5yH&EDv92;-=wUP%l=xsjYXt`!uGd@(IU4uWYE?XD`T6+em^SH!=U8-M zg?J^^I-07Eec_u%q=}_ercX52?Q}QXtLZ-%T^PjItpF^*6i`{-3!RfR`mW{;fG-@` z9-$N~E7M)xQz+}jxy?B{UorG>GK-wbw0T!S%DP6Th4u8yY35~jJG~d9i$2d-W7)DmQJKW zgFQuc9%<($EoJO=tKO^VV*J_O-#5U!Q3P6Tn3yEj-+t39UR4v-i7{Mz7Bljgn$UZi zpH9F(DtC*yV_r=aVC-vjy-XIM`^zHkI@S|?$wIp7z7*s}8gPJ@a;GHpAO{L4E}<1i zpvkp4JrfyvFnPobQQbRmt_TeAKUZMQK8ul(_jS7jK{&yHw)}}EB0$NA(w4S2?(VCV z;ccv1vvs6e1~NXjld4AFSGNLQzt1mRRka;md3qNn7+FRIW1BQpkFLS~kW11ga&oU< zo4~z&1NYJ<_l?+A=H@{Db!)W+UXBBh&R)d}D#5higTwKLn$?JP5SReJ@Y-$ivu}OK z<3E4rBTlw`uWC}rC74l=BR#5nO5JRc>b;i}Hly{)70fujNxyy(Qo<3lY5m1Lrt}|LG<{FV*db zOXhn)lo`>Pz@5$cb)Z58{Y@=S;RYYsh1a2QBSNZc0FEZCRV4jDstufKUKb^~F}Z`3ROs>51=s5{f0^VOh>su3JaTHl{pY@D1^^=RRE zTCqI&bE1jU{j6iPh25zc-_XP9Q!PjTG-JN*(*L9|INzT55<;eA3f2E%y8xSJo{jy9oL|!)>gtSZ+ zBq;lyd;=Ye7yE_2Z|u@C=`(IQ(XhVrGh92I`h&RakRIva7bmy$WVB?YdfA}Rjj|gL zWZ3E6sRb_9Ir0}iY?0LyZIR@ZhP{Sy_pROi-R+H>(+^j5Mm<1!RAi!2M!m}I&DSIbgKGh%Zy z7^|~)YXZ@#y1KZ^JCNGwf-|b(jvFT)8oRaKR zMpXu`MyOMYD=oYlo3ZtvM44>$5%IH*hdWoG69!AK0Q(+*NzRxDk@nYZ^6fm_gm1^O z%TEEI7440vauI?x8;rcdTZ=!xA@>9G2WSKNB=Rnb0&rXKJ%b_6>*SPpuuljy%6iLI z_W)H64%6(G>s7nVX%Z!_%sh;e=L1qaAAd3}dRv(G=;>Z|<(|0vkTn4G|z%GbK7bIPLT(0RA)QerOf$fu2i-x&(% zzA`Y2`L`EtThzR9FD7GF)4P_=cIU}^65yl$L1MvX(gjOAwt}oy5WOgg#upzJPK80$ zWb@_k4LEn3zZ#A{`~sN@ddmu_<@oFL&hb7JdUpF@VC87Za&kM4^FR8F>vu}o976V= zgzO?CnE~9HcZ-$6 
z9*L(y|6o@931!XR7{(xUe2B`r9N<-ZGFy(Xuvfo9`sU~du3%Z}=|{oW{9(uS;JeWDKksi3a_5As{E@WS)PM;QDq|IjS<@!=Ek| zkg?m|Ui{f!#IzF>rG0&UFet)P8BH4*58bm!GH8ja?=3f8p^ zRnOwBl|?~hSvv2@ICvW<8x3+dB&2@JkvPjY_HR6-AtQfr+dIf;hqBYQ+CpkPXtMLC9c_{XD9mWOiy~EE=3fI%;G>=L z{CdgRFKWiAn;(AZF4ujk!{hS@#wRA2rx?AAouF#!@zoWPm2hA)PVl~^kpDfLgegcF z=BWF`3dlMGretx_LK9~;5KN$5b#51CvULj3szD_d`-3Fp$mLxaHn#`(`iZQ= zMe>j#-ARGsG>aZY_~)_5)ii?pw;44mI<0zfZ$jv<>nnP$*KP6++SR&HXi$O$OYpI~ zmI@lkvm;kyKdFl%^kO|^#YcO@wwos(*PK0XBU8nr8q-#JYewE~udVW!T(~w+5hChbZ+IG8&y301M~gmJkVw{yIR^Rtv|)a_ z*@foxEp@a9ZqDg~(qov@<1+Q0WY|^N*oEdszt%_LVyt%eE5hb5omx$x*(iyJKP z9(?!vS8P0YzbWWF>ay`v9IB6HxD?vOnRmu|>UAXIj={b>#~Z+0`4c(L`Db`?e17{` zlr2XT50&S{wrikSt20{;7Y`>7+6L+~Qr2Iwv3RDgD@s6ad4>3E#6DCJZJpm%4BFe> zUu@g=7I=?~!*wece1CG-|6(V)WxF_p5!cye4>-pKDE+9Lh+ow% zA}gyIxX2k7Yv=ja$ADcR*4QjE?@}LHySEj3v??{~BGg#L1;0!>?Tl@_z!gl1YAuI!SJT-tV~lIgtFHziV_ zUvlhiKU1X@=53G`*GFqZC|6chf=7MM;ktp16ZlQ?K516xduwo4QPEb*H^d(Xuh0S5 zPJWcQ3Z~vFY20uCtAMVQ_X+)5X;NkR8fi57ZSHv&H3C{W@Hn*+q9F}zkR8@F<15JV z^X!SGeEQ~CJnPLTicrOI~kxdf@tZ1)o}auZVG0=Ij^ z-*|Gry?3*ivHi^yEHPuU*g4Jm9-ouH+=|{@t%!Ouo5KQ4sQL)ytnk#UOYrN`o2mW_ zXNcbukbL&pVBgaCzf-sC=fOpQd9WXey z+MDrM>MEA3!oL+Izw0$&+GOU0*s=heHkb5cbNs2>kfN@<5 zq#|lF6_lEcD7f64P^ak4a09$}+4T9v#Xo;MV|o)f0iu}TCA(pC2B73N8YchB9sW&= z+)E3fkTib^l&22lel8Xj)TO#wi3i8QhuO0k61}XVDxS6l@x->p=&P?bg5UOHk2+GH zP7fV=D7lZ1mwuV5|NTd!=|OOtoKPI+`yksEdttKN>n!6)ewk+@SMm>8H#2H~5xTeX(Vun#`#%O(@w!^j(fFzVUyE9SRbO6`!+XGIF7S_9{|*S#r&9)z z+N2#%jf`aRqN^{9FF(_ar8v_@A_%yb*z_D_qSVgj=A56}Y&<-=eQn)IU`McAKwVqd5PE4-*<@>x7c7lU_e<3Y^No zrE*R*mh#?oH->&8Op?EgS=+df=Nsok8$RbhT2^#)-|sqx2E9c2_^V%0i4KTFHi90C z2W9uCdqg%*7k?dpIj@9V-RlxpWZdrK1>?>fbu5?#UR26Dg~7nerkLAsT&|i&r;I^Q zyPTv1q!YyWr9`$<-nWQ62rzFJ`IYQ>TpJE6Of|@*KB-bxlE3HuH;gJ58ToQ%?t1=Wapi_i3li)4wDkQb+_=#Bn738tlRKFiCWJc$?kztJ#o>H$ffY2mqSn=e zot+(;i^kzT|FymOOCB`ptj1j(C?>5QI;4(mQldhK^b}j%4U#vvS`Aty1#LVy`8A%L zQ{d9KEb$E%Att9p*Ul(SzT^+{8fSe5m3<;^+2J*+>Z~U+7o{h9`Qas-B?e{Y&!)I+ zg#yW%6Ou0V!s6GrIqQqOxPpY*q@JI^9Q5q8HzCSxK0({Q!AGA8*5x_XQCiwr`82iyMqS!8Fz%3M3# z24YFm`lhji*|btx_2jZU67k=Edd7n6h6p+Z^2Lbqb8<4ulkgJnUQ^>@9pMlZu+L+7 zhc)QTkLJlor>8Ur+B~bt;IM2{o@tuLXLT_T8GC{A%EK31gxz>Hq>YxW%fUS{(M9t& z9A?@CWQ|IAgX$@<@b~M&$zH-(C+uIfm?A?elSQ2c(TELn^)${EATSRTBc(vh0onhq z0!Vgm2_0o8A9svl`y4i$T9J^zBEY0e@hi2yA|!vyFb2`_cHO;VZ1Sho3=3$X*RGu= zROp*Ul@}DsAJdO{Wu5K|Znmk01k|J05!}7#m`#xz`Z~|?lE1p@7I39yfAVcVLni-90Y~&*7E33=g-5#b<{-cmavI905t+ zs;L1j=#}M$v-GK*MTOD21YTBwxWzR^lM4$z_l2ET|C%(9Apuz7;6N^9i1Yj4Mw*d_ zFq92)v`Cu_-WFi;6#nLMvGsPcIXaMcxXee?R)71deO=-s}F1F2plbUdwo}P@ng`z3#;1PAit1+ zV1JwWGDYl{@U4(HbxAqhk5RWrYlYg96-N|}%nbmm?5jMEP<#=QSNqMmRowZ*lFmm} zIJR(EgVEy;x(~WUWj|nn%3GIb4g<)c`Vb0E#>nbQB~n43yP{W7u2(VU1>cHHFCd_& z8B*8S{<~q~2?x#BI`X==hKOm85luxb9*_~pW5iYZa<1eH&bkl5z9Se~wHwF=7?KU< zm2kA`w0@Gn|HeB3!BGg-)h3UG7_l6G>vZMG;4L~>oPwHpc0&EwW956BPk=T3zj$S( z%r3nCs50QF=u?ml2X=R=OkQW%dF!-;J7_+4B;C>x;di#tt(B~P?BZQUHEkIg2T$dA zR&9N^{eFzxfE-smS}`}}_ig6Avblo1sMjL^oJR6MGtvbT|3VSK;g#?F#eYz$O) zx_3$5y8JgKIi)^ z=ztekC+Mf!42(flrhm`1ui~QTSl+l8v5%ZuV^!t5&I7j{-&i6py&L$&klM_dIa17f z#QW<{Vv6;&%)I%`m*m7VquOxvVY*aXz^)=EF#6re$T{@7cpD`dZ*!)s@x-caxAg7i z-KG8pU*B<3`0*sQ*a6cyn`hV#m+Y`f;#5;48CrVEB z=~*55%Tm!1AQ4V^y)OS5KE5k12#!F=82&>p_CkY^vzJB7<&(=%kap4uw+qT7HAcEm z>UGgpk_O~f9!Xs`gae=LqL;gRmpJz%9>{}${ATlY8OaaeZA@O9?23!M@dW|+Na(!N z&h|8Ay*Il2r0S9N3Jfex{p4>K;gy=)am9lRp*_5nj?0|yR`-b2%T33Tl|<*$2?`YCs>f5e z(Vr7P^;w~1q=W=tQ2Y;^TS9#oLXra}qx54832(jMjy$~JHkzL<(ZBJ-jW%dOy`pAQ zbUmAqYBXt0|Iwo$VVjNbbFP@i+%t)E5+23!<5%Jk`E>fj{sn%BE-4Z$U`71<^Z`}k zQEPc$oBSs?#{B%#iZN-g=^m+v&vH4K`>VC(DM-taubmz7ez_0-su3kp4KF3d(ENzUKfx&e|j6fArTf@%fr 
z5}0iJ0Q?0ood#y%6i_%>^@q^glCns#e6<0bY)4?JM`V5H@a%{7WpUr{0sx`*l8;Gy z=#G5Ua4Zs&+#&8e5NnHf&!W2+^WCRxnRib-}P_)OscK9t(8d<%z!=`f4l_PMB7qGe?} z`9{{C_$?AlKcNH6VUmBAxe6fu#LqJRg=mdJI{Fp^a`DDV~T3X_B1<`&81;k zHH{kvA_lH~jPiEC;udAgO~4l3{I(O+Oj1yz3%MZIm|?Fw|JSX@=k}R`SyFO0kX~(% znS_FRDe^VSFAxCceJMUg4TkF*Pc+htgw4HU9-vc||775i>03`UOuSwxAgqbr4Rz=2^p?2npUurMecG~f>b!$mv&d`6;O=0x3=aAX?b^f6DTq}$xS`JGVC>Rg z^oT}WZm4#h`GAh(CwKlRGg%l$RCB9(k`O-eb$%?y3w%G;g$ z&V;pZn|_qeUpjq`5z25I!b^?x4N>OYfH4=v^`0*TE73J4EYjI$rkss~qjf|kg6)fW z>8*_pK|_@HScesRnNNsI|K|@J51z18ELi3EWP0T(Z+AhNnAsP1qWHDZ7tYV2KeLPG z#>z7@-^vd;=pP5OmHvOEaQh4#`831;=BN|5_BUvEV|tEKaOvXWF860a($40bX1}@~ zr0jvdK#{yyl5`b^pt^9xXxq_dTjcA)p3pw3ASM9;c&HbnNU z^C_DdRs_LD(#sO0Ku<@HJ(v9LxIb(APc)h(e@oe1Z#aez7mRVCJ$+?}s7feNGI7i< z4Xww2epD(|Tqm1+|MU*&okFn-y_<~xDi5b_!uRM znv4ScLzN2Bh{EE-GmK#k=adG!&uTZ0>@q6zeVZhksF1Errme46R)fDtAgMn_>{&h; z^Rq}-iGC0}Go!emG{|Bs4X4&h*HPI~3eJ&$~^`{LcJ+ZD;^9wCdIB_-aUN;nYcUn65W`5@?9(!4BEy{w!zQfb6U zE+!$Yg#xQ!e9@DuM4UE@ei*NdSy&-e5}sGmsgIb;@qt0?d1BQH`Mu9Q(Y8xcYCILJa-2<# z=_UmVq*gBY#nwb6rq5>0F?Tv>Ow*g2o1SXJnE2H6B!Bft(gc8tuFbpA9&;&^kz~QI zJiYgo_1R_9F52XMNWN3~`~~9&D!G$=Zuf=Xo%YZP^HN97mVJU&G;)pe?QhN-41c z3Xm@e(Ki@E@TSKL!KSv{WdBueQL3auN2_K!*^EM8>lioNP4@z_{10y%y#}&jOfbNU2YW~2d7oMd^qqSk1A)g`}&@$8+Nkp5`aIU zRvY2jQ~WXWXm9B;3)MJNf{o$v5B_$US@#S9pN|#Q=mTsEnRGz@01UR_(~s7dx(aAgvSh4EwJ*R~yY##@56IwzZqj}61AgLgPu zGU@6`m`z@a6i*IM;pbKY7yh`6p+i#t)77&kNnQ5R?A*$y5^k_XHe8;vXF>^nEfpC=T<=e(~D z?c{lXmb966b$WQG{fm{9o~vQ%OoWN@G5z zZrJ37T`~v8@NfBI%kKX;I`2TLzyFV4vzw8TgoY7)x;EL#yh>zd-i+*VZCPcN zy(*jQiqs|COXwOIx6rk(bh*k6$++nzbd&7g`TqLPUm5ql?>Vp6^Z9t_mO4)d>^_w_ zljJ*zCc55aE^B4v6R&7-DpNr`Zc^SZJ7 ztYS*n=s&Bs%YvwCYhAK^ivE9-&sRqe9d6DEdu*w|FfR~`%}aSaHMbDo4rb3u*fnT+ zT^*hh`50k}_Yip_mMfNhmz(M&|Esx_4zHNCaoxC(clmbb z`6Z$lXUWw2t}1!0<+~F;eBO>mEC%qm_}){yhBmnq%XSS>uFu6SJmtiIih}n;^BD5j zH?JRJPlC++ZP4DwHwMAi9>j|IEP;i{`FAIBJcZHl3+agkH zf8SC{iYRFKeCOWxOsq44b#TObMe|A<5*3LCt-Yzji2Y$_U` zq(y4Q8X98kg{z_87k+jn9F6UzsYiJlC$hEs8BA|GEi5nFfVCShm%W~VvS(xetclr! 
zU%eYU5#4ia@<9!pPVJGt?@qJ)V6ayQtVaBEmSRlLBlI11w24c<9abCfj8)VcMab`J zzN8bD3_N~^Xl3{L8L8PGk3Y5Wof@2&$YC$Bh$dfVg)H%lv|jwgKZR%yOMM8rqeRPQ zoms!`3xUy;&GMt3%?F{og=;W@iyDNYl0Y|)s|8x)rZ@SLJ6rvzl%2{q;^$USQ)99^ zpQ`;t&;M%c#I;o~DVrH)-_Qd~>A9X5hoa@>WiN9j%u63AKoB_K(5;+mQQ|BMqP=8Hsf)4*&N6!dgW*|lQCv~2Bds1 zbCKs(#O19RGi!(kEs@2mQv1UU98O70^)%$Arad##uiNh=PO@9fKo0X`$7|46SyDpz z0wVR~O^ne={9*{KX76-12*=Jnm|;=O|6HqpI4;lysA*#T`{-T_4eq*lkq*?M<>nRVb-ORbGFEV$&z`tFkdos25p|z8_TD|0lVdp@-Jhclyj?DiZ85PL zd ztK*NwB?29l)cddCL(znAJ5qm=UuS2p`73CxfN};?d?{J z5d(QMz}gleu|WFUpD49OwMi(B14woq9y|on_VzxgNK}2gGlY>q^4x+Iri0Tfl({nh z5!VVjj9t&>Jx3Msls8fiDyE0S7brh^u2Y5aHsV$}Ep$90+&c53d=n)v&wr$&AOQDS zjoP|4X?t;*Un8sFR(xFCRlw}|ERe4?^yAHkEA*CPxVGz9Yax}2K;g6zaRVQ*m1EOw&|CUzZfmn%YrKIpuMs-;~FwYh^XngvL zHcaGl&t?jNOBP@0WauPUzR4D9fi}K6liTA9)b-BODmjQc{kKCM-EE=t=>>nNVP+&j#J+l4|br|TM|3-$9~ zI>1d*3OyyvWdD0BZ6GUC%XZD#Eg2r^%q8F~=9J?=zjlc}M&&)uY+q@*nst6?xA5C( z+J6#G)+&_x?sN@g7G6n8@_C(-Rd-i<&*Uy=y`68>9LM)Cs>3#xofl(r~Vwt-c;z5 zH?2Ekz-j_23L_u>Axs`EE^It}*yE@FR*RO?&C$ngz3e%4USGsALRd2d!>6#w!N;`P z!sXE|;naK29{YrKxjDx9hvS_s0jC|6bL+8FU!5`$mPZj8S0!l?_76aG0bXTp2^-6lw!w@eMdFD4u@V|Rhr*^62F z()#ZHF_!zm@aU(hF{o4qaEKvi{Zk9fMMMfo4nYTMQCQFK>YTq_@cTkI+ z;F5AMp~u;MirkC+TnFCd?U#S6tp#b_s>zpX7H_Cr`iP2oB%T~dXd(ga$_cXKBZ-f# zvym;KbG`k>pH!VQq~!SOcoUJ#!2Ecb5hD}^LPw*enj} z`Crln33GIKXhU=QuejbkQxg8A@7$H(a@P5~&Wq)B-N@mazTG^5RIQp7;={LdS0rq@ z&Z<&@cJfYha(c&7NuJV04ECCLM4_ z3(?^rtGxHpH$dW`qhxfLx68ZZfs~#vmJejkc$9t6q;(-nwl>nFRWUCkn08YbQnS_m zkY|wTA6*^|qqZ!uwvo@I^6&k(F<0!4W(@m&cX~V`A*+c~{Xi61M_**9m>EhZ!KLv_ zV9e}fD*rjjM^JAe&xn;3-GUcnvJ9JadeVaC40n$3PZYB>f;QQRJ3PK{&YKHG8rEY^ z&MCUx@Zael+^N&XnH-79-rciFGn}BWIr;A7(54I0mqyw268(f@w2DVwi+Nvrhu~y(kc-rx?By?aF+nj;a@u3wUlwG8 zTEgRZTPhCHj{b?xB`~Hyu;LT8P$o&=APd?>^5(s3;E?;?g@gC*v)ji`LX@Gn%SH#h z(L3W2^oQd=KSfyMIHz|P?>>`NI5WMqU0gqTZHb{k*uN_PeoJz@Us{Ov8Q7CI+bO79 zCL`Jww8D)pZ)2A95Yq9t8G0e+R{xMs(wv?q(#u(Ru}z~9IT{y|(oZLpdw5*>GN zY>+>5+hxwMtyrz0NK^48bg_^jRIj8XpslPS5Gb;L@BaarOKe2&oOVDP)ml0n7$BX~ z(v^thO_fI%_&^kVJMhX0vhD%-=w3{mL2f7YHZ2Zp_6I`IMd)diyxdDEX^c>k=U{H| z!}+z=K&rn%thg5zdZ{kU?6CUNdlAfJvnaVn6E@CcLhR7{456*`WXy=f?`F`9Pi8J$ZTMmI|3cU&r(QhE1eRlN+@iHoO;$*akUB+YVek|zBu z^o4`F3FEadBOfS$tP=+`M`VHR5xmsi9rGAfu`oO*zu(4+P@F3U z#f))Yj~FVWg))4h%hoNu(!0N-*UKYzC+s}DsI47K%U!HSB2p657oC38OeZNNUEM@oZ>9piZZgi(JgNw&9J(%d2f&(g6*!%tQPXf zRvoS9aCuepU)7<{d4SUg@Krvs-Lq>As+l1-den)$mhvgnRmyx5N(q{o z)A(v;OWc>OS&M|fmqlkn(P@64{v1@(cY_#vy8GX;)Y$-iiuOe&_DjarVffMPWXWsr zL8m8I%dI(T^5k7!H#ogqXtzgiNRPBPbQBL7I{I8rbn?}An)g|5^o$$$`g?{icTOnT zRIY>4Z2h)tMLf)l`_Jta7|FwY)s*ymIJmxKc-8H-JC~TupC*wRK1&A^&`^5=?qkZ)%vil_T4AMP&SO03)`S9A= z>fo|gEm4n2^$YJ=ON4J`^6|N66HCae?Xq=tG87>KH5`YrQL?1xg{cwI7PLH5YU_db zNs)OKGxLt)|M9I-8z!p)Ze=S?f7~vA9#unL-CD+12V^^ksZlr*&l^I0xkQi(H)<+t zok0`Qv8b#pj@k;I5~~ivzk=2cP7-eOMC^w+xLnX`l&Hex13YaPTI%AdG`_ zMgs0j^(GJi6uykGTkkq*x(h6Ai<^2x2PCR`id+mmcVDdg2>!tQ*yJM5prv`t&F3G*Ig*#q6r^sYoumIV##V2sHA&M?+$X!KpiWfy>Sr7t zQ{d$o?d?6c2I4`WSh%S|lu9xtIS%>q$veR-k8|^{6y%5_1D8?O%~OEV7ilu_K>Q;_ z6`Auhr4;<=#YQDL&l+Y^FM^-;GKY`yO?67I0P{)19w>WVQLk(Z9Q)z9qhr3>9* z;xjw{L}oD}hMjVi_AoeCiG8)s{Uu&vNTmLSW15LXo)vR|$n^tGv0U*ay~@L8TaFrY z2t!KvuCvXbJ!Wg=%g`2*>1)!Zt?Rzwq}~9mRr99n(K1hCjawU?0NTO{M^Oa%BIwwT z##0MglIalVKF>w?k0I!{82O8Jc2#e}Ozo_6$ndGn+pqNwU}Hq<^}{i~{jp;9kh z_O?%6EF}^;%yBQ?9Gyr1^CtE{>m$^`3R-Kc?^5nAqVJ4!tX3jc3&L=+Z&jCtd~lXd-+FC_(pAh(w)B_y*OtZ8ol8d0w*(Wvo`dEq zCB96Qg-lu_VP+d^eMwCe$1>}gKXKFu;H)|iRvi0Q(vKHe!9;{v&zCw@eJs(*pX68S zGxqqB=JYTHAt9OMncbi&VQm_SvQv1fpr8Qs*I_V6IO+qF?7N-WNIH7r?H9n<_X?W% z8`-9mI>>&;KpKw>p4a{oyvt3iYB~)XUCSF)IwU_4|JuawXnJRlA1crOLJSojT58n} 
ztn>9MAGJZTvwHnDyBIUAJRPzzyY{OrxaVng(+mD^RSGbE-t8R2kmxoZoKZGy;+>9+ANTA+`fBZ?;x#j3cGop=kh7FTFj0WQx>pJe(HLrZF!K==17ulhCBrasAHvLJ|-@^r+n*HBlmV&n&S!zv= zDRG_z^-TcA(*%qA>Vo#)Z@AA~U{02OsdmOd?-a`gz1l)9=Tm;rJJ{vn5b-DJ#>s9u zDo&3DB*xy}wU2#_G=d=JPvXl(?5=i06Q78$Qv`xL?uOxfm|H?ATGx%2t$!PsY&SIX zi5vd71gMy7rEg{y-sf~@p3a3@*{Y@58PHmu#GV@CY;lla?ULoV2XADII@~t3y7I|O zGwF>W)OYIh>5`(s!s$s>F`D~?vfaYR?b$J~8l9cf{W3o{3zPk`3MlK3MO`a+`V;}Q zbWayhT|q8pSs~#s-StSy>fo?b_e4$c9`ztb<7nexyKTE{FRil`JZ8fiPF-7ZcVsnT zW34}ch|wQ5$1A?}hA8|zU2DvKe0d)3{nhJEoc`U_Eq$hyLdQIXqqrEd%DG#R$sj3a zYt!Yd?kA;=c8@!4NSb5h5&?I9wN#t%OU*rGlQh57RUaiD{>7=Co}Rhl4AXQ6Bn(0k zGB6}UN~LN@>Xa0;yYz9UuI&}(DlMAO=AW?tE)g7%t@PfQ z5-=k#A1ajUrok+P1o6~#JB5GPPz@)X>NXE({R%?H`#K6_A5!q*$!3JBX{{T!e^w^Kqs3-IP3ahDTbz4kVhtf=9W6s07m zdiv$Y#>PsG?%t~C(VsP*`2-7i$C8_ne}gj>AN7vk%S|t6CbDx4|6aOAQjF*z8k@aI zuCY-1k5z?I)nN5P{_F;+TZf=nvbRU|a(q;C0n+)ETe!Qz|7W-L3C&_#K_4!$mF9a- zV|}}}DJ2ZWn`C(Uv*QxOrwUbBdI}K8X*~SJDGrRU*nc!9o2(aG>RQfWB>U2a7Qxj# z`z$(trK(DYcMe2KGHxWB$shnlysscj)|NA! zhX?ohcK)p1xFy)VZAR5V;_$}b_c?4N+xuTXzh2HC0QWrWXlierR$3pV28G&8 zUx-NIeUQ#O(;7}ew$%|oyh;~;e!V`lUS;?4Pkayp>*}p|9t^Hw*94XPFEEKqdaOws ze{hAfSAH)}Ek0+?qF2Xm=Q>IX(frFay=!jxg4b{AqP1-Uv*5%V^PJ-sYppRqgzHo~ z4La$@A2Ljr4F6FHdu4>9v;?rF8jC0VXUO0$JO_d{eMkEhNB@90P{CsSx<^?f*x*+j zkrQ^aDiXR!6&U|QpXvzZx86REGxgIdzqxVFe)#REm=iYj8IwVZNquePEdB#lxo})$ zN8$IXE%XgZx?L7aU%(MriY_T~@wCKM@eeKMZ3RAtKv(FiGY;c5Tp#5_gDmC!P_hRY$iZ?L@dG(O` zyfKHC*#K0vm40h$sE7xjQZy3VLaB|j6+edsdGO_evg(eG?zp|bpiB{W^=!v)jj_j! zMeUr<* z%YxZ@FSBI7Ye4>Cz5W(K*S5&?&3IK5r#S6!n&w1(O>*SsoLHE^J5O(URxY8#TvpnF z;MewAfpsHC9YQscopR`&?B(bVk}@f0-&)GE$(ObgMUbyu(6=%%OT5|;*%4+5N=J3z z)?%?{RyyAP0mSJ+IAlW1Qu#n72f)(~Mn=f?>jZ=TgZKgPOZ0oD>aSFiD1PBukr(B> zs!w@5qC6_}21C0z4|sw1=CoKOOwq2cz)NK|JrXqCirf9xh^9n#o=er&`v~yVppxo? zGt(SrWFqJZ-Z}evEP`CZdJ{s<&8(32Y7vb%=Ek(_H?tia!sWtdP9yG+HV5vi6yEFmhXl~ek###XF&&h@soL+5Z>C*!1tz8 zPn#M}GLHrnNH?4FNY&G5g*pOE6T+{1HD44-Bl!FT+bPq7*9vwf2-sW+sC}gD&onCX5?6xF!MmFh=;U*QAM^m zBtKmGJpfnuF$p?pWROs|7wj62Qk)|?Y}t2uUIWz-K$rx#@FGMPX`2N-MYL6QopCHAn`v z?H!zloEXTDl)4hU$gjHaZTE&-1GzxOyk#Be0{#AAu5CC9_JbCNt+=BO ziuTj)&>_tdKmgR$_ZrqTC&wahxA0CVk7MkZlul2mL6-6c{BFXwK#?nolNpFxz{SCg zT&MZQZn)znW3Z6gikwlSo%Q&bGOEQ{JovJVTGr~voeNQ5D1*Sm_y~WEpSk9P%6mon zFYJQ4j=k_I)vtg5#9waIoy)Hx3$j%3XC0IVkdns~p}Cv!s+=;)0Xb~&?)$Kd7mM#q zW%-|Ls56L28d9x@H)7}%uemr`x^o4nl^w(@FsDr$4iDX*|5s`1gL4`N<@q~ses2k- zmw`fnjkd=tqAXC%2EnRg1m2HMWAiy5wy~=!Ul8`$I{!vw3gXZm1b^w^=6pfzYx5`* zKjL}L;zO5_@&1^{K#{0gP?+Zxs5Ms1)M|&)piR0sn|OP!Xx$_SCmd1JqN0cgfxc8H zX$VK3ijTpU`7itbo6sKpC?<4t-v>H`wJZ4y%a3^@9cw$k#`>t+3*$gt^v_SiH`~L* zlJ}gx;H%92A;hU}e#hud;l7lDzCzlbcArjP+R+M?u&_Rv|DT9p*k?lWj_LAyn(mT8 zYN#C0HX|qoBZv-)Kyi%7YZr^jkF5=4XB)>K z5~C%<2Mfzc7iR^cu8oHu&phY!8D3pn%x?l9*I$Q+V_o|i<}DDFKR^IH8xMMWPE^fgO* zyrO{RXJ-Qd|7lc|5s}aCn;e&0&O83@Nlbf!FN(DHwM&#}RYJX}k`wIYOKqKOzL{Sz z^VbQE zkQ$imET%n%CMTCsW(nQ*8y?x2Ds|(JI{?m9jym>tWrZSAW_{C-Qcr+r`V54+H`L_INy(YMreN*94X0>M zSKa)txQaPs-IO?|1B9mTI?eP_oiIbP+E zC1M{cjhQKVQo4zjIPCAUE=h9H7#d#T%(Erz__*{xtGhO=l$p^f#)-3;}a9!yPh>*qUAZLMP7e{CU+T|01@ymBa*Q9 zqrTCA?BJ0#Ih^hJKl;<*@8b2&26c86q%r(p_ULeNgudRYzU)J2zt7nEHBH$0ymsLC zG!gl^TE{4D3A?#PbjjCmX7JdLN17&f$ z_O2A>d-tW3S>bSGrPED#Yv652kO6tSyy@>om`3?y7_HYJof*uZ8|M<@&XYh&AX;Ru z>v`My9RI{ZNIHm7%K%4#O~YHS(gClh2GGC_5|$J^uaf*J`H88UAgeae*(N%Jekg!wVX!25LJ?)l%(CK1$z zt#|5Xrk9kYc<;&^Ux2^3Aqvz9=a~R8fN5gyEBdk0iN!&#GP8vH;ijoN@&7=Xv7g!V zXFP$fz82IX-V2%+3ukZr>;%0)kvvhI*n2u?TyWWkqP&!jupz00=-p3IXMDU+M7L;o zzdBDawddpmHz5-_&!5_6qswDuZNwDDD%d@e*U?c76&ga0Q#c*gtjM3r8}xT?Ht2ft z+)Qg=JEi)D62@BQG=(cHE&xxM;1w6<8`6AV0`I>fYVg7G2naU}Poi72HuBh6!sImX 
zivrA7E45r{B_tY$U(sgL!D^g#sy>Z@fo=*EZPIG&DT2jD`yZqx%OIAh7 zr5`W1vt>k?AzZd}>r?Jh9`N>py3x{x8kpC);CphGRZRxA-)}%zp&`!5KfcPI?aG$# zO|*`H35`m0?Cf8T*7ARqm8roiBOxYz`$iG3%G3m6`Auf^9YyEf@a`NG{SWqoeuvkSsPu8BZz6 z_g5S$c{}wGsqV~HCU!=T@!lg{tsj8o4xmPFuV3+2TTpb^P`(?p)!4Py$#)-d;c0ybGv!JUrWk z-Kj>bxSmpx4G4nkC(=|NE!MuMbAR9zX7c{NF4|m789iPo426podSU}R1ru3?_{z6$ z`{=(2eFQDVQ0fXhdkalFI%nGgFXxy#Ri!e&Z5A@s(V(p}y;z$OdfrrxyzNce0k7}S z>YxT<&~NC|MFr%Y zyOg(fv{>@%KtOE|A|%Ae#K+!~g#j^1>p8w0vIhE?o9~Pun}aGdE!na4O8316@7~nUj1pPZjm+#;hB`cceH(Y^yxa<5FQYnJDnG7og*`=$o zSS1pIRT4Q#w+58F6$B2?P;Lvn?nw;Qs36rfZjyF*ZS35!;>u1)k|T?kIq>-jTUncj z!knZ4^u3h=V!LTcPIqLcE-3=??C=iYg}CX%BmOCclN^(9OvbF$eiF*!z+Gw^RBUYk z$Fp6bwU*6-&{RtJu%0KkKaErTR~LQH1>fkIJ}6NYE>A0H5iEU(w_7;vkv-R7r)|*^ z+IW*avDXV_UlkcPruE@;N5lI|l|$*tP$E`|UltL-^m1!n501}ll@zPmZV06I1T#s| zY)WZv>7jh>AB^ehHhuL?sdnrhf!{t55+S@*6U)ub6ZDRvUvkpRqmMR5`2Crj9rC6tH@dtvP=S`mNLn4r~NEz)W8 zl{!BF8iK$6K8raTqo$>`0VYp4OXqA5;^LiVBH|8TUHS_S1Vlhka_c0->BUNM3Z3(d zg;WB3rmynU_34P0VRD#in@pQKkSW4fuh2bpLhe^UA}I+s$JV+@_zzdqh?8!~E$`+> z#7y5^vMtc>M_q?GIi`E2SlMw)z4N+)tqbq|q{6EACwOEk(+Ct9hlC)P3Qd6)jAzWd zGrUJX4C9M!E%r*1+=I&nKu@yWE7WbeUq#)AO%nx$shjCDuY;jWyO(K zDUzff1wO62@T3!c0PCxJG(uKU0I$@wpNeA)?3S=`2yNXuTQ;_Zu=?iWG8{_eh4xF4 z$Y;TI{~CT($V92oCIbs4N^$qaYCXl`4_Z(11{;!MP~Q0VLPT4c8DZ6Su-x?2Ga~If zl9Z)N*}H=^RKfr58pSt9oZ$N=@VLeU2jH(~49Km{Ck<)~JAQ6=_N?;`X?c{-2DD)) zQbh7jI#hy9Z&&R_0aGpY=eN8H*QX@f^UZkL^2V;#SiUOp3K+~>9J3(Xd<;wIOkn)Q z=>*p-BBz5BexD|!kL>R;KB$ewxBXY;hg!5QS%?-XaU*ZTs$)=lA9udWC0>^2RBpL= za#wBA+SJ5!IApa?s%p)<{>+rJLNC_#3gqm=Hi{p!%T1+XZ9qja)A>iavfiD@YXx=D zYo|~Q8F^HjPf^+5d!9GBzoJNB{rO7>a^k~m|MNpb-g?b z@@dEaI&68Y(<;t~OkcZvx$%4(e;F#pAgl%YB=#`iYu-vc?YW zB*u%^pZQO#vZOMA&YW1TJEkOXFlQBKzrpL2ALEQ~2u)6qrbx!NwWctj5)0Qpl25A#91#H&Jj7d76ziX5<&9 zTkT%%a0leurbfbc--x=dT$_4%U{{MBCi5}4=jq1XrF0(+sn2?weuu;TFW!+|-v7N1 z>V|1k#l4nlDpzIyquPEl)^=TtaR8!Q#kB&FP ze+4kM*PnH>eyRMy!)86ioZ+9+WgYOQ2Ns%gIyKxj-s!E;3L;Ei^(6Fi4Vt*1&231! z(f2AvSV*4vVnj$J?lt0Q#6Wn^t+hwZ>%vP;(Yc0ynU#n9oNIC+K5hq)Qe5+l{_@Q z=)>&`POY7|E%|{zfA*KqR^5(|5WC0Nalr=j5oTws<)4>ce_9>0vq=Ulr;!*Tk#)d1 z8Rg(!E->%~TYtqz;ZBbRSi}nu*sak!Ior=Z2Sv)tW!?=u9q@`sh@q;*0r+#*w&(Y| z+8kNp&)@9?lGOtLZ7dkd#}MZ4i$X2QxoM&3VyWVv<;Umpl_;Nv70oZ_$+0~exXjXh z|LCONCCqDFl#Ag9$!X?vY41;e%E~(T)`Cti@)!HQn)|7yz8ALfQ{JY)hfOPohZHTt)He+>QtD&F7b(^r=tbrF{DWAL8ox+V zg!9RVK1HD43b%2%-gFU(^jc2}Z(ZNZO0eVch2-)wi`T}Op0TjG{JGW4B*w8w;5n*z z=!w!bG_tcbDF`q{9m8OK8ob{lp>=82Jf%_K8l$iJK#Fu_1xf*JdLUBh4=g+U*w1C+X}C;ZLm%`fZx)_N<+{Kn{Bwo;vuF zY%cGWN4hfGJ-JZd<|4rY#oKfz)JP$}3PLDO`PUSho9&sQCSM;I#Nf43__+D%0xSgl zSp-uWWJ}}8$3)Q)o_?qz;6#^gS!q&g`IRmkFv#oZ#%r)X>(A*?fCxR{e*(d7r@jPZ zER9?|A|2vSO0G{Um!S*Uyw8*hD69VchC`%7BX+Bx`z2N;Tn_KvZBvpbZUW1pJ?noF z!{&+Oe5dZxN$|+*eebAXS<|vqkemw*yQ#VCBrTTvta@#?C+gsEGc@)psqCQ0)Fft; z#Pk(t;S&xTk5u3PJNlP)^r!9U*E*`f!cWP%ds6uXW3#sUhS+5rZd=I!E3VT|l@&}i zd!8b3FHUVxH2G?sjwfb6F#YhYZsfr!#;p)WOQuU8JDlQwm=iS@vtD@4_N->HtP|eW z*laz$eqgUT)Ojg0dgpSosX5;JV9_52W~ihZA)lN)R>)e1V;^JcZtST?=`ZoTO!_yJ zt&P$quTpJH+-TOC9w<4vf$AChC90?|BAs!r3}$jYz-#g$=)V># z48Mgv3^tOV6al#M;d|HpUPs2lCLmp4K<$~uij(*LI<>Vng5M3-I;^=$;zmu1@uIF( z=zy!MojiwJ<|KNg;_we{djTw?)#qak;r9ZiQ*LFBT&%Hr>>+rZyCA>~G= zI`9P!fo?w@&&~f#@e(?=re)=F4uVbR$hJ3nsq~8dfp1r1v&cP%E0X-GYf~td4;V^b z%QYX^;q$OS8aAkn#?ZuMLrl@7pF36AJaXPQy%ER0LlA}5ldwf1ibQGnwSLH;i_Hr; zS&{bq@H*MKj*_An?~YHwynf*9kNPG03mqY70o&Y{MHeLyj@{=p3cf+h4JFd-M zNK{SrQ_b_I0eG%V?s4{9G+rk;uZ5#gr{V!~Di@0f&R^&^=?W60Av-%;h}q=3THuM! 
zq?hyC%{wrN{Nm@9!Q(sU@ZuWl#aD#GS zqrEVxh{W1fOCv41udCyY2}$4CZqP$A6H!hXnel%34c*;I*jXtH4p>FW>blrqq4D6IpA^xjwiV6+_r%RuQ^FY)c*#KbBOssnTW6P_np&F0Viu)2k z(Q!=cX#MbTy?ncRh0LHoo_8$)m-hqx6MB~w7Tf+XLJ-D@PY=B-NCxonlanm<2jn*ljsRm)d zXmC|bdn{TL(@dH9Dq2xtKn~h%px)gTDs&nwS*vz+H__DcIr^rMa4?;)WeUM@M?9>yCsx(N?u8B1el>~&@D|AEg+d!+YoNLSLHdcVmu!h`Gz+6=< zW%GGE^M?(EAzl0hf9`U{cufqdJUo8!7HJgMVt(N_$gm7atu|Ylm}HnbnjuorZAJDE zSRm4{hw1o9^yPGo^;T-&G#)j2<&lR87sd%I{E5s&i|xtC?9T6uy80(tJ1 z@fCZ^)NA9{veX$G3(+E?!xfn;U{WqfEgm%;A0k-4T?uN7T zkiS=W{b)C&w^%h3=F_h-*5ZG3H9Zv0E+kORug6;p{jhxy*%`C<_mEsuqnofj%UCo; zpKiF+-xIz^<<{HXSa{sA!lV0k;4$G~k8spU*rO7psGws8_*yWzZvpC02aXQsk1Ui| z)D!mTU%#y0-K8b$Ob_sg(n?mOo~c6nA-*4C2%m$#0oR5+o}(r8(2;x1y`w5t#{ClV z$*H6##zvCdu1+J6*LG=k5!Z2S$yt{rYzqV5t7p(qVEwfEJ)+n&bS_x^>OC;ASqRZ6H>Ux96|jPL5(sUS{f(qEX)Vy= zJcstlJpUZc+Ck6vlop89_Eu=RzrX%C$B_ew7(b~)jjxZ5x-UMI)y%5tvGD7 z5Y@@><-6jrULHl(z8xl#pUytEW8)vc|L5f+lKM8!Iw8Jo_9rn04Ac?tg%-P!XB1>< zp<3nDF;7i#iM$U9;9qxV$LzYn#=zoc-vH0RVlDk1WUcH3W@Wl>AzHf?NBF#sYf9@2 zMR}_xsXMn5I-OVio?dHk{3Mn;188v0(!UL$&hs+QDJr0Vhr>q-5eEh=Y8Wx7(-KZA zQpF!r)v8Ov>M#XPy!sB8NnmJuw6INZEn3#*`hUu<@Qa4^Zl1Eh8a_~A#SZ0+1Yko+ zTI;76pn9#|)&>JlsQ=N6VVG}S9z`(?JYbc5e@{(*guIv!f(- z7r}ZIVxDpcO@CtQxPl2pwFVkRKc)GhRGPGsElVTd|FehhPQmUe6c4*fgpmARG$IB~ zaZE!(AN4Ly^*%_f>o1N8l}~*6u6}NQo)EVO%)jG|k&&!A!zq~mAa9;3rKGTxhT!z8 z@-l)P=p(r*o@&L&oGTwLgxy$j~>}{qQyAIaLad9V~ z9OgvyBLCtj-b4;upS8O#s{Y6Stj>sHavJ}_S;Nw%g|$U-qYh*sdYv4hO$DRV3J6QM z&~e8~yWFXfASm0D6f%D}X|AP7O>C9sL|{zZbGTIFIzK;;H;aymiHW=V z<;&dMmpMfSeIfdM_3+gHeC%s$K=h)3zchpu(<60I!gMTPw2FgoK7_-q#&B>kCmc>M zY<)%7bKw;|QqaMXovsHz>`6QGN56O!#v4ORL4EJE+%|LW!d+m^xIXW2=c43d&a2*o z*ZrYrA|jm3P7jd7wv|}_UI=k802AJt#m`sTW&ss@qJpL7@3r!}EL2At60h=CG+9KIP3E?aAnhNv2Ch*Q}^aNR`o|?g$ojXI*axA zlVoGQT=9dU`bWxTJdqVFS9cy{8S^cK&UX$xDqTCE!v2Fn(!~yFzXEW^l)tU|*3~%+aA-Zd8?Q(dLf(>(o4>zK zsh^siJy?zAwMj5JZgr*6-kIKglKT&`SSQr+!mUReaBm)y=8iEbUD6g`FMRGt8y2BK2!Ej#&vog?#W+F z;}Ybuj0k+7U5b=;p@5A{Bh^7PJ?5hfg#a2~4ro0mXrKM6gyX80OI>WD$yWxAB=-7) zdyc^Ms{7&IE&~MeD1_{nkCc}1w=rvOsxBV;v3Dg8{l1lio&b)gJEJ5>>$FaxgAI+Itl z6)^_)z;M{CLlrlbt4|xpYmLa?L-qux{jB(TSFB|$NlGt+>ziVB=nu$!<}jFekAke1 z7-zOAi(T%BaXDk-y3FP20OTnIGLK{uB^!%@xj z(I#J5eyfqNXja%)XQ=^3U2?}qpN+1Ul`ztFU|$k!#X0kVx%h9MFiFqc=A5Y_b@v}=C+`q=Y{tE3@ypniFBk4T`2}0c|?~`G!@`Zbs zI%t!$K0U^&F-9Yr(3m)=hs_ebF|4UiF278}LG33O#3d z{EPaMb7*Kt;VFBjF5fd&)|dww1GerQ6^(GmBO-k{cBCKrGLxz##3t=K%a|JWG z@%gO*&iGeuFp+)J`{Mh5-m`M^jVw!PvUSZ%>T4Eay>wyW`u1<2Y$D24m{80^d)BWp zUHo6A7!zYn+BvjFqbLm?PIdg^h@w^QT)Gf8Vc2GfZwKb&6*x zT(p|-L&_s+#A+-wUn60@c>K&Dg%3W!6@R!EnEeab)V06pM7h!R0h?u&yU={(bfiqi zw~eE(+f@ph*K9_$`uZ2!i8Wj{u;`B2NS6m1B4>Z9fbh?mfPfcZGEmmBHh!f?0}9>I zC+jO;0VHw%y(1p+>eZG>1_Fl9nV1n|Ai)PQ9!0)BM&60jE2NoON~7z|gx$CEL8lu_ z@k?%vknh+Ka%;rA+hGV4Ogk9zl|DX)nsn^}MDci;&ejoaEnGIGMa`{SL~q}qpZ0Mz z{O01)hFecud&knt@xl=3I5mts>xah&5oa9xNCw4%togN}5LhXgHg}sL>U5#~lCZbAn z2em}kBRwYFlxX^I3!6cNS>}+x9f3@`wYi(V9@njCl@2fLZ?jLtShI@8QtP>OWPkZ{ z9E58r{~go3OZI{wU;$GPVB4iWGg`|VUXNV51--&7-P*a1Z!dr^MBp;VuwZvUau^vTD!09e1y&p zA-HN%O-7WU2B3~Mt2k674Qo9)Wxgp(<_XEZm3}!ry}B58ovwn1A;Z(OsWE-nez|@c zp<)^E?wP!_pZK@2ojhxK35!WA5?PHTqgF~PL&(y)l6PPTG}IHjDi~T|&L+_9=5Egm zDh4lEV|abuF$nlL+uUPo$*_mLT~s$iq2veu*%J_Jo%0K<{)Bz}{p#2^dR1wUjU6#M z_j8tu4)}aHkFu%KcJmC-gvNHXo?O+P;NeISvTxYu#?-z5X_Z^=e-}>a8RVATLVNj) zWF$F#s=nj@{p^`!cJX*&7^$ZA%jwq?2xuKVJl>D~ao*L2nx&>^xygi?F3=q+@4Ny|kcek`T)Wp~X~t|)Gj=$2`Rno7>Dg)U_~wJ2^$24;$)2l&58Ruu(aIrOX27cQ>Sr81nSt5oW0;5K;3~HIebKLh{V$Jx0>6T+*!ar+288VXO=6azN+6 zag$%cNNU>lln^4~jgen6b}-1e`3@ud9Bxxhs97(`@a_{ooENr9KkR-AXR$kkt#A~1 zcd}eDHzUJVkjPOnCL4&5oN}__;|j17wu8Mq@CC6JnYpYnDlFG*fFIy7Fau_v*lh&{ 
zxsdobnDdtKln@aFY|-xh@MONYw7u`jom?Hp$2O&DW(Jr^1aMo=UAzmT;$Hc%2es4{4q0Cm~wIO4i9YZnb!>N zQ?Zz|59R+yy<#*)ugLodC7#6pZ)&=|^m&NzW3x$=t6z?D)4lzuC!#oBz0vllEi3ee zyx;8Q7(&d!&lY}jN$x_4*pvZzNfC#43@ncf){7sdM8VGQi|8}u`z|gmyq-zx6D`Cx zEAsIvUwn&v1I3Bb>!z?X8aXkFOQ(oli$B{=1NDPvzbuw|;tq~ivI<{p1WemUFcI68fZpLvdF#R2`Q4VX%zT6!TZL5h7RqpTI$Cy-ZvnvVZy)Y+9;KT+|XjR?m zRxZ8${jbX+A82ESw)A^@d$egSDrk3nQe{0FO>lHVcRJZEK3G^ED+K1J&9Lm?+ZTLcu=)m!&?Mk{Z&?W-M<%CMTWXLMIC(~u7t9h-pyv} zg?<*%GtEc@XY6-e4|jGbK%?zg0Nuq8fL?IfGkbnx72q9H(+R4}iYb~TQ95gD3VaZp zr9;ifU+PfiL^z)6>#&3oYiX3mGWEc1U=sI->H)4zr9WMHZD&ZOrIpl&=6KL*T(=Q} zx3bu7amByvi2_G^a`f4@O;3UxJLX1ugd)#810JU$iL|(FUlxc8`rf^Uk~qY zNQ^G=t*o$u8N9oOo+^K9`EGCwgABR{m&8l_S{PJJ;SxVAC`P2F8w>F6*3;k7#8c{e zPUd@J4}Pz#gdR{5PTIgRFLd_Ivht&%on;2;)*jAu(02F=W8EU7RBw%-m(c|BJ z(MV#G;jvdqlPKi!T;eoU(HygZT&Z$KxPwcn69UTOyg$CG=1ChxoY!< z#|59M<1YcmekMi>snPjLL29NMcIvd~P< zW0f*UN%U)xysL!p<*SL>a_x?KpYYF4-&$4MS)}&EZdY@}Gwu^VgjJb6%rLDzNC+PS zJ%`V^UvJ0|6U)a;XL+;iMfQDnZRNuG)r;KIIaRmVo9Y;G1%cAiSIFYKvQoV^R|bbo zKlB+*NsPreKz&qv4!ZcgviObP0870ynTwCO5Om(2ev5~hL!zQL~=A;P@(4r*7-$5M`(mh8D z!Qh2;h3Tg}1-Q^LD^W_m8{SUS@U<$p|1xCs>%NA?Ewr^5%Lr7pHSoMMxe8r$_5*!( zxC0JB-5!;DI%mhn?q{HT0sgB>A?r+Qw$?%mjEU0VE(JXb&bQ<+rNOP; z`O2D!>d9aGYs*KcF{1Qn?8J;c(9pIyxIYnj`*!B2*m^?6%lN3M>e>K;<3Vjq#DfQC z_jR81{r6wRM=Yr5eZKhAKt8nagJRnD%C}OcZ|^R5?m5;hf4yUn!c&m}zcc*uv(XH@ zK|LggK)!CH@sTdSsln(jnsgepw>~XQ{Rw^$m=(lyh*Tm_`4QxT51Qj4bep1!DiP`b zs9boK=PV=qQ`@57P}sA@`y2LkSVO}L={$&Y=YgI}J#yP#87n=QkzjiQZkNZ5uyQ;}3W@|zUXK^>zG(pl?O&D3j;D$bM zQV1-i#kMV3&7NL}j{zPcol#s{Tf07L-{lZI*Se`dnOx)2zc)AMXTt8E;pFgLSV@L* zuK3RBwE%e&^$;o?VNPybQJnlF@klN17Psq=2T z)(rPF1se@Y-J&_S%G)tE>(wU|3dc)O+Z*5PIozeCrJX;=wHAzk4djNFtZG)h$v&5B zSWdFWIsJ%W;ffOdY;S3v6CXjicJx@EVh%Y1$iq(N7)15vG4km z!%SJyGq&lWR`$qION{hlM`c6(I|sFwb1nGRnM^Gc{#Mqh_=NMUhiD5@o>o+stIoqi zrQO#N^3=TTFDikPGlNC$P)JnAf`)IJJ7hqVRNOS-2$N4yS_xXX*#WK?QTxXS+luVz zLeT68A?^#SBE~0R; zD>c>84Q0$nZfoZ(8<Evprq5ERnFf6v9a zqNE5ZR!mo6ly*t?BVe(4W-1|Kr0<~m%|}r>#qan7b5EjPv9i$bhwqfWF$FX z4}{&I0~w5@8NQ@S;j1=`k`>SjXi)vTYD4X8$iLUAb%#Zw#IibA{fdU_I&Y4DYY&R_0M%-XuUJ13GQz?C38HP3T!C{nUG^hr%76#E7j7~;3 zVG`1Y2Geo+yewnYTv4(z&W9Ryq z{}!Wi8W;ZngOZ%+eRY<>P#lrPW_lx4V${P@J$4ouAH=cBBz`Votu!V=MbojV9y_gHO`^ZVZI}2 zDbD4V-OlDDSvK+2&r>@p+0I}QZq%CQq>QaNQ>KI3d~ntyPt%5qJ}-ae^8A?=GOYri zSo^a!R%lI$m|v*;9-5G__tDNR{{xWvbuL*%peQIpu&}3Dy1P(SU!nCYz5Zfm{#CqEoKviyY1FatZ@^a4`4C`cMM=@0wNK0-UL$KtIj)xQiYDJB zW$W5pgX*9SDll8Klw6@^3R6xBYsJXc>fPE4Y7?e*{t#CUu#wf$nLW^>v#Y-^7#N7D z4Sw)o7UNc6zEhGmFy&opU5$_?y%Ds6ZNxb!@fGeJ+J5-Q?=?i6v>n%NI%#TC)IUVL zlzQv+sR<4OE-75fSmzLe5jt=bM*UgZNON59<8I@-#OJYw(@knN=ZG~xA+W2)r52OC ztiQC=ocaGMiLe#MTh)~#V1tQBDK&R74_$m>v6UQ`EbZPaJ=3sZ9Peuad{F4MmCTgp z%6rpSLHPE@L{Z(`0cZf}bBYtKv&UJ!?^BlNZ8ddxGMA6fN*7=`F4`52d6zuz{XHdT zBU{NxGMo3C@_5hh1i~4@AN##(EQ_fQxf`HLpkxr~CGG_CY@lRGZDqqnLFUj`6pzXx9JCh6D zvG!1UM>t$cK5jk(uKZXU0U;uoPk1_*9h0w+9G--oyYXjoPY0N_PY!96#RKT_Ckr@l zAiE=Dl0{}_8k@0A@gsjw+kUWJ=!w0(^y6b|34N(Wm*^@o^E>dB8>~I44kbfZ(mZ`j zvmckqCO7Hx{CpKRD`9M+CG2pGUi;VT*BZ5jT1uX0*S+THimAfG+ZKrz4k{SvWEdr^ z2#~vYtFfu{qFb-&UlXQ#dsBGD-Z7fV-1?n<&n;Q%!~dMsxRn07B5528G3v*7bDp16 z$$v!|Yz_3y#@8ay%Hzt?|5a#!x*1`9z6kGZ%}f8Cuj8~t*GKKx*_LkIsnlPRF`zmx z?Lppx={IaKmiEP`ivu=IINO~M?m1nrdj*cFX*yVoYnHtT1}QurB$>-I9Wv)Q84*Zn zFoB)y)Dt#s7d~-9-Yx{aXq~O6besec9RGT9jV7!3^2s$%xr@|1^#$VPhN?+T+;^Ah z7F6b|+LH%8`wcW(o5d7ZfOmR&{@l_A?|O)cHCf3yPkOU!y!}mX_1jLD&>wz*#2{66 z2JVOjfJ4rxa>Uz$3Fpvq0Kqj_?Wp4hTs>kTVgp=3Dx>qJ4t8Q(D5pKAZD3750b6I7 zMyW#KV(i(pof{h|!GE9CA=gi@|RudE6atX^AoOJKfXNNzKY? 
[base85-encoded GIT binary patch literal data omitted; no human-readable content]
zaVD!7_MT7#ejZ!X63%8T=glP26LLzOW$(|VkjPIGe#&}%-IVPIEru-(4Zfp$QA4iW zs!#vme@{2}BLmyw@=34BpWU;-@;syS)c238K7cZgE7fo=NiHNm!)4*HF$M^Z1rSMz zk1To3mCrm&cfBQ@E*87z9d)iKmEm2%bvUK~_q>9{hlEVj=1e+#$f`+^@cb`SehMnj z>1_ZOQ3|P(mSEt#)g*z^$OyplHP($Stn%M@?!o_#kN_-VR#2QEmC%rU^E4%2T#<0T zXN|ffbZ~R~g4@WQdh^89ouHngEI#-_96@|bC8z&tDz|L2$#+8OY=6ew-G&IXWNX9@ zzJ4wwJ6Eh)a^q*a<~fj6X*Qk@GO+dvh<8H{Zk2wR6da*=BvzT=0HI^5az5 zj!B|(!BJ;P*`o73PCIoF-Is_OCi3YB9TP&J`EuIH>_^o08AF%NcKL ze2?k=M%)EGbZoLEh%uATvr}-qC$-jCbKto%HIx-dAUJiuN6xV3&hT}Wsv!wtpF_46 z`u6j{i{~O*)sGZ?ZnTIE&_^T8Pac$EG}`(g4<&g?AXqA$q|$HBKEOt&)!Y~z75)bS zdq0T3ly_F%neYQpRr{!WWEh&dNPZ!=7qgE#6514r{)PM9i&71Bf2XO}(t=+HAHT|c zGno4E1*Kf4-<~p0dSEW7m~?H1k_adYC_3IQQrH@r5@0_VM!tGZy5JoeZ$66`#cK37 z)>+$!F9AOHcKzGUQE*b)Z!Ya_Mn{K5{o1q%4*E{^!Q)3vxgBvoH-RPD726g^ zIyn2QoZGg?yf8cJXWD11mB$5ZYzGN^kUHZk%UF$5#t!t?kOPsG-D@mmYo2(bDT8J<9>P8P5+kjxB_Yn)J*86=_{5oUQH#(Mxn4NftZorhYtmpvltR zPm8ZZ%x#uf>svi`yOGvJ?hm%}w(-Q;L6PeFP9oMu6oxMDo3Vn5My7lY@>RQif%w?n z?m&P1$e0p6({;cQBQfl^F%~SRUdedW=aGVAa>XOX9LzCa&jerrJ{o;$=MUpv1s;n( z#jCZ3J-4>37#A9aD5)5;Bhf}7Hzq$ zxHoQ6HAfFR9@0X3u~$$Kp*PwA`vvlC=gi@5P22zD=-dOD`u{(EXgDr$hBD<7o5Bzq z(rCHNr7V{anJ6rTR7mcZ)k9Lmq4IqY`zF)fn2Z-}{7Nfqo%@qB z@^%&N!GKL!UMMZE`5h!H;77k_Ab2Ty#51G2>0(pVsJd?H87vsUnJD-?P8N*`JP7E$bu) zvVc6?Hm|9~(%nwKzEwZ6SY(o5U|P14bGjN^&@iiCMc-XmXY2K%C2DzkWHInITVpv= zKcyT`Ut(R?qG+A!G$^dIyLEhkWJ>}&?7(7{*27k6+U(S#fQZ1kU^%91P}hBF@`bq% zfmphoQ6#eKw1PnEh4X5lym0Zr8?lp+Hc*sV?$RnP247-%3}BM=#rNZYWH%&=Hr+dN zt!fz_hNFlW-(tq#@t1lwfU2w`nR0(Jv(ApJ%ISJM!@&3Wro>aZ$IIDu&aYAKaCi&k z%MGZ{d;1D8U>7kj6R~8VQ}xb#HD^#q_3k9CS1*7|x{r-xF2|>&*w_dK)T_LI>W@;i zdzW_WUE0Xc@-jH}hop)vpAyMn+FNIdK9POOdZJ&>mF=s=(1CGw1wrnv8I6nZ_V94S;X|6A zF1ViVCEtQ3rnEFPay1$nBAa!GrRKTdD-Vjn>J3-|*es=gUNm zo>!>L8(!acy6s63x=)9~gCf%HoU3}V{j{DMIqTALTG^)yRV%)8jn3EyqhIn<;x+ru z*{M2XNuSQi2)Q>MABQenKAdKPu zGwpS`E^B)6DzTl3f}IRr>gc%gYww&tih8o`Ozz$84B92-IQL~_uF4KpgL=ANKl#SO zV)W^7@3rqkZ~>Fg%jpWm-yA`fzUM!7&A&R?*>Sew(-16*4x^k_zo}hZtRb8H4xWtn zj%>C_Yn5Ephl5XBX?8AE#dITM?Bv$8HF?H_#e-dA!4$hh7{*(5DiOcWehFP9Ejy8E z3qCqDeDQGN2^o0LeUBByh4US&Y`D);aoE}Wzv&MWQN4WfeclR2s4@jBmHW)bY}+6~ zauIGI&1rKU`@r?6XI)&1W{;QveVl``gCEj{9Cx`={;A&C!|bM%d;`ASaG2;WjoXRB z$f%PJL_fg)yB4EJe+-}R?a6dWtbo8C(=I2GvvVf0aHu8`&;J0gL{5Oz1>TgMS20j$ z*&fJBYb?^TI>8kHb4#%8rl`GfY?buoY>|t)dr31hS4BoKUf8KiO}zCCyAb#$BmYRMZq(X10Oq=~^r*K?#dLnW7nVlFJD*i8k1FJJVJG-r>nR=0!AE-3eiUa0rlzmC z1gin278J#+vW3gUGfkM?9MfGxs#WwuKTAEv+lJ!E*~v8-J;^L?)uVRQT8dbL+i z+HXs#pt;uHgBuBNu_qoVH0JbAitY63EeY5<*n6S*!INZ@rBpciKY(}S4k-#@h<|H{*$1I6X( znx$6Me7bZmza?B8|3-w5@9OCBGFnpLsnSPBVoS+1*7&b)#yFSqn9CaTF!ak2wYu+XnZ+DDXKN<1^FOB*wx9=wJQ3b9Kl$TW`^=5m!Socq~Z4#`4DXVa6(lvt@W{cBkUZs!wWZ#Z2 z3>J?ysM@hiRHV0qv{jJGQwMOu)%D_RA-bEZL$K=+@7zQEMO^>kRTF+?kDtBoV89`& zKzG#1B)~-@dmx-GgcmD>^kM-z!Ov@WPN^@cU36dkb%hUacCzCX&`TJ=6V zjGW@B41q%RyZ;a}n7%%qZf+|YVTmhN@6k5@zIOGCyu>CU4ieN-+S&}Prvpl1I;o|*4chtcCQ6%aEEZ^o=I2lxnG_0+Q@_Z2x`K9b zIUDt7O(tpT+pj+p(J#^`>P8;vR@{8q{7BUgrc0?7qhy|sL$Lv#GV^H`Nh^ca*Bc=9d;vh*eE%r^fAEt%@0w`))hRzZYWTCJc^q>CdEo#bm~CWkTUkCAepJ)XvMgn5qBq&?%`w9ni!-?i zIuM#%2G4;$KAn61U!4$`1~jj*k0k(cK=IP~p&RIgJP#UpQ#q5P{CNIF`{B1QVjloT zU@T~l{V7~{$EzJr0!WOqp0r2j(1PQ}i3%8cq@|TP9TK!rcJ-j}fwr`y9gr~RHD)JY zAWZ+N(q`IP%NEmv@@`eCiu&c&a)iz~c)EU^dAd(=;eYzAL+DWQt@`O_W4V3adcGM` zSW!vpDOJz6M|q5(JmEXIGRR0Hl(O4I9$Qj|abDBc6yeatMW`=T1Z;v&@L0wmVjgD> zAQEjIWMEjbIk@$=%_jX6%AMoXe=5Q4&aDS~ArecJ0~46!l~Ck4nVE1MLM2#sB!kj`%bn~ZknU9>ZJW1@)Y#c;#9q8$h zbZJ|I}-cMQq$nSgD#Ii(oqVr8=Iq)HUc-u8Vj>>!Zes^==O47jYpc( zJyTOt#fM7ftlxz}@aFDkqx<(`^P3lS)e_Ti1@*Vl&b;n&)gBQbRDWM*fMo%^{NSnI zO~jEV6gHw^o>-M#TveGGeX%mDj~4{Qo{}_mnyxSA9Fg}(wX^PL)(C@zDC6Jn%!Kd& 
zbUuu$QqNJTy4h7AlzGz6Dr3qlk0WtoaEPL9k>&URb`h1=5DS?~rj7Nwx$QH`!;eJa zKz4VHM&kR8D+VDcDG_>Bg)EoAXC_P+k$%FSc>u#oBWh-Uz8Li6ng)^hYifV)Vwl4R ztKvFYb77AJdicI0^hbc<)ekb-hp!LKVs?vX+0h3yl#pk>M}7m` z7T6Il!{Tr`je}=S;JTBVvwD&4AabG+ccvomwIn#f*!X&gz2Ob}7Z3`)eHIt{)YVXs}Q)g6M! zOX@c4Zx+^r*(k*cX;X&66>j_Y(@ATxDN%;L#gb>)6gB0UT+s)y5Fdao0RY~?zV9n5 zyXaj<3@zK%J|No}*Wib@pC4&NgJ|Zk#~Nr=tte2(7B4Gp|4TYN+dDls95SK9XglQ) zEV4_BCaZ>8`+QA4-aWGf-$RFN9F9@vaAf+)JYH7Ud*%I_ODL0!Vl~lY0L2exh}Jrv zFQ_ETzgoMx@0hUBDf3bUId96r;gHo1m(A?uL}8clChkNQpCdtk~vm9zH$7zg4wa4I+X)9@oJgYOWHb$f2R zHS7FXh{>1#v0v&Atej_1`+qz#NyDH)(7Nx0bZb(vqhg>@S)biThy zoQl=yA&K?tpy_-GpgR}E*&lkj>V#qlb}CZ0Z|RG3h_B7u^Bm4=6gB;co+;;@GCr%& zG8AQB{3lfZxm%7`tFy*2d@ygfdnn<*dHH;Vk(n)%vdzT-HF8d+u?rQ0r`+f(HCk;E-?43 z@1y=qq7}b@UyO9Dw}R=s9F5IqXw{|yM;Q0h6*{9LVzyyTjG>EbRqNkxy|W)RIqsh> zX-02WRImS0ziXk8k<=p^iy;AKrV=)w?9j&`LG2V`OiLv-$*`6Bn8OK@Lj-y{0inM8VF(NxZSdJAB!#QDhmG! z7dwfkXk>tGlaslV$8ll4U5W_g`OMJECJT1u1?qW*5l@{zFF%h8{Sp#&Ir$b6Az!D3 zum^3Oo2j(o?8^GkTbfU0$XWd}I#_I}DN+0N%b6QO3L0L!q2If|I%clM0aW_X(_-edC)yO|DL5Y(qhj(&%yO3Sdzi`7qxGGj%9vw#A-&461tKX{&LsXC(SE!=ebdyy+g2z{m7-*lm_4<4IW`-?;X6h?~8K z>M}5(V+CLAodJVbi_nA_0PD381A_mkK4qOu9e%v6{;v)gRp z!F>FqSr0n?@A>WWSzm+v(%B`Pn16(>xf!6vMw+7Am!DhB%a=KygkuOqdcw&eT$qMR z4}y-P;lyv$p0%*R9zf^>AIkq2#zD6SrddbG7rf_CL=~H+@l zAC~78ruI?kOa&l!6=?r(_4i8}s$W!tCr$d^e2>iUMu!+$X$STqgSdo>s06l<-+!D0 zyWKbT-erIahyn@W2eHDk94!5$BdnjzE@_=tAaRcXm{?gV({`F=rI1pp-sDF1r7}eK zbwKFH@HcT|we~XuiX_NQeJbH8({b0mDi28uqiAXA_4-A6X#s%G@hqVSD&^ z4JTraLkv)JQCIPT5d7NtJfH}G?sNro=o2S5dYt?5snr0vVis6TI`4@_O=&#yc{_+I zte0RbnhFm%C#g>`cA4GAi;otJqZbfxBm(@mQX$imjK=gF?kzbYU-!rLNc6wZJ}?n* zIeRH)Q~BIBZ+kxGPGEPUh)4Uw`_g6-aL6T`-#x+IF3tUYZqur~;g@;&%7C1yz^1c( zyc4RSW2@gMAQihY8}-&%sfW{d`bnwcFJbwAzsIF$RC*O`hAck7EAUYTQiIQu z?|y&8-ay?@L>NPL^Be4l)n)quls{t^=0v`4)!h(lkc^#c5(kc}OrtL~oS~ID`tCVK zPs=={QWg1|Cd$io-1F@jx_0Qu<Pf)W;Xue0HE zZZS*A5XpeA;!BYRIojqnyr8k*%yFAk9Du$$Sb9d--08|4g9c|m`N;lLaF8l;>)&Ub zSW1e7a8^LTlS4z6b`GAx$VK#3SgLNJdV(is3eX?IDhY`7K_FF98qJEc@JmLXt%Aui z7YCDS#wna@8zWz@T+vXO0*ts3{>&h8zE~tb5^b5b`4ScKN#5E&A|$4|h#v?`A zHSulKu?FXvc8mg(21-s2IjdzVFiD_7BFKhls~JHR2F4mw(Z~>oX zl5kUj3om<^c79gi!9-$T5MY#-<4*Z_Wlo++B!W2ID zpH;UUg)WnRLt4C+j}PXLEy-_9oI88=>^~TjdpmxI)D+=I;Xbr+&%n>A9`CVA-s&~%m_QBOi%80O}J=~(-OLUr3w+oI)7T~uP>^%r}DxOFH!fj zg3;!F2MZZ)n8L}-i@LL)FK)rq3$ab6LMG|}l<&rnN&pp^_leZyFnfnJ9T?i@a`x<@ zm7l-1rmyOs)EJHV8~VH@e0drklP-=l7|MwYc^^f9ik0=F^DxPJDhzgrqo}a#1!V^A z#++040`FT`vxSO-hgabL%I9KMrUIY5H8&&@nP!xMU|VVYdfndBHf~0j){CPh zY4>^#OuW%g!m$(h!IotX1Sk|ENGf;REhrF442SHGT6jGd5gcs0D>y?GRS3}~7ye%P zxU%_HqQ9tLQ_jg@j*v6&?Z$i8pdq(oZC{>6!o`cXb5(qlHTq0uBvygsB5{#B_+IBu42!B2Vk)qdU=As)+R6^zF<*yAV&GpE9e6d+;D7)QrPF{Qt=No%mb#sy? zg;?d#-OvfL?!c?Hz=2r32_pZeorR+A&p*GJ*52FP^gQO%e6jG*I>`6;b0$m23gXZF z7BeoDvGDp;<|Q4&bD16t?Bl<5rqJt7m2Z+^oH+h z7Ui0Nqy_&7)gG-rQLY)ruvkk7O?QihE(~W#28KO}TKndiw+q%w#jO5{vX#D9(W_%U z7PWd~@KNQD@^bNwHUgQW47THb+o{r1AvqvbCXBM%KjPQMKBLTAfzOMII{ZH>3vM@s zz@G4L9Wv)47iZ6H`u7J-o80+hqzxA?-(6ET=s_EPx;Zkx^-?7c6h?a--%NAM&0(5%*?ogxr}?~z{-t6T zUWNnw`W*^A7UU;I&0wgYdJLu4PooHu$`t-Adn%zM3RCEtlH-rv}|I%<486qvEw z=b?Txv)9|q7BOG^egdabwWO}rEr~v#%tlaYm9=(QfR9HDx(y9FC%2_~{Tvz!*z^8L zuF9Jk?Z2n~#e-7cN@hZdZuFcIv~+Sm^JgD>W6C!yD)^3YYv$$tK(0_MK5Bg-nlu}W zlGxZ>e5|NKLV1W$IPi_m+{)e-Tw?4Bu?Qx~df35|7e>hjr58Y=NA?RI67%H8E zB1k#7y3>{z=PrKip|r^!&Am-Ev_qsQOVRv(G^#E|qoh(l9=Z4aLm>;*K{}TbWn2TeRv;)1v!5$ebSk^68Jn0uP!56eCmDL89x_2QGx4M_>zo$%o7vevz|2_&nxW&g3(v%B`P$F@6j^xj(!=F!p7%wdpJ7+@*}! 
zxup-+7~1p;;QiUoXgM5RAx=vw$;|Z@t+q0M^2zKk`CndUEtD$5?WGf{-Sl^oG zu}6smNd+jkLnfvKK0@A`3_ern{mQSI)%0Ii`w0d#LVj;Xk{N>QY=u3N+tj*us_1=B zC4M^6;07uP-XnG=eTm2c4h{}MMl_*74j+t})6ztE(-nY1J9uY^ian+OuIiZ^^c5-$ zvX-URS*uJGl-Ib>QB3B{ApBe9dYFNiCsG;8%So%`>_Yk=u6q zViPbcz$xPZui3wEf}rnR-YC~u=HEA%08#tj)L80RrDxV!*&mHrgt@E)p8ZT9$~^rG zhNNKHy1lhgu6m9b5xxHHU*Gn>CTfIr49L?dg-3fe<&aQ@5y{qjjE=G z27E6!qN)Q%rq;sO*j5v7BZM#>Oy^mb1%`IQeO+_EqZU|%W>0r~7pf)tPxGUSx-hcC z-l|Ufh<%Od6$dK9!NHaW(P=@9`x5R3eu*`)Anb`~2Y|v49~v-lTr&D?DYYSjI49mB z!3;1)aZM>qfd^g3`oO-&sey8F?t6}L%_t7clLznKzAntwi&uawLHD5xdRDJr8{yCh zn;Y-VMd7bW5UD>e48P#te`21WAVH)=436B#vv2Gvp%@hJcgR`BP+3x>(g6L2jnZqb z3^e_e(0OJ;WB2kGR8_qo(B*>w6d0t0KUh9QiBFoZe!jIGmel22?^Ja{mB9WCS618T{Q`e0Wy`-H3u$IYD8z(qViL4qITWtX|!;H%A-E zDk8k$Ap}+XvSY=8zQ2{1Za3Y~eX5d3(U!`3*j?kJO1|GKWmkQ;-Yg7;9P$<$|INs_3hS^^57u3`9GXZB;!t0%soHvZRQ5(4jF24>81?EkenX9ZoCrRg8BLl+1ZsPgy?Xil0LU`Hv zvHI6HGQaJI;C^t1C)c@Zr8!HR4FDW?b_@UGo5(k{{~nJu_*dV3IuwTjA7mv>5M&E+ zuUrx=x|>z-Bu35mFBe=t*0c8(zq$0{WA`Zsv$~9?7N*ymom}Sa-^+dY z?&-X<3_m?V%UrkvA!UAuUDhRvfj;Z@EY)S55L<$pSM%^R1soS*;9t$*ey$2J-onEa z9493Ob~d@|R@Y(-_i;HCMu6&$j_>^oxpw?`NrqixI4Mk0%PO4-uR_Ddv|GQ7hC zN$rSyYvsb7wBxEXv=ChH^gS-lA4epJNe;a`yk|JjuG zM>jNWhPLSmLHazB*sEIsdpZMj2G0|FmgL+gsaMhm$$Ii0meXpcpjCaTJF5}(h`>F+E7UiIUGODEQ@ns!y+OLnuI zb!UYGn2v{Kv!4sT@+%^mwN=fj(sYQ#jH{?!qXq4QH$E}M|N5@%XMU|i88-Y^$dJsq z3n%>SNZ@PwTk|A!&7OjztK2Zovmi(N9UiDSV{!+|oh3yxRf1lbQLb>%3=>0flRiMv zOc+4LMZYlIXVVGs8>@6ARIWm37$r=%b-wEf@)_N)zceiFWQ2=@9(~+P3mC3P|1D6@ z`OLuoSzqZnU48bZ+rWqUkS3V3_Gr~W+Vk#|o;M4>Tk-q)*Q@&OV2yeyjN&>xd&vFY z75x#zFQRPl?3tr@P6{vRuvo9hrgzNZeDqq5VOdYNUdxLYo3KC~=8mt=L=yJ;sZ8m~ zWR3@*R-0Pzdyqd|a2z*URejV)J)Tj*%sboZHLoVP+kryzD&GkjDQFpkjm?drTT6O{ zuw3z9e9Ga&9DR*XN%#6H3hpB)h3d@5U2%^N`|@)U-gRQY#CcB-y7L65)$rOuKw<7i z{h(d<_>Hc1vC`GY-x=5*N&dE4t`&^^ zlxJh6k>~=N6DF3|B3~HZ1dYH1ShEFJF8TS;bP)}bn9BmJNxyLRd{~Bq8C!O7aGykJ zXU=fbh7e%u`f;?WpOQB;*p&K_A9GwbH%@r@=i0Kjl|2dm+(+ZVSP3NBDWjOjz2Ki5 zCh5laun2w{^=_OZTT8-s%C9sbp8URjQw06_bRCBTIqGCf?Vw7P3Wt=?(dhy@h#l@HF3jW`Uapq=*FN!AuMDMlJiI5rf;wXvfjPhrpvi}G zN+%U#6-pjCG0I+wQAzLY6NT9Ty-_vk;&tHz>Kj*rgn;hESD%ekjKM#y4P$eRyg;_( zYdyj|OZ43dy|8)wb+Q`d!QeC2`Ry8EuwtTV>Nx3{Asl=Ey*@oNmI^77^}4mZbA#G+ ztX5YgdO8Zq)wmRg47xqS4S1<1Q)mVX%%`UnX7y_5=9FZ~i*IypQuDki8F*gRGFHWK1A{)O5KiI^I?N2SyWH-2Hu=Ncw)|O9M}P zf`sk33Bv7x8ub%nM|eNiuBMSOi0l$cl9OUZ(fCRP-Hhh)-o)Ao4L4OPLd{N ziSneUAwq0qH2tN;kW%d`xA5!Z%=c*el-DiLV4wAb&fKaH2lZfoUu`o4oN`&H^J9Z& z{Fooi_sa9bDLpc14hd^qX&k}n-tNB?gX(-u*vmR?XMx3=)9x{}G@KG1%sSH**eFv7 zU76!m@9bPv8t`@q09r)hQR$o>!T-a;(#rDT>M{E9Mr(UV?zEcf!&?2FEcaL+aRwZD zcA9N^>Iu_v3Esi|gjtf9C5|LaLtq{H*^PuY=r&2b{0#M zbbiBf!_hF_(N~#n#GJ#y_F*wFA~~N}{Gnkin#U4$EGZ#!G9|O&>E<5lRc_okD>y7e zb9Zge9OnpJ8Hd-LpH}9c@5KOc{qXbb$IA+ghQkd&EF7acQw7_7dU=}Fu#xQg$36|i zh{d0)FE)5Dw&PmV1zcjvG z=VXBS4nPQu&Z!jN$7o|+NK8`q9cIusKAMm;yrN-K6QVw zC479b?Cqo-Vy`A=5Fly(s44C*fM#bFg(Q3TJ@T?rB;@vbIbc!BvVF`ac2@EB!>c^{ z-y*tB@Y+&%7HG5WS$`L5ClGaL~*c zdx#t2@7>}pm+WlM$f+?$gz+Z#YqJwArNiRDYlE`9c=si~Q^C2rm%!SA#_lBF7{c5i zAycqKOo1efrVGlmmJ`q9O=Z4yJ8EQ8alkTbbr5c$>X7$RA&$c#H9m;Nd*0il#5uX! 
zvtNg4tG6@$=znpxYP`zy^hCBIh(u2itoluHufVwPdQ_`g*dSKs$9Z_lJ^>w`6CkKy z=e#4Kr-poZ1R}zbh_@0Zm_@iDPYoAu5K-~*HVD_P2-yBVE35p4nvbajl2JVbB+!mJ+Ly43UI&O*@pUFmhH~|I z@a0-1w#V0?qFSRtffMlk3c4T!-EPs{)D%zYUr_H^`Z<-vn;`sgl~DxP0^Tqe4aVZw zDHU(P%XuUuT*z2vNZ!W3ULx|RzP!jiMpvM>>&IM-@U#5565nQPZO>@U>LXC!Va`s= zoYtJvq3GQbn;VO18+cS!eo2m~pjVye&--rkB0Q2fdh}OiFqZ-(U~g(W?h!g^3cdV3 zThrY5<1H)wW^x@AdWsJqHpH(~wS z_{uvt_8ZbPqIfeENy9zLmZe>Y`GiDuH-=to5d<6$^gFkK%6eAzg2>m2Uz+aM@d2Hv z`i#K~x1Iz=#}KUwihsj_G{W=+RMm>tjc(W=o=%*E?Qp_E+b-z8P(4 z`uyLJd#cG;XF_?&^G}z`?c7jao>_N;xz8K~O^tkR!@Dz68Gev!oM1cf!OS)m5|3}& zS~SMK*3pL&PUNz)*ibR^c(~DV^L_DaLHu) z2=Q;JwxBuGVq;tl-w_+lkN$TGd+`pp1o)cnyVgIdHuC(v&+2dOudmfa?ciJwN>L0@ z8uUj1(X?=gW`EnDURAEUAq6)XQ7$@ah#%TW;3qsLbmH?tVf8 zK;vZ#Q{z)!qIAhu!an||bLzX6myzEru+ zbs-uauOWk+)FjYuj2)aB^$@fmwcZf`5}vFgG0s!Z2OJyOvNBYhb9sP%ZXc4KwwtvC z73dD>?{G^{Ab2wdfmqao5NkCaYeL_l7GJERnb@pirzzxaDg0#XR*w*$DahrmcpW{a z63*jXJH;`Lg;!@@P1CGDQHH<&iv>EmP{WrrYb+r~onzCcIKPqXA{3fF=mgXyOY&Y$$uYklF1-Evgy%U{&j)S9A$*OSHynhxao~&dk(&7`pLL(v|%s<$2)8{)s10KPOo5`-1ESj zruZ^@E<=HN@>lnTIhLcK@pz?eKLYbI0R!q4b9W>pK<6{x%#F0ItiXfm=Pgkmh7>Fn z+AR~lKU}`;R&ZD3O%NCKp`G|zd|c()po8ZVmxBPT{Cph|54D3@WR8yrd$*Z6J`uRB zaCL6&^U9R>i+{zz*W^k=Tb= z^7!5a=TK{J);o-ZJ$;z1y76~w#Kun9OEQ%dcd3~j>s5s{lKA788wRBr^WUfF($lERoEBqr{2 zko1N1j-I)Wj=7P|n2nqldbz{W4JIf%Ms`hRua`kMXq(VlzSM|K!kp89vlOnIi>opJ z7JezqPk7)(kwiFNuLD;cj5wb_w+tEdtLX2Hr`$DA9cFvqeuttdCIpgq*Zvr&;DOhD zP{(iH%UM~j@Pltz*H3y?H8sVY|4C%*fcYRzi0|Jn4l<`|vY%jffEWBr0_wP5@~lUX zOnkZYLJ?f(8><`1O2y?5D46=a`nu>=_l;3~I%g$(7vJ|z_vfvlYoB@QYVX_CC|Jqu zF4WvDo3P6eO;^`+?9Gn@XD7`rCBq9P64Nd#RJDKtSHsYameRovx735+Qd3e5KT>Q0 zq8g!0ND8rXXzwb=?#@^hPsF|8nkZx5L0AxMcBfaSwy_-L*%Msrv8^oK#SgFZM_U_4HmfEYe+Z@{yhbzk8uG_KW*6+K44B>5QDr3QW zNm`7rNcQxVtr^iMG6{~_Q-$66(+?qyK$GG104=MLhZY#1A!h}?4ZcPl$+K?SG`@`q zC6EtUY4N9e>P>wS>XKDUYA!E7eEIC|Fh0`vp+6F1|DyK@#loLDG2+_hzqMte=CzH@ z&CQnj=oaTox@xbU)lY{KttO<=?U@2VSeLa=uZw9TN1J(^G17}>`c8N6btCQ~>Fb2x3B%l@~0+58p? 
z4^B#bPMT4Wmhl0{zg{F`&25Mo<1KhW>SSf@pe(hj_O*NVCpQKDGUj`-&rW>hY`8(b zT2=k~Hk^|m8i7iO4Q@~FogVIpG=q^B4e=(hOkzx}*oZ+<>Mkf^s1{G@U=LE^`>^@U z+82FqVNX~Mdfe3!M9Megb=zZwZ)k|OO|o{pyPgWNxTxBD>A~jU)6vS=62#m2Kb_;Y z36J-N)3DUK@+2b68a!|SaUJ=FpsaIm;)BFd(*5%6B2x@vPC^;s?(mVTXAZKDgS#W& zXdC0iUmgy==~g#-b@O>vZ+Uq7z0cwG1+Z(T%05ubKjhcY6M1Kh8jcQ!-2#iU9rkX7r2Ud5`0zt}8z~YGpoZ!UJot-d_li z#xy87`L&30oM=y0jnBJG-aO>E=g3S^Wl}9=^V`~8yC1*)3|baG(Vb}S>TZ0b#m))q z*Qu}H<8=Du7imh!)6IvWcLsL8PISKcNqx6W->2b*dGXS=C$nD5R(JZUA61{8y~%2v z2noqJ*10#;=ZM=slRnl`CXx%8D{c43TH;aq zel2)pd*_I?-$+V2v)r#KTzBJJ1-uY=Dn={ONFtUp*CCD%G5?JG4%7SPYUWNM0qYC> z!%zIYtycbZB$bq{#@l}k-e_BlyeK?TTl_K;1xxsR+`h3qrR_=G#GF2_oE2wKLu+55 zwxB3_canqaF6U&|;rdkx?uI)GrFdCa6Inu@?Tinca-|K| z72wA7px0~daUO4}k2|T~w_Dm>rXU!WU}67%t1NOSdE)W#eANJJw?b^|tgL4y9)T=G zP$jRgzW%lo?d>uf3#E~LuzEa{yXA#}H}tMbTY6AIJqc(Waov8Y5@)PUTqI(sS|w;x?4I{ zNq>Fm6LBYcCw(-Y`92Qd2Tj1tRAQfZZ5E_wjMGyXdRlL;2$=lt>F{{&{5s$t%q*OV z%q~K3xBpJqASOT(JrFg#5))Pg^N+{*NpYUn++qZ5MJo0#3?{E0~wXc=%BHM(aKsP`D#*UMa5id9B66zR; zM!b#KDVB(Kg5Qdj-8GM9)q8Ho+mG$EQ0fGQ2zxwfAD#V*E%TBJDph(wBt{v=ST4e4 z=av4VW#YN2Y4{IzO|aPdRci@Al>zw10thzK_;?LwMzi3C_!VN_BaLc!ltZv~jqsB0 z^H$>u07iZmAf@W*xu{J6^S@h*U;k`r2psZGqTKOzaJ34&ID8ut#Qbcm#Xp@J8Xg`l zP=A_}MAWRj3lFX5gN#|~RjC(U#xZ@_KG(ntTPbdVkwsA7QbPdbvjIB@ zJ54%Q!dnjZ@bit-FRU+N`V!;gb2GXH_WqYPaV021+geglPGOvJ`Ke5P)Z$ur(>3ns z*cdhTmibKKe5*V_2HM;r zW4WaKTR0zIS74s-*IfJlRPQyfmH?$lc&lV5PPSsbWAwujLW=80m-E~(qt~fmty|o^3Yo`Jf=;u(-XU7@U(d&6^6# z;5pn97GJzIwG|o@qo2zhG;`4HRY%z1YB-FEOmB)3{T*kI5bYS`PBzQfjC6g%K~aS# z@}5M>9iu-!R6g4{5NNx5puEx9_d1y5ocs(m-)@iUO6E)fX^2wu$-+s!2!r zC@mnG74u6nlFzkegxv91P|5P>yxx5l2Y2|D_t|>wlFzvi`DY>WN8``V=1o6Bme}Ns zb1wRC`v`JFVw9ymLu%$O97AYie_p~F=&;kqK%P=Wf*LJE3z(LhQgyiXom`b7#I#sx z02esstP`vdj+x1I(7+m8(aevplI?-B3k3?C9F3eT0g6+2`0=0AMbN^2P#ACP_C7-m zF(1C?dAP0=;rZTD+V5J(0o`kDJs6KJLMsDP6va%0s3G6Bn^$jG)*bJy{dcvvB8wf0 z6xG2ykAIcuPK&!t7S^y04G0NYYC#WfwthkvqlxfmyIc;RVOI(^=86|@LB`okjLkEX%?uPFssvNBqPUae@y@tFJEJVEo+k-#@ z9#_Aar#h}gEXFsG%>hc9c22fjtNFCS<39`(OJ1D#_h>DEje~{+i3*kLz`T@m>J76c zh=E&n)5jp zI-a-(Q0N{2sCm<&LZTfAYy^QB<1;f$?ZKd^a`^ENt89R!8DFi}wE3e`7y2~ttN_Wpg_ zrkaVyDxRP7KMoB4dY4k@4>n-)N{l^4sRMCtO_@@)e-^d@I{9V;;v+oMUzqgDjD-)5noinbzp|Z4E3T!4n1D3eBxa0i^ z6BPq!ezC|P>F~{OY&?U_23in|r+PKm|5ut;TX&Q>O^V@luEJi`lgz&v58V(Y7#148 zG-%HlVRs^$3rTf)a+E$C2X&KSG0cP*!EsDdiuV59$;R3B|3u5|l^;N0@zO8|f>&!4 z;a>R?^4Ou||8^^5pFiMHBY(C2;#c)bt2E7luUR9?3b`YtWcEu>75zQJLr%dZ?~hE! z@$&Bck=NtH0>NOp;*Z?;QD;hPy@&?Pu$c*>itfdM;145+MVt|k{0QMar%e3Ylk$@o ziAcFEHJh7j{^GQ3gE2RuA_?KDBHH}xx}PIn?3fXAFSGK?`NQ;iJ^P71)-%sgdoY?V zD9>N@H58)qb_^1N-T9)yz-+Hu)4F9LVC}#Nakzn?AC+5`8+5occg>L0`?OYQVB@FP z={osZG0{c+aSv$QXAfT|ei}3r-LU+$BV4Mj{mrj(GKzuA-dr@Dx*$JNJJd@$MB>bK zC)PcKG?LJ-Ug8avw*d1^6VzHSU_N0(M zg70Fh8mOs=c;R!A_7h^HMsdXZ6$8a>e!FjCOzs{$S1R6r*y0R)>EH@&hBzR*{Ae>C zR~SK%F-777`@YJ#E$yonzJ!)qA6kREaa8#2a6$_G*RzYgI@piDebFq;x@x1kVwqt zEiB3m`f&_yL70KLwhmrghef+kET?%6^S|h|*+!4k6OU8pwwyBv0taGbb#>l$;)mBGFFp~S-?I0@fJ0um zF#Fp7=gc2exHL>9^&}F(SK$8lCx2j_?cABzC7M*l^$BVKmdK zN0hZ;tjx?dB`-8Au*py7cx&RG&b#fPi7yNBO|vS>Bu6#)^fFhfv3KUhJ_ilYCTn@u zIThtkUcW7cGrv7*vKV3W8-AX-B->EBIXg>ka?fF^byRvFLaT)}u~rJVHlE~5ZIJeB z(3<3pHS5XEX{9XFu3wDYpKcl(DDsrtt zwP20IJI@E=ko8xvC*LS}r<37_Gho83qF-}$d$JZxG>1Hz`;JdHPft!|x2^%R#uas6 z5@^#f$OJ@)ol~ZRpMj8?w$A0yvc3yiC}iSNW$GRo5P{rhoZE-;;ZM);Tqbvn0w@+N zpO5M@7fi{ejlM_Wj6{aX&tPDvv4x2U7FtG#s@MS>f+HkvRUa=oCxAp74-{@k``^V! 
zNrh--2fVFVd@?*OeN;iToxOSVfi+p+a@MM38g7g76biqg+*>N z!w+P%_t(kvLJK`$3^ltf`N<2xz~6DQ4UiIHwHB3uL>dUU%3K7Zk-EL$;#ZF)`K35& zHrISafCN8)`~9bq-BPt0ZQwE_$WI7jt23l`xOiB^zV&kRZ^(U%+YoR`>9x1Rr|RU6 zIPv#^qf7xHTjOKJuV>(X2%(<&2wgAaq&-K=h>C*2W0Yt~Jgz;X+=m_1-IfU_59L!C zaAxJ)DFq}7d=CtTgFSWtJ#*8NvP`T5FJVhPuNfO?t%}=def;kcB1j0Ftj@AA*nOAO z6R2tZL~S14T=JC%^wKAs*WqeYPlpeMM=sf%LE#~iMA=m&uD>#uO^#xDlmGgn`hU`q zF$`J}YLyrGiv%~mg^(mKYrrD^=9t`j;HV0$TBd|?FGRH6+AvCkW%l}r1S-Hv26z9R zqd^+J2A=J65Ta}pE0mvs!J#5L<*w8F$>(sdtUgUVET~8ow*nvEhUK1Zz4$P_!*@wv zC=fhMHXcV$tgoDeWzunj;=A|wFdbguo1`ItZp_`46A}V9-;VulkM_Nz&7QE+XqBU% zO!J{C3F=m+X6a_&v(Y|>)_H9G8{SP`l*AW~jb|+OwCt>}gtdJWPrDqbtamB$zX~0l zJ0_EXuU`C6UkjZFy{C3F4ttz-31Ov~6g&byt9(O94F`V(r@`P@9!GA9AQ_t*HoE{u z!)YY9ko3U(u7W8NQNr z8P>3>@XgP(|HhynRHuAmaqEN1(#ZcG(2^LOVl-I0_$RK0g&ew@#FaRUG`Ql%_FMa6 zbPJn06mk=SiKFG|)u2^vBc+#`)HQs&a^c-_#V&R7#t5#K?!|w>9~5#G2KL;fogiDG zuI~k6gwxC6mbsw)yQ7%ha^=$ZFBeK+&+4tU3onyDKlV9Y`&-f0+AI?1l3ld>^WQfU z2^+rFe|F(}Cx_hO2aBbBvm%3WQy$hHGHmQUbDe|Be}KOR?0J6JpnYec9a9>YI`-kS{?2$*Xf+3gf*ljOv<|y&t)`z2RN21u<@VyutYFP04$Y2h`9h{{f5Sm*s4tRo1OZLB9g!Y=oJ+ zOkcxuiF1zDJuv0g!?nGD4)a?gqN4Th2|q&Ke|^$OKvt%Xm#qd1T`pwAe3A5l{`{Iq z7G#Sn5%i{ptW{BiiXTpXw#O$j9e!&a-r%9lu z{yJ0$d>>o{WnjO#l{qVoh}}Eiqi?UCX>D)c@){@oc6>c$k~Y;f9exrWe#CV8Q}lEe z_=yr)X2TCokDX{~x<5YFgP%ip-ht@51~Boc!Yy1a4O@zn6uKb4d`LIS+gRbeXZ zhRBfdcOmk|o!2`>!9aJ-R{#aW#5SFLZqRLe}3PcyWt2L4Jazc-jhaIPnq`; zCy#w)7lI97fE@oB2d*+h(R47l1{X6PJwCjYcw3qg5&_{sAs~q7_a?3v4*m4h9mh=u z9d7h)mb?S77XK#y8lT4au)g}bA!|VJ&MZ7 z*3O^(O{!?e(Gj=_9R(;K?;R&nj8x*wsKMn!E9>SalRNgjfa*%ew%LlrxyfWL*pJmO zFNR=X&JHEcbwpZlLeddpeC7W%gDxG}DpI6y#f}w{gcF_HFS^M~5F^$oKDhP&=<+H! zlq9`Wyp;V$`|2P}07e7b6waFj!E_JnDs0ps!8VmY7zy9fUW*u9c|XT@qrl77b! z{)*;Q6wbuN%G%Pc@d1SVq|wac3eOqHhnio6EBhRsAA$oAT1_n-??o_}XlL5F=WtcL zPI+JkFBFIx5XHW0UAjh)O^FalF#P1Wt(#NBax4%Px-bym(*t78OOY z$g^rlEe$c+wT%t$_urRx4io)>d}V&+Kc?!cs;R7aape(c1UT>SEe&NAR6JRE(&iJG zn)uv#CJa$p9#n5W9CyP9q^g+uo|E*iDhQ=6^lonh(24vKDEzMc*#QF1O9E+G7t}tu zkN8P|n{@Av{RMklLJ)U}_C1|z`lUv}b*2gqyb!;FosaNV70$$r(VDkqk1nd|-aBlaoLk`o%cqRk2v(v?Ji>#5{rz{h&A`ZUdusS_ zVjrsZAuO!kNrH$va>mk#e3h6 za+JfEYsdW8ZerjwhLkrdzEot2%Q zXr`7Nu0D|2v`^AGfMd2OJBs*?o(m$eJR?5TRzeWRYG(8BV6xNe_bUNUcB7A5c2{>3 z72Zijz*O)2d$c)d^V5IzWIp^<B5ZqEl87|8duSXOIwEv+pv&^EuWV3;ioVvoO!A9H1Ey?2}c(G})@l2)={yy!c@vP*o)w$CFi*Z0Nj9 z@R~-rFD);J&NM&*TyZ8enq3u>L`Y0hc7^mofGnt)C@ao|?oYC%r>EzIp8R17-vQ!KABn76e|&^eH>(73B%*~(zxK_uUQod(fJA{_8qUZrs+E>4 z{Q|WQI9bAC!P;UD0+Gowv6>2IRjQ7XBAm%-OGMkO(F& z2U=cQ?yrx&SZaFunPs0`O9RgEdJ>|* zDP%p8**6QmJ6f+n>Li`*|9{XjGhjF10JZXvxlEZ|uk+s-y-;Y3&AnO)B!K{Lzk4_t z)P%hiC(MRXjbJJTr>W=S-qLweeiu;LLmXYbb{!KI-1nlZR##S68OYgVS-bYqCGIVs^IO)~Z~NaiH3qO^k5$5eZ4MZlR#mw4OUtRimTg@{ zx=X&$bzk4zm`;;&4RJYOeFK~86n;DvE}@&^(_ZB=EaC9oUe5K;pjWnj%j~1zenDzW z8eDvfkG9j>;PMNfcBMvmN`WA|6RFt_QE8x^ETTc$Rf-TCl#uc*onYqGfD0gaA-p)O zf#gs~^2VR_s^w;9L*m!EU+kp@H%(0cPpIOROXv~PsVp+Z-PhF|PfhCB7d;JavIsxf zcRK9Z-|aY^Gmk*VK|)$Pc*U+nnhj;Ybm5qM+T?aFY>r#RTIqqM`zfX4NJS-lohQOG zcys-~`FrFd0^J+VBE_6xVjxitXU=}D;_lu`68tSpz{w%jD}36UKm8mYH5s30nW7SG zG%kAgeZroQ&)y;<6jlJw6W`jKNUWRErivldVrXtdRdpw=Ajs~=?O%=k0_Ahyn0__I zCeX&MoYJ{KeuJmG_g)(>MhAaDLEVv2L>O$XFqD20JcAz4o|^})w8ohW#Sb&JoR-Bp zg~=py2g%ZjPdQzWfJj$sXV-@aReWz|60??@+}^Qgz=z$HR3r4CuVX6XL# z?Mc`^)kc`DJPT9=mA}g}U5vi%`lb0@tazg)iHUr?B4&=5#+7aWYs z*EzE{S3E<62MemszvDxJa>m2JgTk~F#hiQ2^HjGx^J!+pB9uR7R`boQT_1D5kGRMhgUG8l8&%fERo3nNEgXs zy2o+pqTsP!mj+{oy)vakWjIEP>kH-M{scB;REGuahGjpT_$*BYYj)f@TCX+bOCZi* z>{3xKrvY>5KAYm5U9x^~;8Oby9$WtiOxTbtuRv@>O51&_X_*04KA8;PIGhjnQREtI z&J_KV0cW!uZ-;Upto+W7qWaC}0HrBm+|l3<`|t@?1I2ZL5A4fBee zI=sZ&s{m9F0F`Gyv4hmJ7U1K*(zpDgOk5hj+?TbR`2Q9|+Bn{D 
zsiIcPk$>jPV0yK+J!6iCGOS)A+=(mc5Pbs}624W9I4WZqj*nvGAzu zH*fuJaz|G=nyhd1X7xVun_hjg`Xt&8nIZh=Zshu+?;|`IlbH}q42|SmWd*RW%uJY3 zb-+(Xsf=e9d*z6wL#-)uCeq{X8$5L8W}OVhjsES_|&rIfsgXV`#vq37Mk@Wf$c;{P9xo2t89{zhZ)X-Y_=J?>GW! zY$znu=bW5U2jsgn-Ow|!Lj0_#jEA;KJUW6}f*0<3ao9#dP@Z!gp@%8Ppvz!~&)&s* z!PZvRO$QcTf|>0<;%xEK)cE&5$wRjzK7j#7dO6@Slx51Vui(cJA9uN*#J}%2=w-ud z<@g{93o7awmftBYO+O+@nz%FegLlWbqNMthe;fAo7vL;pjLAZ^jfxTY@1&b($|-T( zJM7Wd?@=3!TL#w-uVFbV82K3#-YR)mZ!PE4;?psc2TacLx0E#O>TT@t>T8?(6=enE zTCh1uJxutu#J;TA9?5Sb%BM>Q-|BrtM)m-yKJu;b-Iyd(Eg}P53^I!Ok=WhWqsC`> z5mS1gbBQhiucd79G_XKt)4a$#ssm2k+;ica(d|`PgXQN3KJ11y&NvsBz?-Wa2$6`* zf>9!1!*V_QguqstV$)sZY3al+%mBU(1bgu7Py>i5{ z;J*f0DNqO`BwZv}Vm>fk2pLm_dB3Wbxr)Dow2{;ip4^; zFpIEfackSu^sMbzF=VM%lW?_&8H@!A=>{PG9v7jF8;8p_M=tLz!&(Dd+rH(Ev{`%^ zkaN)82p@1(gxO6meek0?Eb`oz5K1BA4FHC;g-a}icKC0(L%>F^;Min4%=Ba z?Br+WNJD*DI;~N$i?iH`V`bFkr>0l7>}Pg+HugEy?g%Mt%}VN`4>rA2wvO}N?^)f2 zdO^Wf%%`bMzLBYK<>&g=(hUb3_`+jf$vE!sQ>s(bR6=%VXNRJh8XFJE{bzJFnr5w4 zp*+w{Ju6Yt%(9l}bk0Q)V%X{1;;OP{!JClxq!MnTumiRCJNu=OJD7h_j?>sxUd^|{ z<;~73E33(YW>Gz*#9eS5Uch^v$~ zg&fkZib$N0WaPy_j3VwYP18WkWQJG@s;;)rAA>WN9=?-42Hk~_0PtfItc|smnTbH@ z$=-+Hz~h6)@Pkszn8U2ZV`Lk^>nOm;Y&PwIC%;}BAB3!^;WFMR4i45>R;OnT$%~4U zD+xweR>{HFnl7+*B!e7Y*VutQ-EDDbnfyUmK%A3@aU{qgw29HcBVCptVa2DJANd`Q} zzoB<-T%=sfmz3%R2uj+RH$EfM!9Di1T)P< z=L0A!s~+K}LyZ%H@nh0-pxJnD3I(x;pfw_t$NVNtmMC0PBbz-iWR!F2SuvC>55NfF zOs5s{+JBEc3yNWec48^uhKP*hfkGX0VSB8^#x8YTYAYYEO8PwyDV{+ZTBu#5K}^?!Qi2pQXtfP2t;TM1Kfj&fDWcyu zvH#t*SoOnoDA?hb`s^Mjs$AXR!MWnB4Q zU{HJlNY+8*jftLKy!=85RAM^eTBm{Dzx8#`mFR-1%>B>u=hc1_e%B5(|KK5v`yR)@ zZ^aH^6@1UAK_RwQkWrK_5`*{RuJF0Sqai0TfN(%-k&(NJhr;Bpz)l{JBNiWKFE-Ym z^qXDx9UlPLjCEi65sHL}EPiDJWVEhEYhUd_T;8YUt%!d*EbqEd)V7PW$0fbzaJU0w zX1^EiIzj;(Z?cLH@lTvU!0z7K=(ZK~nU5B_A5lGEsPmHf>521N)@4l-vkAV$zCzO* zWbi{Xyt2c!)yG#{ka|;+Mc3JUs4AQXi3d|lD6>}OGTBQep>S9K;b*~P5X1*m zW!Hc2xl_|%4#Nm1Je=j3najB^|39+gIN>Rq zEB?yL%H|>amAzvT6L`L_#`VUan&BYjlhzZbC~kvFOh*~Ym&_-Ko!yj>w{a$Y`y0W zOk;PtG<@-rVd}Qnmj5mjOcf^7bz8t|$B?#J z2rNP>q>Y(}jc39cTdDP~h%noC9N00>%gQ$BDB@js0ro~XwmH)=Fu(qFQ6wKNTu9*c zCsLM)*3{o2aKD}rHX|uSig0r!fH<4_{L@IITMLisyK%E2A5>O$hAM2S7vPM+3VwY* z#6N$n$nApR>mu1UzX|h1R9u~2FKy&b=#$=i9MRr(`j-Y||HDI%kIu4HxE^{TXd-DM zYbtm#a63k?Ad0bJ?mZ`*XMsB3FF{8F^Pg=0xnLoOn2h_w43YP*-ijWh_g@*_1tU5c znXbTBK=<&HF=vD_A-M+=6G*a!*lkJE#XuP0VE+dNL3>XY=V!2fo8#?yar={*`C}t| z(!|mbr5b%n`QuE>OqD{wHFjP+3p`}G9Z`+*yNen+C56XB91W9K}bq0$B z1g|{d-tlK@vEyXmhe33}xE>d4$9VuozAtSJO&6Zl{!wSLk zkZ#X^^?n3TV#XvCMa#1`T-sfYCO*D^i9xEe7FU$JM8O+K0i~O*V%8w(>OK>+D(9Fr zr1h|yV^G$>Fn&T35a9s-IOK0%`|5*L|LEIJl~VEneTc^4<{xKbBI9rccK9Ku+J{4e zRBw*UsE4~BB#5oyyEK>pKfiuC3JMa@(vVeTNR)MjC!1Wd)%E^Uf(qpgFd3IOH{ja)wLPJ~2bHmggSF z!|e&d_JE@Hv@z9?1y_h980wd%fTb#!9*`8QxS1?;ZkcetINwc>Ht}OAh=%zYR>_nn zF;jeJNQN_2+}(mNyVri6tnWKgpEWjfr+%Ah^eB)$hssy0vrw5=YW1CkYiYlx6!uqq z^7(yDjnV%5)1KMclq*&m>%aa<{jzKNU|~7S=Zt?4*{=MXw`?D9+=cT|$>c!%%#W1_N(}Tn$)K>frZkh=X3JCHX*6>Q=6M|&G zS)#gvn%t-&+APFqDm}R1-JvXEllw0y7%f!&$cT4)(es~1 zOHi1ciQZI$TIZ%>JyEekal4HVW@BmX9fdcT^m6i3;Yz=x$j|zKKjQfu7N^P@mDRK2 z*koO4I&yF1d6IKx>cqDN&zDCevGif4<2&@t53xi<9>as1)7|HV?Llk5qedXgOd~3g*g2g6S zeXHU+U-RzPFT@EJIZ4c80sWeYpkeBNQ$PN1H+;|e??Q`=N1{pntjKNhdP}3(B--7@ z>ucWcQIoz<#Qn-zi8UG)OMTJy{b8hSX4(GM{=`4W7fAiM$Y_IXKJCmeCO#GVhB}E6 zQlv9?!~jbmIDtL4ErN8sy%h#pyC{9QHw|BI4w33CmR92UyV?oKTcc0xdyb`F)o{H#^yeZ~LeAc{3 z81EL+jTz@~NakZFezKrff+5sle>5|lgt}F=1Lf;koFNcQ2099kwg6S|C{(dzEU&BH z3<*vvw7t0ZqxCjSJwo5)*2hu59JGM;FTQin8YdUu)?r{m#fMe&T!!5UCR#GQ%N{ZO zTQzQ2+4@IYDb7%r0Y#h!@FqS60C#uFaiu00if0Xrb^BQj+64q+X<(vP*eDuIj*3t* z#-8dSCeH-cf13iAAfwXmGxekl*)0ku#C5!)N6$>?D@*;>27}&D)e*MGhP*$;$1%4;O7yl96(gr7kWEf`y*Ys*D 
zgk!0LRIziE+WpCeg@@8RoySKM?$aZRN_cxXhu$OY%$u&vw9aXgdW|7L?=H5Cl~bvM znky~E?3Zolwz(oN;mX0-^`bj* zlA(b?-}B0fgsbO6QgDhF5Mm8x(n5oZqGFLyd5)S&0uNP^%*+(o&Sqs*ZIipId}=vW31{IUIIf2@#E>hN5Bo|>jr`65)dZHM6nd$+ zE_oW;;bP{T*%o@(z$SYxyu`kqo@1+-FR*+AQyk6RUpPZLh~zYsrlYmh*h0qYuFx@u zgzBE|DXq=fBoZQyLD>WQ($89miScN?w3Pf#bO0*hh8G4kkVl814}LHB%JaiGcUv0V zxw*NNG@K1Wc8@&-YI_gR3U1P@T}kwMywX&q3pY5t2bz+D?u#>?(TJf}6+Fus0SmSr zi7uz_REI`9?^KUL;zoO_CmkSdn{#KN`C~-6ICK{@IDFU@C?JZz1f5shbNx%Uq*F%# z?nh&1OSPbo!-Wl)+TYg8-3D5%{w5}Jl+nPP;`+@MP9Ay)<_??|0)aw^d)X=YLRm(2 zA_Cg(s_?GJuw*$X0$G@05DRAvfS@@wY;wv<+y@KrzXCc`j7`XG62xpN@ud4gx0wFh ze&Z1w19K3q?Z>f5$8=#YBY5d1{gkwmYdGyHPd3uIr~FmQ+Sw1TL>%o*)>Gd5D&|>! zW;25nG*?!y&#(HJWN5?e#zO4Gvz;xn!tYB2jFmBIuC4t96cD(+d$5*Bc2h zAh=9}W+q2vUrI83+V9vdTz(Z-SxYkKTI16OXMZ<&14C0iJ=Q9!)(vgJNsHCpXk=`2 z!wU-gcL&`?tmZvf{{|0?hx#4)!m!LavwgJ(357haY^l16lWn;giTtJZrYIyZ?At1n zO)!;ck4cub%k~~dMdHRM*1tB~!X+7&)SK#TTit_^@$9athR@+TH#}}dB#C2SJ-ElW zXdnm_*V=wa)yR=n<;eGEVIVpSNIYT~(xtN`?^yUF0v(Nk zp=@-j-{H31zHju8AFXu*0tS);7}t>bhHMRk?rRYR?Ej~=9t2cSbuRwd-(Lq$J`4kN zEI|BZvN}vE?^Ye*^x#nCXg^v7+-=Rv(+AT_s-7d;4>y_?gCa&lU4~JIP1$5 zfYDZA)SJ-s`aEpL2+=RCZT*C=H+Ow!Jo5$eOY$W>{-Eki=Usk=-ibvwNe&dW(ku{Q z*Z)DX^-2kFj#|W|SZZg!#@pC!X3-|Ya9R$%;ZVliJx>7#VFtsUhHee7s8T3!;TdNaF z1qp>?57EG)dtlcgmu}idkNH&Vje6;zi)4T??2o=%{uzT_q+@{CtG&hR7d~t-SA`Y9 z@hmZ3F>sWzYv|5jdwl(vp-^F}1V_LR#39Fr=bhMSxIGR^!%b(foOo}Rd=e0X6}dNQ zaFK;pg)8w%b@lbaWJA*K^Ca(ji`B%wcJ4aRGQz{V5s*am!Th0VxKw;-&HxeuNmA2Z zdd4zmaXKiP=*Zr7x+Wi+B5JeJNX?j0qxf##V?0V$+bIsIApOk+bN5MMMGH<3 zTftb#K`pWuI92zof6k=+WT*R!kKsAQ*0M3j*vae#;Pe~5HNZk?mla(1;PPCF%uys4RR#g@>x3;wg(0Hyxrd|El6B6}Kr|3} z`y#@ODUufozbeev`5BoFJ*5YzKO-IHZoLUHt)8Sl?c`!->=g@ks#~YJ?wS~5>BLK0 zw+cC%=QJQPMtt5}RZpUnvIB&9jA6(LzZ_Tj;i0IB2>=`IcAH;{QbU~8%vGmn#$DVi zaV}cAce!_zw=|UixSlc6v4#y{*qr(^g=2#EY9nf%BCJgnAfwC6m;2xdG9PDi8k{vj z%L{AxtCHkywJbdtlbl6$t-KIn{WpI`aIjykIcc{f}?kEonx6l|-CPoXgr8&t9T6I&-9{d8=F&`4isaxx**{1J?Vr`jcF$ zMy!ij{?&r#6;+|H)*llt_s?HB%g=~?;BGeYP1ZlaI3d0sCJT!o7&56(RcHICb;37* zHRVlxXW-UGtJ^;gyBHV_wvH6Q=@dh7B%qy8ia17sNIBwBpaJ15%3S*ex`6}%jA5F> zl(9P;?v=dw{)zPfMM=Gh20eo!#os+|h!f5%FG|T_;x|CoRIrdb^r(KCgI-ibbgn&^ zThl<44xdE8Xw&3GLsl9NMe{l!Ft*LO^@rmPL$9BgreY92pRH%VYY2Zz@iP2vr>kiu z15~bZUoD&+Qe`aAs@m;nB$ zp!+j_3n6#OsK1O)9ueeSuSGezO;Yldl{$oHs%G;N{3ht^?i2Pyq#x)yIp*^INNvcd z*$Mc%ymQ!N;Oc;Xfw*@)r9ikaW>@b)z25w850VZIi9kj`nPc97IFUAvb(EHtT?64` z?!R&Jq_^WG@*!;f$>Y@(ZXw5XHlH6>EH5U;-Yz!(2?*HR@?kkX*vaw`c~aeQPc;1C zm?_#_DQAch=Y9*tr&g!Qdw&18we+P}{-j2U4v!yX@UK^T=h!!K~;I&Ynto<^>f z^L{&X2gcA7V|XKqh9^j0%y3w+kccgv?a3ln=!R6!Hnp zRkKeTbQo5Z@`d$k_IqE{KWUkbQ?td>L~wP=_WzRo2n7{9V7NK&erN}IldD@Y9r#Z% zpGM%bEFc8}ZrNO~GcYj^WA^?`b>ySU32?o6hVpooYl$RsGke2~o=lxzy_f09N@GW% z@V6xwE~44gl=H@z3x6?-j*F{MSIXk22chzpSLk=Q>V7j)0-2%j4K0ND7i{Cb=a@0S z#0DlkG4=6`LZGOY-PI}c!5sJ2owoWh8lW)K>{7*b(AGgW% zu;gsd1yL)r-(zyIJRd7;rX^qn{PiwUI@@2D+{zE}E~3-i%YwgF}3l=Sp6mqx1IOk6v6#X8p> z@*I$6r}CqUs6nohU9xy{`1=U^>->Yar~fv%Yx1)ias@z-+~f;kGNmpP{y9m}&F3Q@ zcg)-yU}R~v_D~5wP|O($+hq#Rb9yzQUvAg<03_?qLPih^sOJ#wi!)@e$z6BOM^iG@ zS_>J5M;igBYipKf;R)Tc`+Hs?feeL5`{DcP|1Sr?YO|J$ynJjN^8pbH=jK}pVGCl{rF&Ux-#14A}j&Tiz-$togP3>68z{Z|h!RZj> zhD*Df`>g_^aA52+l}=k1ShvMje_21G_e?H(Y*`X+ssSlXl-=PrHQ;2p*O*V31&={jw>GvkRP*H zeX8)zIJ-Clo{M(%osxv8t+tvD1O_yy-w}$JI>XF*2@=sedShNPrv@PY`bwkOZq<8D zTd2IoJJl`@ZEu6o6aqE5==*hb7i=qir&j@_>?$?KhcFgbK3G0DxE{cH+s-IQ`f%1O z{QL6m^ka8PaKbOBTpw1TyllJ@fMZb0K|I2`Ob^HSUKC$i< z%#QfnTe5*D>8{6KPd`zgGj`75Yi?%6Cj}H1ZaUz0v2^(fZxJrS7D7Xd@0*TjpyDU7 z77wwCIiFKL=+?UUSJ>p{#R)4nj#kui$$os^uJlM-QY59ZT9mNF)HiU|z|0a_QNWNb|pdF1ikR@*|><7XNgm06_9ybC(^L?T!q*gdfzt0Yl+i&Z{r^3&w z`Xk4m^wf}~M@D^0mefz?OGuFwV2um&oZD&p8#SlUSs6vTAzRGzt 
zu!DvPovV&CNs6!xC)Jz9b+s2&^E8UD&aLK5Z7v@DJBXsUW2HlvYHc&^`h)Sz=8h7-@A1a>+{}FH&W!skCt1*&>jmz* zb~0ssxm=So+5mCg+1hOOz;UAPhPD<#p8E1S=L-XU@?!4-ci2%kCmk%e?2_*=mYn0U zqhRL?kGdGS9poc>jau>^+`dp6Dc^+cKL&rW_8`=2<_0(VpgB#+_rtkjG8+S zMv2~FiLuBelyxYK0Fu5)M%ljXOEDM>mbH|)Ke6KLLcbJ&2H3yf$|E)3<(w)U@QK%X zG5wEf+rxqH_~P(E6&08$zy^RUxb|_|ehom8#h@`_#q|Vm%YO>5-7gT_;7g#{ik`LW zP|zb5%2dxia^5=llg#|JUT@0(a5k)EUgYuj3smmY4f0Rlk2oIhaW`^AUM* z>CYdN%-?zw7~HsGVhV!iZ9LjSfr4=C0b|t8zzjYijRu}(-x~r(-!jkvKtN+K0BYSJKCF$Cac@8Cidyv zEa#*6qO%@pvs`Bm2~Z|o=O=7WtKWe=gR_K452$;z`42mEGb5u*=xxK_XN<6&Cq7S4 zFFpQAxQsP2Ap$9SBtu(AID8}AyuBh>kP!irPK_Q_r+KHxr{`_W6UJ+V$%1GLpV%Cd z8Y1=EN0jmTpB4E8OibHhv!G)1gX~($^;gwe-)d2e7|8H{f|R!B5FS{?E`nDFNXJ7= zBC(Y3O#&|%1Tr8HcqHLIiXKIWaqeUCtVD*S3CG z=>5GxZ=Q)nvr6N@`ag=!J)Y_RkK%JFQ({VtkRegZCS@2g_vMmnDEHiwTkdzU5OWDp z3?YCft6KA*i_uXE1xK*EqRS=R`!49zrFKU~KD zz+sC^rN}&3_?Lf6cB^qioc_GX^LTq$9KT3+VLv&JX%{u}MtqibRr3qQOk z{4n+ZHLW)ABU0>~c%u$F+Th6rro2&jtGwGHZ*J*8?{uA3f(ke)dCF$iTWDOt@(Y8h zQjg)L4WkR;B1@Ws!F_tPDv{K z|8m^&6V1m>@@n)6*7Wis8awiMfp^6FMRTxIO7ItXk}VeB{&;abyU(XPFWMi2qR^>^ zDLt$UR$S`uUvjIcc@8TNju@UF>&*0gqM3e%*u6^>xc4zo`FZ7@M!J|QCJZ1~h zp0oC4&{?b_u@jNQF){f_?+eov6D=|K^DJbjn7Q#BMUS=7VohgdwbnIbNjQ$7DCJr# z=K9s%gS7dDC8yH3SGYA@Z=woC@AUh%1E2dvqKdqVi1#Tm9>+5sbwI^c$IDJfAm5ma zBiqPkYrObG$yEEFGNV7VhilXxsyV$;vF2dHgh8zEncmvNA;*V%X}a0twSn6U05TCA z1u?m43GbACkmkf|nqS+{PZ_4!H$8`|S*t+xaHHO<)x?2?a?2Ln_h|ig8ShlNyav~b zE-jJd=Cmewiqxbjbm*jHmM9*$+2U4QnI>V=5k=sGw-8|yw>;K zmLxN8y?eMN7?_{mTQ&FpfIsJhswGudVoQC9Ij7ykkHKGKegT2rO>ZQM>ir*;9dD&& zuUT3Y*}HQpMpv^R7rTDHWQ%~{^$D+PEZtg3+mBDS-BqF15Z-nurRw}QC9n8Ezo1lm zpQp!Jvtyp|;MClmsF=BPHuTOEfqF0PFJ)44(b z)uay7N%~@PF#neoG7SuzX{u#-chc`iq;f7#l?o+A(UAx8&tchpHiS-sTppi0lwxCu zUYSKRkk}%YnQp(;E|-vq5l6I6hVIR3D|Wnd(ZyS-y-78z4sVULe&$&dmUz~Mu9e(T zdx;fni(ZEM(%R$RHB~&20nrEu9i;jW?yD#(tgEYDa^Qs_7Fv3xE!qmRb=2jEgPz5P zzfCGy0bB|?cMo{hsE1PP>sj2#0;go*)BGA@%n1Lisu94>h(usDu4#(RrzoFp$|F4& zU?BpjgHK2O%I$m(!D!fKt9S5KMkug88hOu`=u#N-d*rh1xgDtckR&sr&c(2wa=HNj zt8pbi5ds0Kg}0FrNH133{$*DD`X!KqzePY0VCsRZl7B`;G41^IPNg0VuKENv{*$@0eQwH73^3kkD+zNIEwoLSSVu9lj7|kU6;XNrWits&$D%I3EEaqxAFeXE1(y~L2vyV6K zp8TUZ+-o1QX@e)ZOc3LBi;rv+LW7;r<)yph6V^4r(r~p@8?GL`zJ2V4amDkZDXi$l zq2a#1PemnFSPd~1_zS%h2xPfnV%5joR|2$5jty?(-+9Q)>KTsq17q9AvP)Pe*cA** z6{cKgq|-(RkU@|8S`I_%Cho_)yRFY^E?wQ!L$g_A0?z!<-Id{`$MWJa8zUqAUKq{U z3OC)pk!OKueKiClPBZ@nSiLSs1);J}6O~zFF}tTH_QPH`4ngzj4k7w9_HN{-}Ye=(5QNh;lIVdmASaD!@}_15yKPLWs6TV4A--X+vpO2 zHtsSaaG!Shl~j;;#!IW!wTvcg5cgIwyH~bA{%bVC3aKMGa_dNd=(!Hc;7x_d5-aC~ z(9>;?A0-L==GR#{QpkAxppJHOa6)NmXb8+fNAfRP;Ms{+|&ZIAwK9qugwQEusn z*DpUP3c^Z!5KcO}cr`TMRuLx#QsOKE|Lziu86i>Z1uRk48ls=bO3`7vR2B+%`E*m7 zKHQn4&hrHpe49rFnybVD?-=?}P2TE&RUo1CYv$5h-OawN0(juZq_xt8!Bp;p74#YD ziHiWE9*0L46XI8MM&8%|-l;>?pl1+7PnYr2))@W-=KSF?DV^Ew?x^}Qnt((ML9!h^ zFkHgLz|MmWN4YjwINiX%V|1$Bl+|Pdo;v1v*1)?E%7y|pB zel$muT)8cauEE@i@`~oVVB-5q(eydEL)OyzWKTTDv zS(oCv(i3R{)8zCH*Nl9pR))1dOJ0~v9cWJdDojOxTr|F8Fbce8KN;Ur8_;Q_ZHro0 zmlYKh*XTPe?fAEQl9V0jRQ$7;5c$r3GPAV`_29ndAl}g&bc|=-hKbq1)F97m|1Q@X zR7!^FE+YY#@*rNh)TszO3jPbY{C>TW?NcV8A=LM5iz+3N&UG7Iv$P zU2OXTE_HMe4VkS7C_O(T-Mu`)d$E}Ll#fbg#hl3-j*lRWVBb#D?u5n3kJT!-@g4*ROV8`v#+!IEAw?gYQlFxfO61@CEl-`bZu~TT&Z9C zwf9suyI<^Vvg4!7Cm*B4UtXo;mI42)&c#6!MedO8GZv2~zhEkGO&EngY;ufBGGdVy zXcxCKw~zzrmJAXXekf;UyzCr3Ni4ae6`HB54jQEF{yPlv9T2;NE2`DZMMG^NUvr9# zGwjZv&a*Lmz_%r(eJ^^ycI6#-qDnyGamW4lgF&Gd*&u(vnsGK_Wqu2cQ=ej=g~Rn5uu0@GO;pDFARx=Biu3knY*T`Y4cFDZJd9QpI?Z-(>OxA ziY6#KO(*gg!`H3%v+n<6%3A2jfP_S!cpL++zBXBZfXYRB-XLM=_k~@_$UuT~bZxQ; z_=RRt0*W6xr}fQGbO7g$2hR&@m?|c|Ln^)(Gcap%Pw)12z|pgxMAng)&(N6!`%bvc zO}<`rTb;2EN*>uB!=vp`6&uTK_dzC$HEO}B$rALj%p^=TyD@DvS`=!YS7ld&Ac)jF 
zVX@38vSa@1-T3onqcw*%1vG!-RD!2Uv>^|c5!85OzA$`~h>l?oCwk_`rChT|fhw4v zCt$=O7uS4W7+H~aN;S3Z8|q<@j&74-fS~zz3Mn2{rpn=5X?bV_ch>4?+#c}1%h zOpt3jxTd%y)YQO)o+dI(W(!3@1^>N1bqWDWBG=^cjMgmd^w2NXJq%^9YsmtPO}nH3 zs8SEZoOSDmPsYsot9u@%nQRybzWg7`Oii{af&_#L9!7doCwuQg@A{JAb-RORE`8Db z_l5xUsFJvKEj|y;yea6sY+t?uo4FO9P^GAW+UjVNgDP>HhJ#y~qJxzXt577Wk}P@y z0Wsx_>%M{oiJ~V8*+xbazVZ+#-Fe2G#7!Wnny<*CA65q<8gG-H7rDAxTB>h+D3;47 z4)t<(h%$?vniOIpND_vmZs5-oy9tuePtyE;D`5<+(k@Nou5@20OIr&Ig7<7l)0QEJ z|G7Lho?8`wcBH(b#LXSaA4d`sra0!dD zf!Bw$;x(LpHQyrRJ^VaScZJq#KEc9-HN#LTuh6WS-UPj9&7?2p!z@@n-PhheFx;6o zhO94|9H)sM?Qj)D8=Ow~#n$BY{D$U-<)huhUjyAq9vd*RFlR)KaHmhtr+GgIo*R061SI)mQZJlrb0 z{Fm8!avu{{C3=SYf*G;sS0EgjRiHXx~JFArzlv|hc=$!zgsl|C7R_xv1ccD4Qf7gzWW^F1h%6%FN zhaP9;zo^|grcuDp{t?G1yJ{zA%E)T*_|j9H=%xLcy*)AQVT-NcsY3L*2u0l3kD7Dz zjO-39e$*7Ku_u%}2Q+|>uMWv|iyG+rMnF;y{~oOAcuPbn^hvVKJQ+XS+$(co!uJT+ zxj|-Mfly2Y+y}O0W8zY0XZl!klUCSr7<#gC85mog*>bBjI`H{Rj*mF~8A}|~Cj(dz zN&0kaD>^ktgUs^KJTEFS3T_MSKraw%IJ->s;;%l1r`N9iDib~9J45cP`Kh^rx$-Y# z*NvTC3HFV8YfTYWAyQaG$}I z(`-R0DyFY2EG-CEWpW-jjmazAc=4I7Lv#+KJG*M{eKKz_m6e3Qc%`MuT&s{0_A2aD zYuo-0%y0R>J_j93P?HV=eE<{WJ<@+oxW7u@3lgIAimLH2UI4`HrcN}U$(`#c9(9{)QiU?3R6Nkp zlMlI6UopBq0Lh`=O0Rm8Z=77#L=)KT!%hoaMYIJt+w%TOUp0d#@ zTQ@D#2`jI%$eRG_5v*RJHiO1=kOiR2r}{bg1Nnu~mcEqM`jC1OFbP?)9zRPJXY3K$ z+-ls@J^6Rn!GCk3Lz;2;Xvb35s3SVqLP6$ihK?1F38eIH6?0Am1j2@4T)h5o<`GL}E7YCL{zlIuB(kgI zldi5rs~kkB)fDC)a+7yL=!7^ z1uqDVBfA*Xm>(Q&{&~j_{E!%k6eB^WCIie(!z&Z>u+qkk%ma#1< zT^@c`BlU}*c17*YsS;wsRCH`%Hxoh&z9FnD=0I1d_XQ6RM>e)R+|Go3mzj2``s(+n zizq~3$G=ewA6lw#&KM3t=_W@D{A9C%zo(ISXtxOw9*Rq#=Ub-`7J!OAI$mswqu3A~ zz_#32#d?1hf;Q?8X^{T|b{9IEZ*aCe(SfSIPRxM5L#8B15M|bo#jYfD8;tI14|@ei zG3yMcAboBcii*8vbeTXGXJ5{K@h1+zd$fA>vbsEY?M(ms1xTOGm*-|oxQk&MZkCoU z%}s65e|(s1Z*)l!1jE@23~?7DG4WUz5Kf}!#Ul_bh@ZRiLZSl(*u+mhix;m*NvJ^u zq0c|5UC)2L7|8CyFNtpwfiGzO^gaALV+vJ8V9HY6ET-P8b|D_?ec?{dof6OHet{}f zHn5&M7(QM~(~Xmox^X<85K5qz>~BZ1URavN5z21@to7@<2%lVMXG)D^$o<-)ab$d% zgankbt1Hnrug}^6xXuo=rA<5Vi@2q0G#auyQO@3JvxpQa{d4z(b{Vo+KJ~jXfVQx_ z8N)O)7>i6wRD@cO-}W9izfpFR^DJsBJ9N)ZHdCw6z;^Y?fBSveC&CR2&lEZ_&tTkC zP%}B>B;%Dyvyy6fr}S7}wmYD>ARxrwb$`+iLac)^dGxj;&-k}O*?xWyf6obC?`)%^ z(i{|O_WF2~U@exYh9L%LiPVRlXIR9+C~T9x?w z6ogi7L&rx~^2&U6h{L2MZt(yWp z-}M0dEmdQTuc*K^{pGgHxP`@J(7%y#*vvrj$WhwB!O={`RN$Hk%$>;$0oqa~jwVi) zmiMJwj&CEj0L^1$Ln>Q4^Y$-i>fA=ftuA}hR4&!?SMruHng(k`{qMju8r=>f()7DF z6K|1F2q?RbvMs;_|8`An=xAejcCZY-mH?hE4OgVJ1)*o-ReNK_%UDvnh4k{^u{5^; zNr7(Dq3v8(9%sIGYIxbH!H1M6B`Ym4Tc`lryX{t4%i9xueF33p2n2kKBzsO?6i|fV zcL;V^5LZxTulURB?|PO8#SN@i3TY?f}Yi>Rj$h@gbs6#aI8fiKgU!+^2{QwyH*HTS(~ z5oMkDMH7C?Aq9Fxg(^;YHpn zy99yg!P!Z1w{76ayeoQ9nc_OF{ntIBa#ON%VlgChYZEfWrr z$$h=Ig3&()P1l+a{t6X{<7NezBbB&|^S}oPWo{P;4frKSS%@`7U2F1fMdRPqkWDW| zDy7>2(YM;{=G8oQheN{R)i+2iogibf5)MD)Cq+Mb50PF_mhmg|(zjNCaIAV`CGMV{&xN`V?)5l6ZD5n@@V-;;K$|T?WmLa&?C!(BA4-nr42-Rd+gp*!+4oIZ=)N%kMHm$ zB>%i8Rsz<-(Jga-kN(`pzG!qfovk(8BF-V%_cBvAKCyC5!mVtyuX`e-Wg^RXTzNb; zw)h^taR{ez4hb4nH^4W2ulBN`4JS$%~U*i0%?_XLpuP)p(cPor52_<9T8@3(Q<}TU9{XF(egC) z9%QI<YKTbz{}*g3^V6PgsE8@b1prVeXG?~we+~jV(Ko&)OMv_ zL^y~xbM9P(ao_M?tLLui7q)6Y!V&MqaXJ<344tTsvu^z%p_`K_{%>Eaj{0;{+5&cW z`irH@Ennz;o+x4M&RhJo;g?P|HP0q~DtC3OxA&_L)mHN`>GXQ1Vpd30<6A8e zySFch4AA9a&8?;p9_;0Q)OjA=ty%R*Rqi@73g`Znh~r5xX|k z+^n7ZZjC1f!8j@Bn_7Y7wCX~1^_noz8l{<4bZm{DlL62nfynIu+EN;@ z@blTVbz26XY&5LJod(stIR=K%8%PStfhiz~c{u zAk&AB&y{XZa+k(^A{}^69+mu}Z_O*GfLeKaO)`SZ@!BnsYsIW71 zZR=lO`8^6@c83MsFcDfKzTh--zJVtL(6(HZT!}BE;J?w_H^w-zQ|uLF2o>b^t(SfX zy#H>-qAwN!TOcY!xk3L4^m&SYU~!xbwc@3S_V`1l6n*H0uxR5hhRf^GrwFj6l8$Hy zG**0p=wp7eM>B`5H;T*H^5IkT#ZJ3?n;%i-?M`50a~Xaj#BJw2jEJsxh308E<&T6h 
zzn9XeYP|HNP&fPk=3ca#wSMZAHU|=8JJif}`ageG-aKJ#H4ol%*fQf{a7!v%Y4$(K zx*v)B!Ku8qU%NVfXV&mqX&i#!>k{nm-zoD+EjA|)PojFV4O+Kc-nS2y>#NKFSsPt0 z(CL?(oVzkUgIN_M_`#=|xlegC2brqVb+i&a9)bNivT;Rrt}Ww1IjCL^M7_lie}&r$ zzIJK&YbXuS6pwUVGwRoXK2RkdOj|ITkTM^KUr9pqUIfcvAw@~_6zVbECEcq^Fc`M< zlYSA90z;?ZpvO@Op3s$C>(o#tO*V5jZG|a8h1l5hQed3w?2yl^vUcKLtqjygUlIez z0ZqQCEox-IM8~4PwFF-kMYR0y6U@$J#0j$-^Q#CHJh;N&X z;_8eBD@0G7a5{K|0EDQ-)QGYu5`eNlo)jIUKYKW0v)|v~R#sMyq*sfTVs}Bz*lUvOw z%lsw+D0~^aQ;k&OKmnytBKyIBF1$nZOtI}1AV|uq!3FgRIBF0K|7LK=@rSR==!1FD z3__?hbfYrJ|8wY>hv1zS^2YtD_c)E$`#kwKz)2xw zx5;pSY2(HDKg2e;DtiaiNersI0e`O5K{PYdII`;VMRn#c}Z5 zHJ0Do^Rqk4z5X9+K-I%xVd%d67yQPcGF;Ml!O3D_G%d|#^!HS->vGq_2xL6PG06X7 zWD1JsRHF>JaQM@b9Fw`swMr=jjA1@JEZ<2(%;t>}HN9Y^zF;NLu7m-0f#ucOzVS80 zi=yKCm2qo0R4KMINH$Y&F^)S)7~~rPXgoG{&kw^Y%Gwd8XZ)al;z@7`;CzY7Sm7(B z$}=Wq(3uiyb}wQ2YS+cGv$96VuexX@3Fi%QKz?r9{8vBqbO!)58>RxMW!U(tOnI+* zG@6#?*TnA4?eAY3NSfcAkGgz}eyi1T^3VJ&)s!E_lCj_=`;>^naSz4TuCgpjl)23c z50QIw*xFlVeKA=`#4nQ-UN}dd7ktjXDiDNCTy`TClck#f3-&%(z7yXpxc`nR#fq^) zF96oj*GJoMhsUYqRaSI)nXdtV{Apm<)f;`1|GKJ622=|B!5Q_2Yga~mTE>C(+@j}J z2b=oVg2p* z3qu2jc-f+s-*j1j9&-a`WS#Ag7fZ=VXfB|(+KmkshahopiprW^(`9?~mR1>a^Sf)^ zH-7-l8Lg{>DE4abMn+qezBUvY@FeYw0~nr}5I&|-EBWyw&N^vuOyw(H_5w9VpX*ID zP>1UsY*>~Q5B_zel~a<4UO2m+83Tldk-Ob5AYAYhK6*!c6f`D7tauBdZAGQ|u+|!n z9`#N%6#Ma94h#L9{NR%?5=w0t-J67dNgoYk!xA)&YElL}tEYLD7dr>#?>>z;QfBW` zs*vN|_niFxU)jn*4imqyX}j1`#%XK{m#Wzu8J=%6>P0dmJZ(ir5MjM=UF_z<6XUtD z)or`1eAD#nLc0yxsVay(rB>vOWW~wG+DR&6xpef&V*EW4Y^zKw&tBo2%-X@s%!X59 zd*B1#i2LuBjc&1+5`(Y;344z*xk|#~xl132-`K-i$B^%)norjE_xq=gCPI@JI{)mS z_q$;iW%36B>#zbB8nA~ndy<3pHbGmiJ~4l&voUD*?`_3KS2>5EhtecD4J(d95<2{) zv21*v=$Xc3!Yfgh?^4{q6y_TL%7$%>*8xX>e_ly>`d?b1E-BqZ?~(aU0{e&uZS#q6ku0xRCs?TevNG{MYj-q|HZBr{cISnwMOU@Ty{rDXT;Kv+wyg|W zFSiv49a8YB4GHwD`S-UWfh)ET9TOg=7oM^I*%Snl42*6U z+8RA5O`%6P#yS1UhU-p-(v-v=-p>5A;3rGeF&eLSGS;GUVT%mROH%<{^7A-fy{qF3 zMi}7SVqe1dg@M6jsZjUss0)ZLy579WHAFJz$60SET1i&V5R?A1BQ`vGy^Xpt>B=Oi0JSGjah?jjDIw-^U zj7BFJqQz9yxZ_YFL{m0`XV)9ri53$tBf==6~$0lY@f#QmE{u@M=4YH0~qAgsY_sw!A zt)BvDpg}W%7sh66%DMuQsdoP0K0XZS$kfI=JoFm=irf_OxkuQaj-|fi6C3VlunPRI zJ%H7vf_!Eua5`ByKKFxYpSU&xuis-07ueDxUdu0y7@wF~+BoIM%Luw*)?V=KMOT*Z z$*`4-7|TVnUom0rSgIH%qjgbOAfIgej6ZSiqA=%s>2rdw{i9B=k|+gDo3%eP?uO(q z9vWhkC8@+u<15#@J{#gj!(R8IBZuqX-r4!f8%8+p=Kai}wyW;$h!$KH8zEjDG3`k|S(A z@m6Ai^7v`Oj=4dmj$SJRE<8-PAX!-ZSo>Bq`*WysUUq6Dwxzntgh W{vqoXvb{`%VS%WMs+AypQUKtQ7_{2@A^ytA-f zyyW%DPA2cs7N44ec1N>IZ84YpR%|Cht4rJbXlA!XA|{FUt}G+x@_Q*lg#Zf_&N@C~ zUFR~M%8ucW>kwGZH!Wk+cea&+==O%wrV!ntFa5GQ#bY{z6Z(Mn$s1!FwXN^%owY!W zA_;N6pvAuZblzr>-4|j7SVd69>7mCMCK97{qOj9>$;a~}BT_o&KMqZ#c~7ufE0&BZ z@yaB;KHvJ*dhxlZk|GUr7WB(N0G0LaoyqdLH`CkCY~v()Z46U6gb=E{?vd70UXw33 zgv*Qn+U?CD_;kB8Q=jQTDsnGF+JI7HjpR!h76&e5?TI;6I>^&>Gf?5AS@|Z?t8U!Z zc2kc|Z0P{;{&~%l8Z!XWsYY=D;Vz9!kK>|hR|nu$Q-8lbDyWvkH3w`jcSx`0iI%OH zu>_BLb!kZhB}I1_Z|?KhE9$DMB5sh1ry)nkBki0m@Z zi4c7T!7C3|p9}wK##PAvg(e&`6-=|2x(1_%xAnz#Zg<+jBuCiBxA*;i^RqGKSKldc zI%rOdNSuriATV9UFIO9o5m7y0-U(?jP@|)E=b`K#`(nf&6puU$md$*xPkJt@llSJ1 zOWh+kKn&$z!5Vw>;SsHKhrjY+xSqb^;?5v)?MQa$znKQh;A9bx;c7yqly)4~5%TFy z)Y!H*0C<=}=xh&JXlHYw)>@+T5Gz9VWy_G`d1}Y8UvThI{{fr$wD_4^v_9~8u&bCk zubHQJ9ZVemrCrtf>WInGcF^P4H7|VaT3@yrReO1-@ItuXos2pEGmTu>Xr&+8foR@+ zB^#}l@OFm3!t10!NYm~wI2)vW-Ff#6Wb;9{;I2C~)vzMGM8t># z7i|fjII(z`Fy7>BBU5x3nnj}grn;SnEGY1^9sWU<>54#VwIhG1U2@*=cZ!7MOF_|W zg4nN?luHF`0l1H-ogf=M9CO|v131PH1`6b3p@NLuv8eWll#W5==mCV<%J02tPm<~N zOd64Wt?|WVU;^y@n}BJ$uwU@}+ig7;}Dh1Ans=kWN+|An%?u&gOoi@Dm z$M3hy6_Gs2flA?*y54rxG0BTNBR5_|3z%HK({R*$Nf?2S_fO^Eugagi<> zA1K~VJD}F{O*RHzexb+Psn=S~VLl$XwqjtLIZU>Wj?v>*eWl)GTDU){)Cxa`rU5_` 
z(HGh-{xVMh3XiAfcV}(9T<=q3c5+CQ7~TK_eXfSVnD21qw>}Tpsqdm|;)pM%fgopE zI6Pz0RAQWQ)JB(mKZXV~^mXpz=zi;gJwnrQdANwn4#(bSfYuNWcI@)b%3J$Gt z{-GOstk;KNeH(>e4}*oXpP>*{hn}Jw^qxK4pSRzmNmwo)*;fUGl;K+&=d`tPq36`9uXX z(cLi%TLxB1Kl$$wuop}1PD*530&pkoW=wL%N}n)m`q7=(j)bWe z28cWc!xr(@{87`d1m0`bd8h;S3kd%~cd zn)bl%l{2NNNRJf=huhW?O|NkQgnD}@x|`*?Gia?F?yGbO&|;8xTnUB@&{f_VMZpsa zRkPUiHN-M%0|NrSqzG$1^Y8#1hu$B-a@FSMm&+x%;Lnlq-N_#Z8|Hl?Iz;ANSJAIpAWK*3&5ECy%u4GL+6YeRh|pWF~@8{b}DTH>}n zJU%*DdK!bl5aSQ}NpP;np@1s$wcR5DMEI`{wFAs;F5?fP@(g?W;+WSh?%5%}Km#D61=O*7Vi!U1sKm((#gZip zYBDi>l|vs06vu{f`MYqERc-UG&H_Au`T;%DHyIQrA@ zJSG_pu|jwtQ%b7-HD`h$R&%~&;;)Y#YX2|Acd{eoK5l9F;~lafzO<%qzW0f?D5{Id zZN`89%Zn7jV3>G<=694;ugIvS1qT1K+u z4;>-z;~ET%G~;r*TAvXLssQag`Rh*^MW;_=f8rvdZ1bMhjPVJ)uN$RhtUMIMr^a0Q zWVBpeo+Kp`5uAs%C!R&nlMo0JH{_8b11B;XsRoWh@(K!}$D7=?LBaots;l;hwGBRr zMRvjBdN=zs9z5WpZ9f|!_je~ukrvV-I1VTwdz+g;M(jZH#*etxrRlTd`fR-Xy;0c% z?DTAGx{QLVcB?3{GC$4lyzFGsPz zfc$V#XFh|@XH?VM+TSkrb+fqY#7J~59_C!tEnJviV!z-O?9Cl%ohmZ9D9G-z2)cxQYIaSZ2pMAw zenY5y%F(SkE^Q6h61gzV7a7ft?Qm~?(-h+Qy1=T%{6=;Q5=(Eh+)-q?n<+U-lOoZC zulIq+lpT2{XVgs0PITTWJx)ysuL*N^xS{m<(*Ds7l4WAMvqa;9z(L>m-r<5x)#NQ4 zyA9AkqoUI0pEQ{c6jZUTO={cI>|t^Z+R1nUoA+w`SDbL%F6pQJP9}UWt2wbUkBu4y zqOD2Hg{rB4cN+?+OocN~LtYj9_g!Dpzvw!XIQG8NMfX%;=C=0_%ne3t65&cr}do-UmVD~mbJ@2dI7icGbyCY<;aDKxpUFRz4zhEQ2mGmg! z5gGw7n8+C-M>JfB(K)}-GZDLXwB)tF)hCexe&eD};N341bJOVczW|bUHxv;@9K!Xe zy`1oQIO6?^>XG8dR8;>YJ@S+ito3LH6zdfiodRQ8L)rLhf;Id+w%WUwC=<>q$i}V$ zyX4%&1gBK-U&!&~J`+gItb*Ex3*e#tYXTTcX9OX!O6L-v8oP+e_0qTk_$B8|6rJtH zgJS4AxO2W3B6dtl!t>*gi6e9K+pu=Dj#wuIQdr8%2ttu85rcAP+0REj%^>-+C0>YF zJfKt5fX=6E2&{)TaaNL8Z6IW80(&^sCEqjE3Rsfik!{C6w%g^R#cI2r$)36hrf_O+vxuICe&q4_D>!Je{N&^A4e z%u_lrh=chQTSCGBFcg18sR7G91Y-5(k%hRco23$C=+q z8J2G3;v=8Jnone^e?V}c0mpymv6q@Yn^=Xf)JHhGWO@<=q4v`cIkK(CRdP&p^BQk z0Zi%j{lik4l;X}S;qG-4E@Wrtr*FA9J}Jc@W73B;uLXPfYra}5%H!u@QBnk&JtLS* zNQ-L1Xc2{bW=;^th735XOj`#y%$hRR_Z=w5WCiM8^XtcYK+8Ko68F>HQ zgPysQI8*0lfg;5|!@MOmpB&r581*$*6j5)@n+_K2eE-c9W>5Wo6#8%dxt<~4?)cFV zme*mduW!>G9TyXHvi{pU^Nws^;pl^MON+n3Jh~#u0)*v)wO_Ta37nsEdkE@geMS1J zrpHky>vhQ+bAG?eEwZjwZTPnA`+0jth=v4!@%V0V!Ox?p&=Y6po|fYc_2^mg1W1kt ztXQcmRf3#8=GBEBQRB`lWw4W(#r#^`X_z268qI-^aeMYzS(4_1WKJu! 
zc0euwFX|^bL>v<+B^S}2-43Yx^(R79C;6x8l;Oqj3h{f?iIBR$fDj)1;=%FG<~7_b zj-`GDb_Mdhz9^$8Yv3k5zfx?E*KZ$fqs+)=dHw{md!+yB^m>#_18oV5ozI&1-+%tE zo?o|%Q<31F^`6x)j9pQ{iik4HyjH5+>fibgxz$bC+1Sf}k`K#WV=5h)AL)$RT}u{b zgh8fXbGS;j!>LY~Yf0!hIxn*0ZLbs$Qr>XLZsFZy=Lfzi_pi*)R760U0%$h}&N{aV zj6AEESl!V~?Wb1Jnhr*`FXsF>MS@%>|nq9(IdrvZi3H-pRTEnk0 zcQIaghT&Ho?ebt0C`fU3d65XGblS8Xaf$6oNL@!LfNM@clF@S9s4ZXo;NN%w3fQN3a1FI&CrK#9D2aluB~7cC0{1 z@PVwvGswF{L7gf+8GU`1JL3i;qIPsDW)DI5O2diLrik2bRgRVq!)oT&Firm!r*EZO zWT&UIrv5k;-eXc6cl;|C3*EbQ;|l|oxU(OW${wDF)K~G&^dw;+&@LczFaxM6ue-vv zUNF|pLLd&+TO@Z9i5FWMH^k%TR+^vkuH3hfEHh3A8L*7H#g8V*bTQH|Hm)`N<=w?X zO&v--5mcyk=?zG$F_c1A{h|2J@f+2QndwT@AHNHwV zy~o@JKWp8@WIi0<8*WuS1zejG-skDP{5`h&58RTFKi62FEmn$}X%)ig8O(t`l==k0 zO=+0Ok=cgNWQ_l#QKJqD!9S9}^4OH*5UVPCgIh?G2-5;pGZawseJkd*es>4I@|*V{ zj2k?v*Grl@$9$2BxpzTk>)>erFKyz8VgTy~cN-A%JNP#Tt<>D9JbyGh8-U5U;ac8gVEV1gZl@8m)ig~sXvcH(_6jGQealnM zG!n@2ew!apSyBzd|MAf-ElJTw%>JCqg-m*{GDFTIyZj*n9Lj-3IuJjI^*Ael<`0yN zI3>B0Y}t24slo!Uzu*?N$=mRf%yj46(q74=DLV7v>8@{r+n=3` zAl}499AZ9|Z@STa_t@j8a`_kQ5hFogHC@db%3cjMwGF$a$>DV*@cw4s$4|?4HAa== zCDKM$U;og@;XJ%CygPDY5oi{GE~uPa-n5a;)E3DnSNp}+O?@@8zp(tQCg6B) z_^PuDToSk8g}KoFv@R-#D`impUPM?z%p<3XYQFVWs~O?XSFpLU^gPMCb zOUkZO9;FtN)608rGBW2w3kD;$pIpKB&}L^hB_*D@#3i=#6y{vP`$OK}>6L}Bhp{S7 zgZsckrjlC@cv6uPZ@_e8ow15)V8==|*V{m3_VOx1rg?HL!XKlDKV4TrTDR@TW5wskzMrK~kT> z0lOmT-~}ukrYWXdxE$1D>O4@$1!)B@ntTG?S`FEu-?K+1|B%;1WwH3%Dcc~OO0*lD zBCK8bWjk_~D1;Y)mlIw> zJS)$E1wv~UZHx0<&%lf zgU~8ggjQME%^E$9w!OzsOZ|fa7MFQs(=O&meg3;|-*5mbpNsZ6P6xpQN=t5&;Jce! zl9}-*Gc9}IubfJwfCRSewD@!i3cIr7hyEiV0@Gs(+qF{2C~*2+cUvipJ!PKRorfoJ z;~V#IopPSAa?itq#!0+2@}_6IW>&l`!mrh>yu4{xdFql&)?a=)6+I4Sl)IX`9lJ_S z&BDfU8$-pL0luwPo`hHM_ny-g<1smRGvi^cjiRq5^`GQvk9rfL$}J}D0M)FNt^4Rn z&6$X3rspUhN$z0UsyZat8@;34XF5wLTfTsZK7aV{-{-Ma`Sz?&OP!rlCkLTikwM#A zKS!_rIT|h;fQNNHeUxeU}ZUDM=^%YZz!Mhbk zwB28}!z_wOtl%MDVq*0js4y3W`0tm3;qNhieqQvtnm!AJ-nMDc%;gcROY^*? zxp{$U4Dvhfo){C#B#&+#BsddcHZ5a5s4GncdnWtiv$^jhf{&7C3ZFvUeW#X)~x`&wLnX%*C(B7ap_^8)T;m9f(cfpEwV zZ=rmSXJIhn1~=RW0zlWE;9SrJr$@64{;#4lk7xRi@VJ*i(?%((4Zyr26zI%T^pZELqem+B- zD_E${#JB+O88`6oHq%^HW=y#5EeyDQS5;ZA@GOso={aP+t{xkMd@#wl_2;NFKth`-GnvE~LjR zp^M2+3mo$yr%2%;fHO!by11NFPeKLhXIRl7U69?|^y!?uKv% z!*t*+T2O-dSKa=k>ScOvDBY3=Uv_*1qz9}d;A3@-Xd$zzv0ow zcg9O}R5CM`ey_}LY|$6M|Vsyfmp!t>Z-50{J!1q?dBy)hRhH8kbT>S zR+xl!s-Us#g94gzSv41hy(JM7$IiK@;@nvlY6a5OGbh< z1h&yUy4ZJ^(V8~n05O1(#>YANu+mD=pRH;&JRuSkMM3?qv=xsp(NC zTn%!%bMJn)m9Tb~3kwMiwee)5HJOYUAGb^gPQNOW4YJ^`Q9B7MmKlU}&0N#2uD3Ke z^=N;cQET1-tJM%X-sRB)fU6qUt?o-kIx0z4ZIMy z4h^_q^IDgg!{TAF4FPd}>e$QeX6;>H)gQikL8ckNEr8EJ0Nqi~fM<1vC1r%2Ip-u$__{gaJZQ>xBJ=E0rgJy3!Y_T7au zGtYq2KB&9hZShOuMK4f)6S;qha3$(3vcaRQ_-*W*p+FQuf=fpmYKavjAV?4j3eqWR zZip9Nc3qRrC0mlBIyd~wX($kZ?h0PMqB`gQHhH97N#KjBP;3yHOk`|LO{wtg%qGX5 z1`%HR<@RwEMW1`JludsTK(h>j`c--x`WNR(Rpve!u#`~3V+TyUp9Ih zzzt`9QI8$dslp32wVrPh$JEH)9R9-Wk-D1YD|7&In9P?vBh@SgWb`M= zxMvbZwf(_PxKaf2uvl^0t|h4{CvgOX;9F^fSM?+tcKADz4K8wOM>Ihegs#)Re@S;O zO5+3PGH`tB4J2t%+NB~5%RGm;)N3=4hBeeTg9TG2KD!jwvq1$aiukP4jU;ayNkzSJ z6)Zee^qF9yL)Gu-|74aOi_nIAa(E8aeunIodZ%fapLz+eXiSU3;ddXOi-p5k;h$rF z#o3*|&;{4wcW}w^Aw9(9)RI*z>PeqfFQ2I@#kwKw`>D06pWK(@D_07ou8_cSp$?Fi zLZ}8K&`A9$Q>|K`{)tT)HzTW-#H6&thiJDhOYRy0j0=FC~)+xrYj4!AX2O&yW3YL(UD;%E>N~xp+8mypCi?C3% z8yEHPElOM0iSf4k4f0Czq2fsq5tW#cyBfjYrfuzW6r3DKTh-dU6FX2#y{u+psgv%? 
zW3UIB?P1aH#sncIP}CVoqg(uJ`6Avx>Mh67nd@QKgcEBS;6lRrpS!u|MD^RIXY)*^ zX2aj-yf&+z_7uq@AjjrQN=gb=A5&UXW0~x7q>n z;C6$o0`bKkj8FeA$vdu9est{5YAkRdx=JF_M`AB>Elgku*vq*hej``!s$CJr$JWr;NesS3|$=EKjNYGc@lx z=fE#J-iJBM-hN^(eTVMUD96)W2+L04-&Mr0xU;{i2|L@h(g>D1a8yL!a4nSM>KH1N zC6e=5@UObmObersvca=FHT0#xwsBbT6*DKqM9D10Y^j)Vvl$#T^x1KO&8 z3@_uM+6Gd*dOl$8_*xG$yU>tftOG=J=E8P>3csU7y_tSFI z_F9ny#KRcU@Yc0hhkH2}*eds#nAp4dZW7?EFfr*OBzpaNbag7$(1XCuZ124iVsNL2 zU3LAF4Lk5Tn017twCME$ihoC~7X3c~TXabf%H8V|U9?2Q(1>qMZ43YN|c z***n@vD%)`JBQQTTjpo>7ll!rj~QXcBT4pRQ@ zdg8->8sg=Z&X~CWVLRF-a`GP!52l2I_4^G8!*#R}vk{xOeOu@!i^b)3%7glD`g6po zDK8$=t@d+fe}8UQ{MtYMyEmRe%@8eCX&mRir;)IL5^hSrB_r2E8{Skb7gdJ*UYS_yvfS+n1b4RmL`byo@*$N+*2xU`>k(71%qmlt} zV`77PnuN%EflzgX^!sZ0CkyS)kWUSzLHi$-Nvbyy$>BV_!wp-3&DZ!J8GT*MFjeog z{Bjsr6$dw#{66I0)K3u_;Ct3#nb1IYq9E3<-y8V7RWI^>NbodXdh4a45yjFaY3^pV zSLEAqnuX@a<{Fu@&Fz8au1)vG3B?xp(EVtzd!+?o;!pi5YoB9s>fDg|famc^rik#6 ziC=bQHNX3~p}#^R=6JcLyC!}OY@N{;HH_I2nVGS(b8-k)6JA?uvvylh3}QepgbX+Trc8h#v}7{MRdPe z5luFq%^ttg$TeBQoPSxoLh%b%yU_KjK#>V%)05)r6nu zK=fe^4N}B6eE%g?kCw{FxGG7wU)ho;|5LW4{bYU@oIg!fD#&{&R8hq*GI3uAbSdY< z-JAE!pHz0WBi)pQ1GJU7IvX3u%eK1u7)Mo+hda|WW@EdbBAe)hy+6bxO)j9~mMQ3<#SD zpDIKJaNmh9D=p8u^z^CJQ?jM==N-$kS{jiielvAAn*pL_C@Vud7v7EGWFgE{5K5mp z%la4!hcM}WVW^Q~Q<-r!Qy$>9B6@v(brp@d%0A~?y&eN-{wPneIq!@VTU2wGtzL+} z#tkdX#AAcHWa> zP8H|1n1Jqw7=X71kShF;ip5lnCK$7TC8*HS%uNU4M1c1~@dT_P2Wu23H=$bpJT6Qd zQgjx;@pi#b?BILB;B66h}aWY{UL|5T*kS?XMk!z?&+!!ZnPgqrW)ZfP8{sKx$zX zsNLvgldh;&fxi_09SBZ!Opv4UXAGFh2#K`9XRu@-M(s^ zf;kEfI&PZ4w`Nq3le-W5A|z%1_DyUa>{;ga(zf)ztdCChp@98i@zk6 zZBYV`w`d>F#epZM+Qfi`U9)9Lhd=4haXtrIl#&ekn3YOTTD>s^<>oP^IGow#`>%M< z(o$$ZgC;)Pq=@1h-U{_y-Xr0}vt)~3)^=`7aCdrqJ>FaSXQrMqLLAsErCoC+Fy*i1 zaNoftiRd|Gse|J$;g6lHT5gI9cUc%)V!%BIoEIpq%G;`bVvl$uQ~^ zpT0oE`~8|}cd%t8x}_d*e7)$XWWPDSIlg;<@ya~?LEG_Bv&Qj!H;VB7c?3+-M;9uU zi0f9x*-dfrfc);kXN;}~%0!de5u1|N(q$)k`EK}3)rSw7c3~d+HK6TfGw{P0b=Ot> z6Z)lyK?aJ9xSu$YQtf?Z>aZ&E_}QR+D-Z=l;h)PUewYhMH{O&rPLX{Ae|N6TV)|6> z8oQPzM_6cBO&p^&DU5_Lk9I5CcZ!Np)IKTAZSggYu9z@WIS%_k9q^l?x!0&Y^Ssn! 
zerj)On$7B9ktcFHJQ7VjVKx$Ear>Nwg~2DJcCG7gN9pXNN)XwO^ z@Z^aVah=*0Dbv2CgXXo?Mcc4P?}s)a?H`&oTw3Qt$$oT>eq+)k=Zf3Fs;tkZ!vd;h2?}iAtO{4f?0j<|b=~&l_`Y~2G z9Iw=Fa}(l#OzeO-yiQXlX|i6>Y}>*z-%E;X=Zm||cOKkl0j!*v{0sA0Z|MWTn82b@ zE|r$pCwlT}0ZPbJXYkYUu@Y-D4J;VY2 zdH$V>{7?9lu#%I(b2!I`3YsOL^57bBZQIOP>}?VPf`$O8h}fE zK3iPa-01@qTMx%q&)G^l63js5LQZum9}tDKyj)SIJUJF#gomi5OYq_m7(YV(NND5K zsN-W`dFr?M*8 zeBo(osyL!YwzagW=Y<_3Uvb@LD<9RDcAPA}dKTsZWdm_2(?g9nB!*FNZhE2&`sScQ zRrzw+qML=qUCFgP6$e3E+e#hK68_Hh(M$ki_s_us_A>o8x=-@!n)O_DgUrqpU7_s= z?c%p}bVzUBv0NTl^rM((2j#Uzusvt~g@GfrC}GM(BdE6JP3hp-dSbV8QPGR=DQ+!T zylaPzTyk1^OJ%RZYkol9tmpr-3-#>>1_}%-Wu_d zg6%SU^?B}g%Fl_t&A7j2|LaMwp7U+6wFZWJn5&<&m@CF8H_*{w9J-jG7MVpI!CessI2%9G`yu8R;J=TgBl--DO7Tr4=*q{2~O&!i10 zoG_;!6?msV+SRi&;roHBWkKWhbmC~R;;<_Y!FEs>!9FD%$#m|s*%B5#lyk5f%(qJ<6?*6!kK zp4@QAb=-Y$znr!|A5uHf+5&@gAX-|RR(LuTB;6^Pr~m{}f@3-H`;2o;kP<6TbC z{h|s0y6vi*yiXU0cGQ@8Ye?;F)IZ=w&6`X&uyc|?-R?b#V8a@+Xm|rxS3}fwV99_$2LN`nhHWmAguJ2fF}rIQ1^3|=)eW1)5mcS)&Gf93^(M#OzYtm8|76Xn`ugHH0VwRh=X?qlbB&Cq z5iHwzqARI9ot%Z=Bw;$|@e#iyw4m}o{Ssa+h+(eekG!FHWP#uis7*iBxz7l99wia}ke|D$$` zD`}LYU^QPjAD{dn`Da;d_m}OZB3fMKA_-NPw01&&Wx;zLwD6(UtsIT5E%2evi3eb= zz!h!JWz6t8-i}m1q_$;j@rEeo^jlIh5_hi{*NPkF_;syF?ZLS_CLY{Ry8g>Fpju?| zQnKFVz>YNN`xGn}3JKnVyRotkedWmoB-CCM|^oVFDL71RCE<#@Cx;wdV0vdto)Cp_l4`yEDhDev{e5XPw6p%0ikecoP&^k zr2-gYZE1nYL6fhMLAO^_#^KStYo}_E;UnRr-$7m+V#asZ_QzbOx#{!XX!B|Dp3oT| zIP}vC>O;Fmy=$s>W?I^|I`NzeY6v999jtkp@q4pD)@C$^z+xQM&~HmV4g<(IP`4x0p!p6!BeX(4Fqm{4QtIb zAs!x$C{927zr%|@ci)A0g}bmTtcWm0-zNMejvG(cGj@OZhUZc-{QAO5#k;TuSSe zkfuianLL)nFE`IHNxfcGRbfL>L)BrpL(z3vMayUVvVc~~rHpel)RQStq#9^pamTG9 zK>u>Ws~7;aPmE#K##bW^AFB3rieoxO#bI!Kb$uXL!n2&mSOWmA>-{5JUz*Kvr2Fw& z2<#$`Ab2(P;>6xDTNWv^?V17z1;;qcGGOpr>Q%@Kb8MZqs~$&wYEpc$A4yQ|wWI8| zRBYS8Udv~6;p1Bmm)Qx`&%X=0fttV8-f7R~+YedmL?RNmMa^&(t_30CH*4g_XDCn<{m`l_e7yi5^%)@um350|#^-D_nMf8DCQI z4_OSX6F^BXc{X0|eTw|K&!ms%v`d!&k6QN!o$*7814(1phY7E!ym)}BGs{DU&F&yS z0n7lCzSI+)dvdI2bzHI;99pTaZWW-$F!s}Emq5Xf)on1>d0-%Q_{Uy@=TP&lX=Y?d z0Pzw?_?GD_uB4!>^Xyq2zMP1h_zi|WkR%Tk7*2ckhW@RmNtO`Y!mdg0!{w(7ZGi*NCir=#qF^LL9Te=&5^o5W+Y(pi5(FXf!v& ze^0avBtfAuVPOa9MNhD$w0#7oasiV{?OS37{_fROtUd%AakY3ifufh<7)V#Z-H)Ei z;Zx_`YQezAPrFc!R(FhIjEaY|*(?&I$u zaJ%_-mU!yd#gznz>aS#Q8|VU$Sl+H(mN>ry>l`BC(1EACXH)uIjq85bgtan}1@ff3 zMQTlA=Kw)=7EXNhwZ~yypmV6@wW+Y9h&niH`(1G^_7I__(e@jh_!$XFQq(xZ^LP#^ zi=cW0^=mtIzEPzy6Wj4+B{+C5P=w7P`$q?}?sbDV$e&bujWfmydR6iAHy0bny;8!i z$MuuUM{eu&W9pzrkjG8Lze{iV4_DrgJX}X) z4W842O0FpOTS6Z<586l7Tc_|DZ6>aab{XD3SepN8;97pBSJ8GXCU~NyMVK2vs+J$ihbGdIG>% z!)No1ifuQWTn|JE9a8Jz9{pQeGJ_Y9ucEZ1%`MWy6-?r_FB>C?reIijecHErE<4C0 zXlE&LjO45i4B4>2!7d;di#Eq6-OGBAbiOgi+v!~ou6zt<#39CbSfUWgID#}I6Pe}b zKoE8`fxVmvJF5o=xIq)lrSv!l{_dJ`VuMGw_tOU4>VOvhg14bGfwySJU%mO`hu)O1 z#@P#|lImQLi}-gV-%*J`9E8wUxbZX=I?RUXC8s-QWSk4hJxz_`@35lnw#qS+m1`#pS!o6yd9{ zf>NhR)mKF|fn0H!untu6^$y48ork(%cW`k30;+JK!<=d~@=`zJ6aWz;sa2q&8t9+9Mi=%)j^8)6 zGLb#=?AO|6tZ@b6cJgHKhC?jwNwM-}A~>dwbiI8zHNc<*M#50eBP`HI>Y!yj*zXYs zQ}EvdJr+pz)taU3pbkvYV@g&~xWc8YmRt0dfQx(bd2$rQNDiuAspiVM)t7I+A6kML zWZvgt+!Rz_ZI2bslK+f1Aclt%3L#dKU_B2Cj1Z1j)zxemn4XQ8RP!XK_tj3Y<8~=O zPiV>(1&67j>E(eow4MhkOR9xN-ZP0zC~u!?^5Tyr$0^3XC`YYg*5{c}rLxjchVK^& zihH*8(GK_NgT1{y%Akg`bzoq_I}#hj4=KtXEvgG?Fz&u&r=D|~eX3!>*2uY*XBIqp z9?+(ztKT@)f;HY;3I7~nB2EpkxMiBLwDNwo(O9W&+8^yhvU0Ad$3lAFh}?qzzXD#Y{XQ@qs47}3nZ;*i{P0e2PkvZ&+BV63ORAM^IU$PWyeD~WySXYIU@AmwzHF<+N@}mmR{&>x;~op zSTq?tkz9IN=Qc&9V<0T$i7D&!{aJgvSxuwnR4jutv`p{NvrY5>iq3OSd39U<7ll!vf}I~ z@_4C7gpCJ*MgzQP&#)0>rTUe~YpPk%)`KjCf1D{R{Q|dDPSuik=Un7~_03J0gn}l| zY!;XUq*~-}JBXaQF;!Kc4DHW6$9fxQe%FHJk}ocf8mK7+Q&T!-=3~s-yzkPlplh(v 
zJlQ;{!@FVI;H?DC6+J-tNgHo2K%LMskUovQLq)(L+Q6^O3hzbbel&Oo=mPnc&V5%Q bzm8AbuVoE9qaMlyfL|Cr6WvPfd(r;^ifYg6 diff --git a/ldm/modules/image_degradation/utils_image.py b/ldm/modules/image_degradation/utils_image.py deleted file mode 100644 index 0175f155..00000000 --- a/ldm/modules/image_degradation/utils_image.py +++ /dev/null @@ -1,916 +0,0 @@ -import os -import math -import random -import numpy as np -import torch -import cv2 -from torchvision.utils import make_grid -from datetime import datetime -#import matplotlib.pyplot as plt # TODO: check with Dominik, also bsrgan.py vs bsrgan_light.py - - -os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" - - -''' -# -------------------------------------------- -# Kai Zhang (github: https://github.com/cszn) -# 03/Mar/2019 -# -------------------------------------------- -# https://github.com/twhui/SRGAN-pyTorch -# https://github.com/xinntao/BasicSR -# -------------------------------------------- -''' - - -IMG_EXTENSIONS = ['.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP', '.tif'] - - -def is_image_file(filename): - return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) - - -def get_timestamp(): - return datetime.now().strftime('%y%m%d-%H%M%S') - - -def imshow(x, title=None, cbar=False, figsize=None): - plt.figure(figsize=figsize) - plt.imshow(np.squeeze(x), interpolation='nearest', cmap='gray') - if title: - plt.title(title) - if cbar: - plt.colorbar() - plt.show() - - -def surf(Z, cmap='rainbow', figsize=None): - plt.figure(figsize=figsize) - ax3 = plt.axes(projection='3d') - - w, h = Z.shape[:2] - xx = np.arange(0,w,1) - yy = np.arange(0,h,1) - X, Y = np.meshgrid(xx, yy) - ax3.plot_surface(X,Y,Z,cmap=cmap) - #ax3.contour(X,Y,Z, zdim='z',offset=-2,cmap=cmap) - plt.show() - - -''' -# -------------------------------------------- -# get image pathes -# -------------------------------------------- -''' - - -def get_image_paths(dataroot): - paths = None # return None if dataroot is None - if dataroot is not None: - paths = sorted(_get_paths_from_images(dataroot)) - return paths - - -def _get_paths_from_images(path): - assert os.path.isdir(path), '{:s} is not a valid directory'.format(path) - images = [] - for dirpath, _, fnames in sorted(os.walk(path)): - for fname in sorted(fnames): - if is_image_file(fname): - img_path = os.path.join(dirpath, fname) - images.append(img_path) - assert images, '{:s} has no valid image file'.format(path) - return images - - -''' -# -------------------------------------------- -# split large images into small images -# -------------------------------------------- -''' - - -def patches_from_image(img, p_size=512, p_overlap=64, p_max=800): - w, h = img.shape[:2] - patches = [] - if w > p_max and h > p_max: - w1 = list(np.arange(0, w-p_size, p_size-p_overlap, dtype=np.int)) - h1 = list(np.arange(0, h-p_size, p_size-p_overlap, dtype=np.int)) - w1.append(w-p_size) - h1.append(h-p_size) -# print(w1) -# print(h1) - for i in w1: - for j in h1: - patches.append(img[i:i+p_size, j:j+p_size,:]) - else: - patches.append(img) - - return patches - - -def imssave(imgs, img_path): - """ - imgs: list, N images of size WxHxC - """ - img_name, ext = os.path.splitext(os.path.basename(img_path)) - - for i, img in enumerate(imgs): - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - new_path = os.path.join(os.path.dirname(img_path), img_name+str('_s{:04d}'.format(i))+'.png') - cv2.imwrite(new_path, img) - - -def split_imageset(original_dataroot, taget_dataroot, n_channels=3, p_size=800, p_overlap=96, 
p_max=1000): - """ - split the large images from original_dataroot into small overlapped images with size (p_size)x(p_size), - and save them into taget_dataroot; only the images with larger size than (p_max)x(p_max) - will be splitted. - Args: - original_dataroot: - taget_dataroot: - p_size: size of small images - p_overlap: patch size in training is a good choice - p_max: images with smaller size than (p_max)x(p_max) keep unchanged. - """ - paths = get_image_paths(original_dataroot) - for img_path in paths: - # img_name, ext = os.path.splitext(os.path.basename(img_path)) - img = imread_uint(img_path, n_channels=n_channels) - patches = patches_from_image(img, p_size, p_overlap, p_max) - imssave(patches, os.path.join(taget_dataroot,os.path.basename(img_path))) - #if original_dataroot == taget_dataroot: - #del img_path - -''' -# -------------------------------------------- -# makedir -# -------------------------------------------- -''' - - -def mkdir(path): - if not os.path.exists(path): - os.makedirs(path) - - -def mkdirs(paths): - if isinstance(paths, str): - mkdir(paths) - else: - for path in paths: - mkdir(path) - - -def mkdir_and_rename(path): - if os.path.exists(path): - new_name = path + '_archived_' + get_timestamp() - print('Path already exists. Rename it to [{:s}]'.format(new_name)) - os.rename(path, new_name) - os.makedirs(path) - - -''' -# -------------------------------------------- -# read image from path -# opencv is fast, but read BGR numpy image -# -------------------------------------------- -''' - - -# -------------------------------------------- -# get uint8 image of size HxWxn_channles (RGB) -# -------------------------------------------- -def imread_uint(path, n_channels=3): - # input: path - # output: HxWx3(RGB or GGG), or HxWx1 (G) - if n_channels == 1: - img = cv2.imread(path, 0) # cv2.IMREAD_GRAYSCALE - img = np.expand_dims(img, axis=2) # HxWx1 - elif n_channels == 3: - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # BGR or G - if img.ndim == 2: - img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) # GGG - else: - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # RGB - return img - - -# -------------------------------------------- -# matlab's imwrite -# -------------------------------------------- -def imsave(img, img_path): - img = np.squeeze(img) - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - cv2.imwrite(img_path, img) - -def imwrite(img, img_path): - img = np.squeeze(img) - if img.ndim == 3: - img = img[:, :, [2, 1, 0]] - cv2.imwrite(img_path, img) - - - -# -------------------------------------------- -# get single image of size HxWxn_channles (BGR) -# -------------------------------------------- -def read_img(path): - # read image by cv2 - # return: Numpy float32, HWC, BGR, [0,1] - img = cv2.imread(path, cv2.IMREAD_UNCHANGED) # cv2.IMREAD_GRAYSCALE - img = img.astype(np.float32) / 255. - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - # some images have 4 channels - if img.shape[2] > 3: - img = img[:, :, :3] - return img - - -''' -# -------------------------------------------- -# image format conversion -# -------------------------------------------- -# numpy(single) <---> numpy(unit) -# numpy(single) <---> tensor -# numpy(unit) <---> tensor -# -------------------------------------------- -''' - - -# -------------------------------------------- -# numpy(single) [0, 1] <---> numpy(unit) -# -------------------------------------------- - - -def uint2single(img): - - return np.float32(img/255.) 
- - -def single2uint(img): - - return np.uint8((img.clip(0, 1)*255.).round()) - - -def uint162single(img): - - return np.float32(img/65535.) - - -def single2uint16(img): - - return np.uint16((img.clip(0, 1)*65535.).round()) - - -# -------------------------------------------- -# numpy(unit) (HxWxC or HxW) <---> tensor -# -------------------------------------------- - - -# convert uint to 4-dimensional torch tensor -def uint2tensor4(img): - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.).unsqueeze(0) - - -# convert uint to 3-dimensional torch tensor -def uint2tensor3(img): - if img.ndim == 2: - img = np.expand_dims(img, axis=2) - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().div(255.) - - -# convert 2/3/4-dimensional torch tensor to uint -def tensor2uint(img): - img = img.data.squeeze().float().clamp_(0, 1).cpu().numpy() - if img.ndim == 3: - img = np.transpose(img, (1, 2, 0)) - return np.uint8((img*255.0).round()) - - -# -------------------------------------------- -# numpy(single) (HxWxC) <---> tensor -# -------------------------------------------- - - -# convert single (HxWxC) to 3-dimensional torch tensor -def single2tensor3(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float() - - -# convert single (HxWxC) to 4-dimensional torch tensor -def single2tensor4(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1).float().unsqueeze(0) - - -# convert torch tensor to single -def tensor2single(img): - img = img.data.squeeze().float().cpu().numpy() - if img.ndim == 3: - img = np.transpose(img, (1, 2, 0)) - - return img - -# convert torch tensor to single -def tensor2single3(img): - img = img.data.squeeze().float().cpu().numpy() - if img.ndim == 3: - img = np.transpose(img, (1, 2, 0)) - elif img.ndim == 2: - img = np.expand_dims(img, axis=2) - return img - - -def single2tensor5(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float().unsqueeze(0) - - -def single32tensor5(img): - return torch.from_numpy(np.ascontiguousarray(img)).float().unsqueeze(0).unsqueeze(0) - - -def single42tensor4(img): - return torch.from_numpy(np.ascontiguousarray(img)).permute(2, 0, 1, 3).float() - - -# from skimage.io import imread, imsave -def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)): - ''' - Converts a torch Tensor into an image Numpy array of BGR channel order - Input: 4D(B,(3/1),H,W), 3D(C,H,W), or 2D(H,W), any range, RGB channel order - Output: 3D(H,W,C) or 2D(H,W), [0,255], np.uint8 (default) - ''' - tensor = tensor.squeeze().float().cpu().clamp_(*min_max) # squeeze first, then clamp - tensor = (tensor - min_max[0]) / (min_max[1] - min_max[0]) # to range [0,1] - n_dim = tensor.dim() - if n_dim == 4: - n_img = len(tensor) - img_np = make_grid(tensor, nrow=int(math.sqrt(n_img)), normalize=False).numpy() - img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR - elif n_dim == 3: - img_np = tensor.numpy() - img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0)) # HWC, BGR - elif n_dim == 2: - img_np = tensor.numpy() - else: - raise TypeError( - 'Only support 4D, 3D and 2D tensor. But received with dimension: {:d}'.format(n_dim)) - if out_type == np.uint8: - img_np = (img_np * 255.0).round() - # Important. Unlike matlab, numpy.unit8() WILL NOT round by default. 
- return img_np.astype(out_type) - - -''' -# -------------------------------------------- -# Augmentation, flipe and/or rotate -# -------------------------------------------- -# The following two are enough. -# (1) augmet_img: numpy image of WxHxC or WxH -# (2) augment_img_tensor4: tensor image 1xCxWxH -# -------------------------------------------- -''' - - -def augment_img(img, mode=0): - '''Kai Zhang (github: https://github.com/cszn) - ''' - if mode == 0: - return img - elif mode == 1: - return np.flipud(np.rot90(img)) - elif mode == 2: - return np.flipud(img) - elif mode == 3: - return np.rot90(img, k=3) - elif mode == 4: - return np.flipud(np.rot90(img, k=2)) - elif mode == 5: - return np.rot90(img) - elif mode == 6: - return np.rot90(img, k=2) - elif mode == 7: - return np.flipud(np.rot90(img, k=3)) - - -def augment_img_tensor4(img, mode=0): - '''Kai Zhang (github: https://github.com/cszn) - ''' - if mode == 0: - return img - elif mode == 1: - return img.rot90(1, [2, 3]).flip([2]) - elif mode == 2: - return img.flip([2]) - elif mode == 3: - return img.rot90(3, [2, 3]) - elif mode == 4: - return img.rot90(2, [2, 3]).flip([2]) - elif mode == 5: - return img.rot90(1, [2, 3]) - elif mode == 6: - return img.rot90(2, [2, 3]) - elif mode == 7: - return img.rot90(3, [2, 3]).flip([2]) - - -def augment_img_tensor(img, mode=0): - '''Kai Zhang (github: https://github.com/cszn) - ''' - img_size = img.size() - img_np = img.data.cpu().numpy() - if len(img_size) == 3: - img_np = np.transpose(img_np, (1, 2, 0)) - elif len(img_size) == 4: - img_np = np.transpose(img_np, (2, 3, 1, 0)) - img_np = augment_img(img_np, mode=mode) - img_tensor = torch.from_numpy(np.ascontiguousarray(img_np)) - if len(img_size) == 3: - img_tensor = img_tensor.permute(2, 0, 1) - elif len(img_size) == 4: - img_tensor = img_tensor.permute(3, 2, 0, 1) - - return img_tensor.type_as(img) - - -def augment_img_np3(img, mode=0): - if mode == 0: - return img - elif mode == 1: - return img.transpose(1, 0, 2) - elif mode == 2: - return img[::-1, :, :] - elif mode == 3: - img = img[::-1, :, :] - img = img.transpose(1, 0, 2) - return img - elif mode == 4: - return img[:, ::-1, :] - elif mode == 5: - img = img[:, ::-1, :] - img = img.transpose(1, 0, 2) - return img - elif mode == 6: - img = img[:, ::-1, :] - img = img[::-1, :, :] - return img - elif mode == 7: - img = img[:, ::-1, :] - img = img[::-1, :, :] - img = img.transpose(1, 0, 2) - return img - - -def augment_imgs(img_list, hflip=True, rot=True): - # horizontal flip OR rotate - hflip = hflip and random.random() < 0.5 - vflip = rot and random.random() < 0.5 - rot90 = rot and random.random() < 0.5 - - def _augment(img): - if hflip: - img = img[:, ::-1, :] - if vflip: - img = img[::-1, :, :] - if rot90: - img = img.transpose(1, 0, 2) - return img - - return [_augment(img) for img in img_list] - - -''' -# -------------------------------------------- -# modcrop and shave -# -------------------------------------------- -''' - - -def modcrop(img_in, scale): - # img_in: Numpy, HWC or HW - img = np.copy(img_in) - if img.ndim == 2: - H, W = img.shape - H_r, W_r = H % scale, W % scale - img = img[:H - H_r, :W - W_r] - elif img.ndim == 3: - H, W, C = img.shape - H_r, W_r = H % scale, W % scale - img = img[:H - H_r, :W - W_r, :] - else: - raise ValueError('Wrong img ndim: [{:d}].'.format(img.ndim)) - return img - - -def shave(img_in, border=0): - # img_in: Numpy, HWC or HW - img = np.copy(img_in) - h, w = img.shape[:2] - img = img[border:h-border, border:w-border] - return img - - -''' -# 
-------------------------------------------- -# image processing process on numpy image -# channel_convert(in_c, tar_type, img_list): -# rgb2ycbcr(img, only_y=True): -# bgr2ycbcr(img, only_y=True): -# ycbcr2rgb(img): -# -------------------------------------------- -''' - - -def rgb2ycbcr(img, only_y=True): - '''same as matlab rgb2ycbcr - only_y: only return Y channel - Input: - uint8, [0, 255] - float, [0, 1] - ''' - in_img_type = img.dtype - img.astype(np.float32) - if in_img_type != np.uint8: - img *= 255. - # convert - if only_y: - rlt = np.dot(img, [65.481, 128.553, 24.966]) / 255.0 + 16.0 - else: - rlt = np.matmul(img, [[65.481, -37.797, 112.0], [128.553, -74.203, -93.786], - [24.966, 112.0, -18.214]]) / 255.0 + [16, 128, 128] - if in_img_type == np.uint8: - rlt = rlt.round() - else: - rlt /= 255. - return rlt.astype(in_img_type) - - -def ycbcr2rgb(img): - '''same as matlab ycbcr2rgb - Input: - uint8, [0, 255] - float, [0, 1] - ''' - in_img_type = img.dtype - img.astype(np.float32) - if in_img_type != np.uint8: - img *= 255. - # convert - rlt = np.matmul(img, [[0.00456621, 0.00456621, 0.00456621], [0, -0.00153632, 0.00791071], - [0.00625893, -0.00318811, 0]]) * 255.0 + [-222.921, 135.576, -276.836] - if in_img_type == np.uint8: - rlt = rlt.round() - else: - rlt /= 255. - return rlt.astype(in_img_type) - - -def bgr2ycbcr(img, only_y=True): - '''bgr version of rgb2ycbcr - only_y: only return Y channel - Input: - uint8, [0, 255] - float, [0, 1] - ''' - in_img_type = img.dtype - img.astype(np.float32) - if in_img_type != np.uint8: - img *= 255. - # convert - if only_y: - rlt = np.dot(img, [24.966, 128.553, 65.481]) / 255.0 + 16.0 - else: - rlt = np.matmul(img, [[24.966, 112.0, -18.214], [128.553, -74.203, -93.786], - [65.481, -37.797, 112.0]]) / 255.0 + [16, 128, 128] - if in_img_type == np.uint8: - rlt = rlt.round() - else: - rlt /= 255. 
- return rlt.astype(in_img_type) - - -def channel_convert(in_c, tar_type, img_list): - # conversion among BGR, gray and y - if in_c == 3 and tar_type == 'gray': # BGR to gray - gray_list = [cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) for img in img_list] - return [np.expand_dims(img, axis=2) for img in gray_list] - elif in_c == 3 and tar_type == 'y': # BGR to y - y_list = [bgr2ycbcr(img, only_y=True) for img in img_list] - return [np.expand_dims(img, axis=2) for img in y_list] - elif in_c == 1 and tar_type == 'RGB': # gray/y to BGR - return [cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for img in img_list] - else: - return img_list - - -''' -# -------------------------------------------- -# metric, PSNR and SSIM -# -------------------------------------------- -''' - - -# -------------------------------------------- -# PSNR -# -------------------------------------------- -def calculate_psnr(img1, img2, border=0): - # img1 and img2 have range [0, 255] - #img1 = img1.squeeze() - #img2 = img2.squeeze() - if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') - h, w = img1.shape[:2] - img1 = img1[border:h-border, border:w-border] - img2 = img2[border:h-border, border:w-border] - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - mse = np.mean((img1 - img2)**2) - if mse == 0: - return float('inf') - return 20 * math.log10(255.0 / math.sqrt(mse)) - - -# -------------------------------------------- -# SSIM -# -------------------------------------------- -def calculate_ssim(img1, img2, border=0): - '''calculate SSIM - the same outputs as MATLAB's - img1, img2: [0, 255] - ''' - #img1 = img1.squeeze() - #img2 = img2.squeeze() - if not img1.shape == img2.shape: - raise ValueError('Input images must have the same dimensions.') - h, w = img1.shape[:2] - img1 = img1[border:h-border, border:w-border] - img2 = img2[border:h-border, border:w-border] - - if img1.ndim == 2: - return ssim(img1, img2) - elif img1.ndim == 3: - if img1.shape[2] == 3: - ssims = [] - for i in range(3): - ssims.append(ssim(img1[:,:,i], img2[:,:,i])) - return np.array(ssims).mean() - elif img1.shape[2] == 1: - return ssim(np.squeeze(img1), np.squeeze(img2)) - else: - raise ValueError('Wrong input image dimensions.') - - -def ssim(img1, img2): - C1 = (0.01 * 255)**2 - C2 = (0.03 * 255)**2 - - img1 = img1.astype(np.float64) - img2 = img2.astype(np.float64) - kernel = cv2.getGaussianKernel(11, 1.5) - window = np.outer(kernel, kernel.transpose()) - - mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid - mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5] - mu1_sq = mu1**2 - mu2_sq = mu2**2 - mu1_mu2 = mu1 * mu2 - sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq - sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq - sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2 - - ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) * - (sigma1_sq + sigma2_sq + C2)) - return ssim_map.mean() - - -''' -# -------------------------------------------- -# matlab's bicubic imresize (numpy and torch) [0, 1] -# -------------------------------------------- -''' - - -# matlab 'imresize' function, now only support 'bicubic' -def cubic(x): - absx = torch.abs(x) - absx2 = absx**2 - absx3 = absx**3 - return (1.5*absx3 - 2.5*absx2 + 1) * ((absx <= 1).type_as(absx)) + \ - (-0.5*absx3 + 2.5*absx2 - 4*absx + 2) * (((absx > 1)*(absx <= 2)).type_as(absx)) - - -def calculate_weights_indices(in_length, out_length, scale, kernel, kernel_width, 
antialiasing): - if (scale < 1) and (antialiasing): - # Use a modified kernel to simultaneously interpolate and antialias- larger kernel width - kernel_width = kernel_width / scale - - # Output-space coordinates - x = torch.linspace(1, out_length, out_length) - - # Input-space coordinates. Calculate the inverse mapping such that 0.5 - # in output space maps to 0.5 in input space, and 0.5+scale in output - # space maps to 1.5 in input space. - u = x / scale + 0.5 * (1 - 1 / scale) - - # What is the left-most pixel that can be involved in the computation? - left = torch.floor(u - kernel_width / 2) - - # What is the maximum number of pixels that can be involved in the - # computation? Note: it's OK to use an extra pixel here; if the - # corresponding weights are all zero, it will be eliminated at the end - # of this function. - P = math.ceil(kernel_width) + 2 - - # The indices of the input pixels involved in computing the k-th output - # pixel are in row k of the indices matrix. - indices = left.view(out_length, 1).expand(out_length, P) + torch.linspace(0, P - 1, P).view( - 1, P).expand(out_length, P) - - # The weights used to compute the k-th output pixel are in row k of the - # weights matrix. - distance_to_center = u.view(out_length, 1).expand(out_length, P) - indices - # apply cubic kernel - if (scale < 1) and (antialiasing): - weights = scale * cubic(distance_to_center * scale) - else: - weights = cubic(distance_to_center) - # Normalize the weights matrix so that each row sums to 1. - weights_sum = torch.sum(weights, 1).view(out_length, 1) - weights = weights / weights_sum.expand(out_length, P) - - # If a column in weights is all zero, get rid of it. only consider the first and last column. - weights_zero_tmp = torch.sum((weights == 0), 0) - if not math.isclose(weights_zero_tmp[0], 0, rel_tol=1e-6): - indices = indices.narrow(1, 1, P - 2) - weights = weights.narrow(1, 1, P - 2) - if not math.isclose(weights_zero_tmp[-1], 0, rel_tol=1e-6): - indices = indices.narrow(1, 0, P - 2) - weights = weights.narrow(1, 0, P - 2) - weights = weights.contiguous() - indices = indices.contiguous() - sym_len_s = -indices.min() + 1 - sym_len_e = indices.max() - in_length - indices = indices + sym_len_s - 1 - return weights, indices, int(sym_len_s), int(sym_len_e) - - -# -------------------------------------------- -# imresize for tensor image [0, 1] -# -------------------------------------------- -def imresize(img, scale, antialiasing=True): - # Now the scale should be the same for H and W - # input: img: pytorch tensor, CHW or HW [0,1] - # output: CHW or HW [0,1] w/o round - need_squeeze = True if img.dim() == 2 else False - if need_squeeze: - img.unsqueeze_(0) - in_C, in_H, in_W = img.size() - out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) - kernel_width = 4 - kernel = 'cubic' - - # Return the desired dimension order for performing the resize. The - # strategy is to perform the resize first along the dimension with the - # smallest scale factor. - # Now we do not support this. 
- - # get weights and indices - weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( - in_H, out_H, scale, kernel, kernel_width, antialiasing) - weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( - in_W, out_W, scale, kernel, kernel_width, antialiasing) - # process H dimension - # symmetric copying - img_aug = torch.FloatTensor(in_C, in_H + sym_len_Hs + sym_len_He, in_W) - img_aug.narrow(1, sym_len_Hs, in_H).copy_(img) - - sym_patch = img[:, :sym_len_Hs, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - img_aug.narrow(1, 0, sym_len_Hs).copy_(sym_patch_inv) - - sym_patch = img[:, -sym_len_He:, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - img_aug.narrow(1, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) - - out_1 = torch.FloatTensor(in_C, out_H, in_W) - kernel_width = weights_H.size(1) - for i in range(out_H): - idx = int(indices_H[i][0]) - for j in range(out_C): - out_1[j, i, :] = img_aug[j, idx:idx + kernel_width, :].transpose(0, 1).mv(weights_H[i]) - - # process W dimension - # symmetric copying - out_1_aug = torch.FloatTensor(in_C, out_H, in_W + sym_len_Ws + sym_len_We) - out_1_aug.narrow(2, sym_len_Ws, in_W).copy_(out_1) - - sym_patch = out_1[:, :, :sym_len_Ws] - inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(2, inv_idx) - out_1_aug.narrow(2, 0, sym_len_Ws).copy_(sym_patch_inv) - - sym_patch = out_1[:, :, -sym_len_We:] - inv_idx = torch.arange(sym_patch.size(2) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(2, inv_idx) - out_1_aug.narrow(2, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) - - out_2 = torch.FloatTensor(in_C, out_H, out_W) - kernel_width = weights_W.size(1) - for i in range(out_W): - idx = int(indices_W[i][0]) - for j in range(out_C): - out_2[j, :, i] = out_1_aug[j, :, idx:idx + kernel_width].mv(weights_W[i]) - if need_squeeze: - out_2.squeeze_() - return out_2 - - -# -------------------------------------------- -# imresize for numpy image [0, 1] -# -------------------------------------------- -def imresize_np(img, scale, antialiasing=True): - # Now the scale should be the same for H and W - # input: img: Numpy, HWC or HW [0,1] - # output: HWC or HW [0,1] w/o round - img = torch.from_numpy(img) - need_squeeze = True if img.dim() == 2 else False - if need_squeeze: - img.unsqueeze_(2) - - in_H, in_W, in_C = img.size() - out_C, out_H, out_W = in_C, math.ceil(in_H * scale), math.ceil(in_W * scale) - kernel_width = 4 - kernel = 'cubic' - - # Return the desired dimension order for performing the resize. The - # strategy is to perform the resize first along the dimension with the - # smallest scale factor. - # Now we do not support this. 
- - # get weights and indices - weights_H, indices_H, sym_len_Hs, sym_len_He = calculate_weights_indices( - in_H, out_H, scale, kernel, kernel_width, antialiasing) - weights_W, indices_W, sym_len_Ws, sym_len_We = calculate_weights_indices( - in_W, out_W, scale, kernel, kernel_width, antialiasing) - # process H dimension - # symmetric copying - img_aug = torch.FloatTensor(in_H + sym_len_Hs + sym_len_He, in_W, in_C) - img_aug.narrow(0, sym_len_Hs, in_H).copy_(img) - - sym_patch = img[:sym_len_Hs, :, :] - inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(0, inv_idx) - img_aug.narrow(0, 0, sym_len_Hs).copy_(sym_patch_inv) - - sym_patch = img[-sym_len_He:, :, :] - inv_idx = torch.arange(sym_patch.size(0) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(0, inv_idx) - img_aug.narrow(0, sym_len_Hs + in_H, sym_len_He).copy_(sym_patch_inv) - - out_1 = torch.FloatTensor(out_H, in_W, in_C) - kernel_width = weights_H.size(1) - for i in range(out_H): - idx = int(indices_H[i][0]) - for j in range(out_C): - out_1[i, :, j] = img_aug[idx:idx + kernel_width, :, j].transpose(0, 1).mv(weights_H[i]) - - # process W dimension - # symmetric copying - out_1_aug = torch.FloatTensor(out_H, in_W + sym_len_Ws + sym_len_We, in_C) - out_1_aug.narrow(1, sym_len_Ws, in_W).copy_(out_1) - - sym_patch = out_1[:, :sym_len_Ws, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - out_1_aug.narrow(1, 0, sym_len_Ws).copy_(sym_patch_inv) - - sym_patch = out_1[:, -sym_len_We:, :] - inv_idx = torch.arange(sym_patch.size(1) - 1, -1, -1).long() - sym_patch_inv = sym_patch.index_select(1, inv_idx) - out_1_aug.narrow(1, sym_len_Ws + in_W, sym_len_We).copy_(sym_patch_inv) - - out_2 = torch.FloatTensor(out_H, out_W, in_C) - kernel_width = weights_W.size(1) - for i in range(out_W): - idx = int(indices_W[i][0]) - for j in range(out_C): - out_2[:, i, j] = out_1_aug[:, idx:idx + kernel_width, j].mv(weights_W[i]) - if need_squeeze: - out_2.squeeze_() - - return out_2.numpy() - - -if __name__ == '__main__': - print('---') -# img = imread_uint('test.bmp', 3) -# img = uint2single(img) -# img_bicubic = imresize_np(img, 1/4) \ No newline at end of file diff --git a/ldm/modules/losses/__init__.py b/ldm/modules/losses/__init__.py deleted file mode 100644 index 876d7c5b..00000000 --- a/ldm/modules/losses/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from ldm.modules.losses.contperceptual import LPIPSWithDiscriminator \ No newline at end of file diff --git a/ldm/modules/losses/contperceptual.py b/ldm/modules/losses/contperceptual.py deleted file mode 100644 index 672c1e32..00000000 --- a/ldm/modules/losses/contperceptual.py +++ /dev/null @@ -1,111 +0,0 @@ -import torch -import torch.nn as nn - -from taming.modules.losses.vqperceptual import * # TODO: taming dependency yes/no? 
- - -class LPIPSWithDiscriminator(nn.Module): - def __init__(self, disc_start, logvar_init=0.0, kl_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_loss="hinge"): - - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - self.kl_weight = kl_weight - self.pixel_weight = pixelloss_weight - self.perceptual_loss = LPIPS().eval() - self.perceptual_weight = perceptual_weight - # output log variance - self.logvar = nn.Parameter(torch.ones(size=()) * logvar_init) - - self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm - ).apply(weights_init) - self.discriminator_iter_start = disc_start - self.disc_loss = hinge_d_loss if disc_loss == "hinge" else vanilla_d_loss - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, inputs, reconstructions, posteriors, optimizer_idx, - global_step, last_layer=None, cond=None, split="train", - weights=None): - rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = rec_loss + self.perceptual_weight * p_loss - - nll_loss = rec_loss / torch.exp(self.logvar) + self.logvar - weighted_nll_loss = nll_loss - if weights is not None: - weighted_nll_loss = weights*nll_loss - weighted_nll_loss = torch.sum(weighted_nll_loss) / weighted_nll_loss.shape[0] - nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] - kl_loss = posteriors.kl() - kl_loss = torch.sum(kl_loss) / kl_loss.shape[0] - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - g_loss = -torch.mean(logits_fake) - - if self.disc_factor > 0.0: - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - except RuntimeError: - assert not self.training - d_weight = torch.tensor(0.0) - else: - d_weight = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - loss = weighted_nll_loss + self.kl_weight * kl_loss + d_weight * disc_factor * g_loss - - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), "{}/logvar".format(split): self.logvar.detach(), - "{}/kl_loss".format(split): kl_loss.detach().mean(), "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): 
torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - return d_loss, log - diff --git a/ldm/modules/losses/vqperceptual.py b/ldm/modules/losses/vqperceptual.py deleted file mode 100644 index f6998176..00000000 --- a/ldm/modules/losses/vqperceptual.py +++ /dev/null @@ -1,167 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F -from einops import repeat - -from taming.modules.discriminator.model import NLayerDiscriminator, weights_init -from taming.modules.losses.lpips import LPIPS -from taming.modules.losses.vqperceptual import hinge_d_loss, vanilla_d_loss - - -def hinge_d_loss_with_exemplar_weights(logits_real, logits_fake, weights): - assert weights.shape[0] == logits_real.shape[0] == logits_fake.shape[0] - loss_real = torch.mean(F.relu(1. - logits_real), dim=[1,2,3]) - loss_fake = torch.mean(F.relu(1. + logits_fake), dim=[1,2,3]) - loss_real = (weights * loss_real).sum() / weights.sum() - loss_fake = (weights * loss_fake).sum() / weights.sum() - d_loss = 0.5 * (loss_real + loss_fake) - return d_loss - -def adopt_weight(weight, global_step, threshold=0, value=0.): - if global_step < threshold: - weight = value - return weight - - -def measure_perplexity(predicted_indices, n_embed): - # src: https://github.com/karpathy/deep-vector-quantization/blob/main/model.py - # eval cluster perplexity. 
when perplexity == num_embeddings then all clusters are used exactly equally - encodings = F.one_hot(predicted_indices, n_embed).float().reshape(-1, n_embed) - avg_probs = encodings.mean(0) - perplexity = (-(avg_probs * torch.log(avg_probs + 1e-10)).sum()).exp() - cluster_use = torch.sum(avg_probs > 0) - return perplexity, cluster_use - -def l1(x, y): - return torch.abs(x-y) - - -def l2(x, y): - return torch.pow((x-y), 2) - - -class VQLPIPSWithDiscriminator(nn.Module): - def __init__(self, disc_start, codebook_weight=1.0, pixelloss_weight=1.0, - disc_num_layers=3, disc_in_channels=3, disc_factor=1.0, disc_weight=1.0, - perceptual_weight=1.0, use_actnorm=False, disc_conditional=False, - disc_ndf=64, disc_loss="hinge", n_classes=None, perceptual_loss="lpips", - pixel_loss="l1"): - super().__init__() - assert disc_loss in ["hinge", "vanilla"] - assert perceptual_loss in ["lpips", "clips", "dists"] - assert pixel_loss in ["l1", "l2"] - self.codebook_weight = codebook_weight - self.pixel_weight = pixelloss_weight - if perceptual_loss == "lpips": - print(f"{self.__class__.__name__}: Running with LPIPS.") - self.perceptual_loss = LPIPS().eval() - else: - raise ValueError(f"Unknown perceptual loss: >> {perceptual_loss} <<") - self.perceptual_weight = perceptual_weight - - if pixel_loss == "l1": - self.pixel_loss = l1 - else: - self.pixel_loss = l2 - - self.discriminator = NLayerDiscriminator(input_nc=disc_in_channels, - n_layers=disc_num_layers, - use_actnorm=use_actnorm, - ndf=disc_ndf - ).apply(weights_init) - self.discriminator_iter_start = disc_start - if disc_loss == "hinge": - self.disc_loss = hinge_d_loss - elif disc_loss == "vanilla": - self.disc_loss = vanilla_d_loss - else: - raise ValueError(f"Unknown GAN loss '{disc_loss}'.") - print(f"VQLPIPSWithDiscriminator running with {disc_loss} loss.") - self.disc_factor = disc_factor - self.discriminator_weight = disc_weight - self.disc_conditional = disc_conditional - self.n_classes = n_classes - - def calculate_adaptive_weight(self, nll_loss, g_loss, last_layer=None): - if last_layer is not None: - nll_grads = torch.autograd.grad(nll_loss, last_layer, retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, last_layer, retain_graph=True)[0] - else: - nll_grads = torch.autograd.grad(nll_loss, self.last_layer[0], retain_graph=True)[0] - g_grads = torch.autograd.grad(g_loss, self.last_layer[0], retain_graph=True)[0] - - d_weight = torch.norm(nll_grads) / (torch.norm(g_grads) + 1e-4) - d_weight = torch.clamp(d_weight, 0.0, 1e4).detach() - d_weight = d_weight * self.discriminator_weight - return d_weight - - def forward(self, codebook_loss, inputs, reconstructions, optimizer_idx, - global_step, last_layer=None, cond=None, split="train", predicted_indices=None): - if not exists(codebook_loss): - codebook_loss = torch.tensor([0.]).to(inputs.device) - #rec_loss = torch.abs(inputs.contiguous() - reconstructions.contiguous()) - rec_loss = self.pixel_loss(inputs.contiguous(), reconstructions.contiguous()) - if self.perceptual_weight > 0: - p_loss = self.perceptual_loss(inputs.contiguous(), reconstructions.contiguous()) - rec_loss = rec_loss + self.perceptual_weight * p_loss - else: - p_loss = torch.tensor([0.0]) - - nll_loss = rec_loss - #nll_loss = torch.sum(nll_loss) / nll_loss.shape[0] - nll_loss = torch.mean(nll_loss) - - # now the GAN part - if optimizer_idx == 0: - # generator update - if cond is None: - assert not self.disc_conditional - logits_fake = self.discriminator(reconstructions.contiguous()) - else: - assert self.disc_conditional - 
logits_fake = self.discriminator(torch.cat((reconstructions.contiguous(), cond), dim=1)) - g_loss = -torch.mean(logits_fake) - - try: - d_weight = self.calculate_adaptive_weight(nll_loss, g_loss, last_layer=last_layer) - except RuntimeError: - assert not self.training - d_weight = torch.tensor(0.0) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - loss = nll_loss + d_weight * disc_factor * g_loss + self.codebook_weight * codebook_loss.mean() - - log = {"{}/total_loss".format(split): loss.clone().detach().mean(), - "{}/quant_loss".format(split): codebook_loss.detach().mean(), - "{}/nll_loss".format(split): nll_loss.detach().mean(), - "{}/rec_loss".format(split): rec_loss.detach().mean(), - "{}/p_loss".format(split): p_loss.detach().mean(), - "{}/d_weight".format(split): d_weight.detach(), - "{}/disc_factor".format(split): torch.tensor(disc_factor), - "{}/g_loss".format(split): g_loss.detach().mean(), - } - if predicted_indices is not None: - assert self.n_classes is not None - with torch.no_grad(): - perplexity, cluster_usage = measure_perplexity(predicted_indices, self.n_classes) - log[f"{split}/perplexity"] = perplexity - log[f"{split}/cluster_usage"] = cluster_usage - return loss, log - - if optimizer_idx == 1: - # second pass for discriminator update - if cond is None: - logits_real = self.discriminator(inputs.contiguous().detach()) - logits_fake = self.discriminator(reconstructions.contiguous().detach()) - else: - logits_real = self.discriminator(torch.cat((inputs.contiguous().detach(), cond), dim=1)) - logits_fake = self.discriminator(torch.cat((reconstructions.contiguous().detach(), cond), dim=1)) - - disc_factor = adopt_weight(self.disc_factor, global_step, threshold=self.discriminator_iter_start) - d_loss = disc_factor * self.disc_loss(logits_real, logits_fake) - - log = {"{}/disc_loss".format(split): d_loss.clone().detach().mean(), - "{}/logits_real".format(split): logits_real.detach().mean(), - "{}/logits_fake".format(split): logits_fake.detach().mean() - } - return d_loss, log diff --git a/ldm/modules/x_transformer.py b/ldm/modules/x_transformer.py deleted file mode 100644 index 5fc15bf9..00000000 --- a/ldm/modules/x_transformer.py +++ /dev/null @@ -1,641 +0,0 @@ -"""shout-out to https://github.com/lucidrains/x-transformers/tree/main/x_transformers""" -import torch -from torch import nn, einsum -import torch.nn.functional as F -from functools import partial -from inspect import isfunction -from collections import namedtuple -from einops import rearrange, repeat, reduce - -# constants - -DEFAULT_DIM_HEAD = 64 - -Intermediates = namedtuple('Intermediates', [ - 'pre_softmax_attn', - 'post_softmax_attn' -]) - -LayerIntermediates = namedtuple('Intermediates', [ - 'hiddens', - 'attn_intermediates' -]) - - -class AbsolutePositionalEmbedding(nn.Module): - def __init__(self, dim, max_seq_len): - super().__init__() - self.emb = nn.Embedding(max_seq_len, dim) - self.init_() - - def init_(self): - nn.init.normal_(self.emb.weight, std=0.02) - - def forward(self, x): - n = torch.arange(x.shape[1], device=x.device) - return self.emb(n)[None, :, :] - - -class FixedPositionalEmbedding(nn.Module): - def __init__(self, dim): - super().__init__() - inv_freq = 1. 
/ (10000 ** (torch.arange(0, dim, 2).float() / dim)) - self.register_buffer('inv_freq', inv_freq) - - def forward(self, x, seq_dim=1, offset=0): - t = torch.arange(x.shape[seq_dim], device=x.device).type_as(self.inv_freq) + offset - sinusoid_inp = torch.einsum('i , j -> i j', t, self.inv_freq) - emb = torch.cat((sinusoid_inp.sin(), sinusoid_inp.cos()), dim=-1) - return emb[None, :, :] - - -# helpers - -def exists(val): - return val is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def always(val): - def inner(*args, **kwargs): - return val - return inner - - -def not_equals(val): - def inner(x): - return x != val - return inner - - -def equals(val): - def inner(x): - return x == val - return inner - - -def max_neg_value(tensor): - return -torch.finfo(tensor.dtype).max - - -# keyword argument helpers - -def pick_and_pop(keys, d): - values = list(map(lambda key: d.pop(key), keys)) - return dict(zip(keys, values)) - - -def group_dict_by_key(cond, d): - return_val = [dict(), dict()] - for key in d.keys(): - match = bool(cond(key)) - ind = int(not match) - return_val[ind][key] = d[key] - return (*return_val,) - - -def string_begins_with(prefix, str): - return str.startswith(prefix) - - -def group_by_key_prefix(prefix, d): - return group_dict_by_key(partial(string_begins_with, prefix), d) - - -def groupby_prefix_and_trim(prefix, d): - kwargs_with_prefix, kwargs = group_dict_by_key(partial(string_begins_with, prefix), d) - kwargs_without_prefix = dict(map(lambda x: (x[0][len(prefix):], x[1]), tuple(kwargs_with_prefix.items()))) - return kwargs_without_prefix, kwargs - - -# classes -class Scale(nn.Module): - def __init__(self, value, fn): - super().__init__() - self.value = value - self.fn = fn - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.value, *rest) - - -class Rezero(nn.Module): - def __init__(self, fn): - super().__init__() - self.fn = fn - self.g = nn.Parameter(torch.zeros(1)) - - def forward(self, x, **kwargs): - x, *rest = self.fn(x, **kwargs) - return (x * self.g, *rest) - - -class ScaleNorm(nn.Module): - def __init__(self, dim, eps=1e-5): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(1)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class RMSNorm(nn.Module): - def __init__(self, dim, eps=1e-8): - super().__init__() - self.scale = dim ** -0.5 - self.eps = eps - self.g = nn.Parameter(torch.ones(dim)) - - def forward(self, x): - norm = torch.norm(x, dim=-1, keepdim=True) * self.scale - return x / norm.clamp(min=self.eps) * self.g - - -class Residual(nn.Module): - def forward(self, x, residual): - return x + residual - - -class GRUGating(nn.Module): - def __init__(self, dim): - super().__init__() - self.gru = nn.GRUCell(dim, dim) - - def forward(self, x, residual): - gated_output = self.gru( - rearrange(x, 'b n d -> (b n) d'), - rearrange(residual, 'b n d -> (b n) d') - ) - - return gated_output.reshape_as(x) - - -# feedforward - -class GEGLU(nn.Module): - def __init__(self, dim_in, dim_out): - super().__init__() - self.proj = nn.Linear(dim_in, dim_out * 2) - - def forward(self, x): - x, gate = self.proj(x).chunk(2, dim=-1) - return x * F.gelu(gate) - - -class FeedForward(nn.Module): - def __init__(self, dim, dim_out=None, mult=4, glu=False, dropout=0.): - super().__init__() - inner_dim = int(dim * mult) - dim_out = default(dim_out, dim) - 
project_in = nn.Sequential( - nn.Linear(dim, inner_dim), - nn.GELU() - ) if not glu else GEGLU(dim, inner_dim) - - self.net = nn.Sequential( - project_in, - nn.Dropout(dropout), - nn.Linear(inner_dim, dim_out) - ) - - def forward(self, x): - return self.net(x) - - -# attention. -class Attention(nn.Module): - def __init__( - self, - dim, - dim_head=DEFAULT_DIM_HEAD, - heads=8, - causal=False, - mask=None, - talking_heads=False, - sparse_topk=None, - use_entmax15=False, - num_mem_kv=0, - dropout=0., - on_attn=False - ): - super().__init__() - if use_entmax15: - raise NotImplementedError("Check out entmax activation instead of softmax activation!") - self.scale = dim_head ** -0.5 - self.heads = heads - self.causal = causal - self.mask = mask - - inner_dim = dim_head * heads - - self.to_q = nn.Linear(dim, inner_dim, bias=False) - self.to_k = nn.Linear(dim, inner_dim, bias=False) - self.to_v = nn.Linear(dim, inner_dim, bias=False) - self.dropout = nn.Dropout(dropout) - - # talking heads - self.talking_heads = talking_heads - if talking_heads: - self.pre_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - self.post_softmax_proj = nn.Parameter(torch.randn(heads, heads)) - - # explicit topk sparse attention - self.sparse_topk = sparse_topk - - # entmax - #self.attn_fn = entmax15 if use_entmax15 else F.softmax - self.attn_fn = F.softmax - - # add memory key / values - self.num_mem_kv = num_mem_kv - if num_mem_kv > 0: - self.mem_k = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - self.mem_v = nn.Parameter(torch.randn(heads, num_mem_kv, dim_head)) - - # attention on attention - self.attn_on_attn = on_attn - self.to_out = nn.Sequential(nn.Linear(inner_dim, dim * 2), nn.GLU()) if on_attn else nn.Linear(inner_dim, dim) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - rel_pos=None, - sinusoidal_emb=None, - prev_attn=None, - mem=None - ): - b, n, _, h, talking_heads, device = *x.shape, self.heads, self.talking_heads, x.device - kv_input = default(context, x) - - q_input = x - k_input = kv_input - v_input = kv_input - - if exists(mem): - k_input = torch.cat((mem, k_input), dim=-2) - v_input = torch.cat((mem, v_input), dim=-2) - - if exists(sinusoidal_emb): - # in shortformer, the query would start at a position offset depending on the past cached memory - offset = k_input.shape[-2] - q_input.shape[-2] - q_input = q_input + sinusoidal_emb(q_input, offset=offset) - k_input = k_input + sinusoidal_emb(k_input) - - q = self.to_q(q_input) - k = self.to_k(k_input) - v = self.to_v(v_input) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h=h), (q, k, v)) - - input_mask = None - if any(map(exists, (mask, context_mask))): - q_mask = default(mask, lambda: torch.ones((b, n), device=device).bool()) - k_mask = q_mask if not exists(context) else context_mask - k_mask = default(k_mask, lambda: torch.ones((b, k.shape[-2]), device=device).bool()) - q_mask = rearrange(q_mask, 'b i -> b () i ()') - k_mask = rearrange(k_mask, 'b j -> b () () j') - input_mask = q_mask * k_mask - - if self.num_mem_kv > 0: - mem_k, mem_v = map(lambda t: repeat(t, 'h n d -> b h n d', b=b), (self.mem_k, self.mem_v)) - k = torch.cat((mem_k, k), dim=-2) - v = torch.cat((mem_v, v), dim=-2) - if exists(input_mask): - input_mask = F.pad(input_mask, (self.num_mem_kv, 0), value=True) - - dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale - mask_value = max_neg_value(dots) - - if exists(prev_attn): - dots = dots + prev_attn - - pre_softmax_attn = dots - - if talking_heads: - dots = 
einsum('b h i j, h k -> b k i j', dots, self.pre_softmax_proj).contiguous() - - if exists(rel_pos): - dots = rel_pos(dots) - - if exists(input_mask): - dots.masked_fill_(~input_mask, mask_value) - del input_mask - - if self.causal: - i, j = dots.shape[-2:] - r = torch.arange(i, device=device) - mask = rearrange(r, 'i -> () () i ()') < rearrange(r, 'j -> () () () j') - mask = F.pad(mask, (j - i, 0), value=False) - dots.masked_fill_(mask, mask_value) - del mask - - if exists(self.sparse_topk) and self.sparse_topk < dots.shape[-1]: - top, _ = dots.topk(self.sparse_topk, dim=-1) - vk = top[..., -1].unsqueeze(-1).expand_as(dots) - mask = dots < vk - dots.masked_fill_(mask, mask_value) - del mask - - attn = self.attn_fn(dots, dim=-1) - post_softmax_attn = attn - - attn = self.dropout(attn) - - if talking_heads: - attn = einsum('b h i j, h k -> b k i j', attn, self.post_softmax_proj).contiguous() - - out = einsum('b h i j, b h j d -> b h i d', attn, v) - out = rearrange(out, 'b h n d -> b n (h d)') - - intermediates = Intermediates( - pre_softmax_attn=pre_softmax_attn, - post_softmax_attn=post_softmax_attn - ) - - return self.to_out(out), intermediates - - -class AttentionLayers(nn.Module): - def __init__( - self, - dim, - depth, - heads=8, - causal=False, - cross_attend=False, - only_cross=False, - use_scalenorm=False, - use_rmsnorm=False, - use_rezero=False, - rel_pos_num_buckets=32, - rel_pos_max_distance=128, - position_infused_attn=False, - custom_layers=None, - sandwich_coef=None, - par_ratio=None, - residual_attn=False, - cross_residual_attn=False, - macaron=False, - pre_norm=True, - gate_residual=False, - **kwargs - ): - super().__init__() - ff_kwargs, kwargs = groupby_prefix_and_trim('ff_', kwargs) - attn_kwargs, _ = groupby_prefix_and_trim('attn_', kwargs) - - dim_head = attn_kwargs.get('dim_head', DEFAULT_DIM_HEAD) - - self.dim = dim - self.depth = depth - self.layers = nn.ModuleList([]) - - self.has_pos_emb = position_infused_attn - self.pia_pos_emb = FixedPositionalEmbedding(dim) if position_infused_attn else None - self.rotary_pos_emb = always(None) - - assert rel_pos_num_buckets <= rel_pos_max_distance, 'number of relative position buckets must be less than the relative position max distance' - self.rel_pos = None - - self.pre_norm = pre_norm - - self.residual_attn = residual_attn - self.cross_residual_attn = cross_residual_attn - - norm_class = ScaleNorm if use_scalenorm else nn.LayerNorm - norm_class = RMSNorm if use_rmsnorm else norm_class - norm_fn = partial(norm_class, dim) - - norm_fn = nn.Identity if use_rezero else norm_fn - branch_fn = Rezero if use_rezero else None - - if cross_attend and not only_cross: - default_block = ('a', 'c', 'f') - elif cross_attend and only_cross: - default_block = ('c', 'f') - else: - default_block = ('a', 'f') - - if macaron: - default_block = ('f',) + default_block - - if exists(custom_layers): - layer_types = custom_layers - elif exists(par_ratio): - par_depth = depth * len(default_block) - assert 1 < par_ratio <= par_depth, 'par ratio out of range' - default_block = tuple(filter(not_equals('f'), default_block)) - par_attn = par_depth // par_ratio - depth_cut = par_depth * 2 // 3 # 2 / 3 attention layer cutoff suggested by PAR paper - par_width = (depth_cut + depth_cut // par_attn) // par_attn - assert len(default_block) <= par_width, 'default block is too large for par_ratio' - par_block = default_block + ('f',) * (par_width - len(default_block)) - par_head = par_block * par_attn - layer_types = par_head + ('f',) * (par_depth - 
len(par_head)) - elif exists(sandwich_coef): - assert sandwich_coef > 0 and sandwich_coef <= depth, 'sandwich coefficient should be less than the depth' - layer_types = ('a',) * sandwich_coef + default_block * (depth - sandwich_coef) + ('f',) * sandwich_coef - else: - layer_types = default_block * depth - - self.layer_types = layer_types - self.num_attn_layers = len(list(filter(equals('a'), layer_types))) - - for layer_type in self.layer_types: - if layer_type == 'a': - layer = Attention(dim, heads=heads, causal=causal, **attn_kwargs) - elif layer_type == 'c': - layer = Attention(dim, heads=heads, **attn_kwargs) - elif layer_type == 'f': - layer = FeedForward(dim, **ff_kwargs) - layer = layer if not macaron else Scale(0.5, layer) - else: - raise Exception(f'invalid layer type {layer_type}') - - if isinstance(layer, Attention) and exists(branch_fn): - layer = branch_fn(layer) - - if gate_residual: - residual_fn = GRUGating(dim) - else: - residual_fn = Residual() - - self.layers.append(nn.ModuleList([ - norm_fn(), - layer, - residual_fn - ])) - - def forward( - self, - x, - context=None, - mask=None, - context_mask=None, - mems=None, - return_hiddens=False - ): - hiddens = [] - intermediates = [] - prev_attn = None - prev_cross_attn = None - - mems = mems.copy() if exists(mems) else [None] * self.num_attn_layers - - for ind, (layer_type, (norm, block, residual_fn)) in enumerate(zip(self.layer_types, self.layers)): - is_last = ind == (len(self.layers) - 1) - - if layer_type == 'a': - hiddens.append(x) - layer_mem = mems.pop(0) - - residual = x - - if self.pre_norm: - x = norm(x) - - if layer_type == 'a': - out, inter = block(x, mask=mask, sinusoidal_emb=self.pia_pos_emb, rel_pos=self.rel_pos, - prev_attn=prev_attn, mem=layer_mem) - elif layer_type == 'c': - out, inter = block(x, context=context, mask=mask, context_mask=context_mask, prev_attn=prev_cross_attn) - elif layer_type == 'f': - out = block(x) - - x = residual_fn(out, residual) - - if layer_type in ('a', 'c'): - intermediates.append(inter) - - if layer_type == 'a' and self.residual_attn: - prev_attn = inter.pre_softmax_attn - elif layer_type == 'c' and self.cross_residual_attn: - prev_cross_attn = inter.pre_softmax_attn - - if not self.pre_norm and not is_last: - x = norm(x) - - if return_hiddens: - intermediates = LayerIntermediates( - hiddens=hiddens, - attn_intermediates=intermediates - ) - - return x, intermediates - - return x - - -class Encoder(AttentionLayers): - def __init__(self, **kwargs): - assert 'causal' not in kwargs, 'cannot set causality on encoder' - super().__init__(causal=False, **kwargs) - - - -class TransformerWrapper(nn.Module): - def __init__( - self, - *, - num_tokens, - max_seq_len, - attn_layers, - emb_dim=None, - max_mem_len=0., - emb_dropout=0., - num_memory_tokens=None, - tie_embedding=False, - use_pos_emb=True - ): - super().__init__() - assert isinstance(attn_layers, AttentionLayers), 'attention layers must be one of Encoder or Decoder' - - dim = attn_layers.dim - emb_dim = default(emb_dim, dim) - - self.max_seq_len = max_seq_len - self.max_mem_len = max_mem_len - self.num_tokens = num_tokens - - self.token_emb = nn.Embedding(num_tokens, emb_dim) - self.pos_emb = AbsolutePositionalEmbedding(emb_dim, max_seq_len) if ( - use_pos_emb and not attn_layers.has_pos_emb) else always(0) - self.emb_dropout = nn.Dropout(emb_dropout) - - self.project_emb = nn.Linear(emb_dim, dim) if emb_dim != dim else nn.Identity() - self.attn_layers = attn_layers - self.norm = nn.LayerNorm(dim) - - self.init_() - - self.to_logits 
= nn.Linear(dim, num_tokens) if not tie_embedding else lambda t: t @ self.token_emb.weight.t() - - # memory tokens (like [cls]) from Memory Transformers paper - num_memory_tokens = default(num_memory_tokens, 0) - self.num_memory_tokens = num_memory_tokens - if num_memory_tokens > 0: - self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim)) - - # let funnel encoder know number of memory tokens, if specified - if hasattr(attn_layers, 'num_memory_tokens'): - attn_layers.num_memory_tokens = num_memory_tokens - - def init_(self): - nn.init.normal_(self.token_emb.weight, std=0.02) - - def forward( - self, - x, - return_embeddings=False, - mask=None, - return_mems=False, - return_attn=False, - mems=None, - **kwargs - ): - b, n, device, num_mem = *x.shape, x.device, self.num_memory_tokens - x = self.token_emb(x) - x += self.pos_emb(x) - x = self.emb_dropout(x) - - x = self.project_emb(x) - - if num_mem > 0: - mem = repeat(self.memory_tokens, 'n d -> b n d', b=b) - x = torch.cat((mem, x), dim=1) - - # auto-handle masking after appending memory tokens - if exists(mask): - mask = F.pad(mask, (num_mem, 0), value=True) - - x, intermediates = self.attn_layers(x, mask=mask, mems=mems, return_hiddens=True, **kwargs) - x = self.norm(x) - - mem, x = x[:, :num_mem], x[:, num_mem:] - - out = self.to_logits(x) if not return_embeddings else x - - if return_mems: - hiddens = intermediates.hiddens - new_mems = list(map(lambda pair: torch.cat(pair, dim=-2), zip(mems, hiddens))) if exists(mems) else hiddens - new_mems = list(map(lambda t: t[..., -self.max_mem_len:, :].detach(), new_mems)) - return out, new_mems - - if return_attn: - attn_maps = list(map(lambda t: t.post_softmax_attn, intermediates.attn_intermediates)) - return out, attn_maps - - return out - diff --git a/ldm/util.py b/ldm/util.py deleted file mode 100644 index 8ba38853..00000000 --- a/ldm/util.py +++ /dev/null @@ -1,203 +0,0 @@ -import importlib - -import torch -import numpy as np -from collections import abc -from einops import rearrange -from functools import partial - -import multiprocessing as mp -from threading import Thread -from queue import Queue - -from inspect import isfunction -from PIL import Image, ImageDraw, ImageFont - - -def log_txt_as_img(wh, xc, size=10): - # wh a tuple of (width, height) - # xc a list of captions to plot - b = len(xc) - txts = list() - for bi in range(b): - txt = Image.new("RGB", wh, color="white") - draw = ImageDraw.Draw(txt) - font = ImageFont.truetype('data/DejaVuSans.ttf', size=size) - nc = int(40 * (wh[0] / 256)) - lines = "\n".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc)) - - try: - draw.text((0, 0), lines, fill="black", font=font) - except UnicodeEncodeError: - print("Cant encode string for logging. Skipping.") - - txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0 - txts.append(txt) - txts = np.stack(txts) - txts = torch.tensor(txts) - return txts - - -def ismap(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] > 3) - - -def isimage(x): - if not isinstance(x, torch.Tensor): - return False - return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1) - - -def exists(x): - return x is not None - - -def default(val, d): - if exists(val): - return val - return d() if isfunction(d) else d - - -def mean_flat(tensor): - """ - https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86 - Take the mean over all non-batch dimensions. 
- """ - return tensor.mean(dim=list(range(1, len(tensor.shape)))) - - -def count_params(model, verbose=False): - total_params = sum(p.numel() for p in model.parameters()) - if verbose: - print(f"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.") - return total_params - - -def instantiate_from_config(config): - if not "target" in config: - if config == '__is_first_stage__': - return None - elif config == "__is_unconditional__": - return None - raise KeyError("Expected key `target` to instantiate.") - return get_obj_from_str(config["target"])(**config.get("params", dict())) - - -def get_obj_from_str(string, reload=False): - module, cls = string.rsplit(".", 1) - if reload: - module_imp = importlib.import_module(module) - importlib.reload(module_imp) - return getattr(importlib.import_module(module, package=None), cls) - - -def _do_parallel_data_prefetch(func, Q, data, idx, idx_to_fn=False): - # create dummy dataset instance - - # run prefetching - if idx_to_fn: - res = func(data, worker_id=idx) - else: - res = func(data) - Q.put([idx, res]) - Q.put("Done") - - -def parallel_data_prefetch( - func: callable, data, n_proc, target_data_type="ndarray", cpu_intensive=True, use_worker_id=False -): - # if target_data_type not in ["ndarray", "list"]: - # raise ValueError( - # "Data, which is passed to parallel_data_prefetch has to be either of type list or ndarray." - # ) - if isinstance(data, np.ndarray) and target_data_type == "list": - raise ValueError("list expected but function got ndarray.") - elif isinstance(data, abc.Iterable): - if isinstance(data, dict): - print( - f'WARNING:"data" argument passed to parallel_data_prefetch is a dict: Using only its values and disregarding keys.' - ) - data = list(data.values()) - if target_data_type == "ndarray": - data = np.asarray(data) - else: - data = list(data) - else: - raise TypeError( - f"The data, that shall be processed parallel has to be either an np.ndarray or an Iterable, but is actually {type(data)}." - ) - - if cpu_intensive: - Q = mp.Queue(1000) - proc = mp.Process - else: - Q = Queue(1000) - proc = Thread - # spawn processes - if target_data_type == "ndarray": - arguments = [ - [func, Q, part, i, use_worker_id] - for i, part in enumerate(np.array_split(data, n_proc)) - ] - else: - step = ( - int(len(data) / n_proc + 1) - if len(data) % n_proc != 0 - else int(len(data) / n_proc) - ) - arguments = [ - [func, Q, part, i, use_worker_id] - for i, part in enumerate( - [data[i: i + step] for i in range(0, len(data), step)] - ) - ] - processes = [] - for i in range(n_proc): - p = proc(target=_do_parallel_data_prefetch, args=arguments[i]) - processes += [p] - - # start processes - print(f"Start prefetching...") - import time - - start = time.time() - gather_res = [[] for _ in range(n_proc)] - try: - for p in processes: - p.start() - - k = 0 - while k < n_proc: - # get result - res = Q.get() - if res == "Done": - k += 1 - else: - gather_res[res[0]] = res[1] - - except Exception as e: - print("Exception: ", e) - for p in processes: - p.terminate() - - raise e - finally: - for p in processes: - p.join() - print(f"Prefetching complete. 
[{time.time() - start} sec.]") - - if target_data_type == 'ndarray': - if not isinstance(gather_res[0], np.ndarray): - return np.concatenate([np.asarray(r) for r in gather_res], axis=0) - - # order outputs - return np.concatenate(gather_res, axis=0) - elif target_data_type == 'list': - out = [] - for r in gather_res: - out.extend(r) - return out - else: - return gather_res diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 3ec3f98a..edb8b420 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At # new memory efficient cross attention blocks do not support hypernets and we already # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention -ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention +# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention # silence new console spam from SD2 ldm.modules.attention.print = lambda *args: None @@ -82,7 +82,12 @@ class StableDiffusionModelHijack: def hijack(self, m): - if type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: + if shared.text_model_name == "XLMR-Large": + model_embeddings = m.cond_stage_model.roberta.embeddings + model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) + m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) + + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) @@ -91,11 +96,7 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) apply_optimizations() - elif shared.text_model_name == "XLMR-Large": - model_embeddings = m.cond_stage_model.roberta.embeddings - model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) - m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - + self.clip = m.cond_stage_model fix_checkpoint() diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index b451d1cf..9ea6e1ce 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -4,7 +4,7 @@ import torch from modules import prompt_parser, devices from modules.shared import opts - +import modules.shared as shared def get_target_prompt_token_count(token_count): return math.ceil(max(token_count, 1) / 75) * 75 @@ -177,6 +177,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count def forward(self, text): + if shared.text_model_name == "XLMR-Large": + return self.wrapped.encode(text) + use_old = opts.use_old_emphasis_implementation if use_old: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = 
self.process_text_old(text) @@ -254,7 +257,10 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def __init__(self, wrapped, hijack): super().__init__(wrapped, hijack) self.tokenizer = wrapped.tokenizer - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + if shared.text_model_name == "XLMR-Large": + self.comma_token = None + else : + self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] self.token_mults = {} tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] diff --git a/ldm/modules/encoders/xlmr.py b/modules/xlmr.py similarity index 100% rename from ldm/modules/encoders/xlmr.py rename to modules/xlmr.py From 9c86fb8cace6d8ac0843e0ddad0ba5ae7f3148c9 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Fri, 2 Dec 2022 16:08:46 +0800 Subject: [PATCH 028/461] fix bug Signed-off-by: zhaohu xing <920232796@qq.com> --- modules/shared.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 1408dee3..ac7678c3 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -111,7 +111,11 @@ restricted_opts = { from omegaconf import OmegaConf config = OmegaConf.load(f"{cmd_opts.config}") # XLMR-Large -text_model_name = config.model.params.cond_stage_config.params.name +try: + text_model_name = config.model.params.cond_stage_config.params.name + +except : + text_model_name = "stable_diffusion" cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access From 4929503258d80abbc4b5f40da034298fe3803906 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 6 Dec 2022 09:03:55 +0800 Subject: [PATCH 029/461] fix bugs Signed-off-by: zhaohu xing <920232796@qq.com> --- modules/devices.py | 4 +-- modules/sd_hijack.py | 2 +- v2-inference.yaml | 67 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 70 insertions(+), 3 deletions(-) create mode 100644 v2-inference.yaml diff --git a/modules/devices.py b/modules/devices.py index e69c1fe3..f00079c6 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -38,8 +38,8 @@ def get_optimal_device(): if torch.cuda.is_available(): return torch.device(get_cuda_device_string()) - # if has_mps(): - # return torch.device("mps") + if has_mps(): + return torch.device("mps") return cpu diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index edb8b420..cd65d356 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -28,7 +28,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At # new memory efficient cross attention blocks do not support hypernets and we already # have memory efficient cross attention anyway, so this disables SD2.0's memory efficient cross attention ldm.modules.attention.MemoryEfficientCrossAttention = ldm.modules.attention.CrossAttention -# ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention +ldm.modules.attention.BasicTransformerBlock.ATTENTION_MODES["softmax-xformers"] = ldm.modules.attention.CrossAttention # silence new console spam from SD2 ldm.modules.attention.print = lambda *args: None diff --git a/v2-inference.yaml b/v2-inference.yaml new file mode 100644 index 00000000..0eb25395 --- /dev/null +++ b/v2-inference.yaml @@ -0,0 +1,67 @@ +model: + base_learning_rate: 1.0e-4 + target: 
ldm.models.diffusion.ddpm.LatentDiffusion + params: + linear_start: 0.00085 + linear_end: 0.0120 + num_timesteps_cond: 1 + log_every_t: 200 + timesteps: 1000 + first_stage_key: "jpg" + cond_stage_key: "txt" + image_size: 64 + channels: 4 + cond_stage_trainable: false + conditioning_key: crossattn + monitor: val/loss_simple_ema + scale_factor: 0.18215 + use_ema: False # we set this to false because this is an inference only config + + unet_config: + target: ldm.modules.diffusionmodules.openaimodel.UNetModel + params: + use_checkpoint: True + use_fp16: True + image_size: 32 # unused + in_channels: 4 + out_channels: 4 + model_channels: 320 + attention_resolutions: [ 4, 2, 1 ] + num_res_blocks: 2 + channel_mult: [ 1, 2, 4, 4 ] + num_head_channels: 64 # need to fix for flash-attn + use_spatial_transformer: True + use_linear_in_transformer: True + transformer_depth: 1 + context_dim: 1024 + legacy: False + + first_stage_config: + target: ldm.models.autoencoder.AutoencoderKL + params: + embed_dim: 4 + monitor: val/rec_loss + ddconfig: + #attn_type: "vanilla-xformers" + double_z: true + z_channels: 4 + resolution: 256 + in_channels: 3 + out_ch: 3 + ch: 128 + ch_mult: + - 1 + - 2 + - 4 + - 4 + num_res_blocks: 2 + attn_resolutions: [] + dropout: 0.0 + lossconfig: + target: torch.nn.Identity + + cond_stage_config: + target: ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder + params: + freeze: True + layer: "penultimate" \ No newline at end of file From 5dcc22606d05ebe5ae89c990bd83a3eb068fcb78 Mon Sep 17 00:00:00 2001 From: zhaohu xing <920232796@qq.com> Date: Tue, 6 Dec 2022 16:04:50 +0800 Subject: [PATCH 030/461] add hash and fix undo hijack bug Signed-off-by: zhaohu xing <920232796@qq.com> --- .DS_Store | Bin 0 -> 6148 bytes launch.py | 10 +++++----- modules/sd_hijack.py | 6 +++++- v2-inference.yaml => v2-inference-v.yaml | 1 + 4 files changed, 11 insertions(+), 6 deletions(-) create mode 100644 .DS_Store rename v2-inference.yaml => v2-inference-v.yaml (98%) diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..5008ddfcf53c02e82d7eee2e57c38e5672ef89f6 GIT binary patch literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Tue, 6 Dec 2022 16:15:15 +0800 Subject: [PATCH 031/461] delete a file Signed-off-by: zhaohu xing <920232796@qq.com> --- .DS_Store | Bin 6148 -> 0 bytes modules/shared.py | 2 +- 2 files changed, 1 insertion(+), 1 deletion(-) delete mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index 5008ddfcf53c02e82d7eee2e57c38e5672ef89f6..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 6148 zcmeH~Jr2S!425mzP>H1@V-^m;4Wg<&0T*E43hX&L&p$$qDprKhvt+--jT7}7np#A3 zem<@ulZcFPQ@L2!n>{z**++&mCkOWA81W14cNZlEfg7;MkzE(HCqgga^y>{tEnwC%0;vJ&^%eQ zLs35+`xjp>T0 Date: Thu, 15 Dec 2022 21:57:48 +0300 Subject: [PATCH 032/461] allow_credentials and allow_headers for api from https://fastapi.tiangolo.com/tutorial/cors/ --- webui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/webui.py b/webui.py index c2d0c6be..13a4d14a 100644 --- a/webui.py +++ b/webui.py @@ -90,11 +90,11 @@ def initialize(): def setup_cors(app): if cmd_opts.cors_allow_origins and cmd_opts.cors_allow_origins_regex: - app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*']) + 
app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*']) elif cmd_opts.cors_allow_origins: - app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*']) + app.add_middleware(CORSMiddleware, allow_origins=cmd_opts.cors_allow_origins.split(','), allow_methods=['*'], allow_credentials=True, allow_headers=['*']) elif cmd_opts.cors_allow_origins_regex: - app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*']) + app.add_middleware(CORSMiddleware, allow_origin_regex=cmd_opts.cors_allow_origins_regex, allow_methods=['*'], allow_credentials=True, allow_headers=['*']) def create_api(app): From f23a822f1c9cb3bd2e8772c75af429e06515eaef Mon Sep 17 00:00:00 2001 From: Philpax Date: Sat, 24 Dec 2022 20:45:16 +1100 Subject: [PATCH 033/461] feat(api): include job_timestamp in progress --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index 8ea3b441..f356dbf7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -171,6 +171,7 @@ class State: "interrupted": self.skipped, "job": self.job, "job_count": self.job_count, + "job_timestamp": self.job_timestamp, "job_no": self.job_no, "sampling_step": self.sampling_step, "sampling_steps": self.sampling_steps, From fa931733f6acc94e058a1d3d4655846e33ae34be Mon Sep 17 00:00:00 2001 From: Philpax Date: Sun, 25 Dec 2022 20:17:49 +1100 Subject: [PATCH 034/461] fix(api): assign sd_model after settings change --- modules/api/api.py | 2 -- modules/processing.py | 6 ++++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 1ceba75d..0a1a1905 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -121,7 +121,6 @@ class Api: def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): populate = txt2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index), "do_not_save_samples": True, "do_not_save_grid": True @@ -153,7 +152,6 @@ class Api: mask = decode_base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index), "do_not_save_samples": True, "do_not_save_grid": True, diff --git a/modules/processing.py b/modules/processing.py index 4a406084..0b270278 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -50,9 +50,9 @@ def apply_color_correction(correction, original_image): correction, channel_axis=2 ), cv2.COLOR_LAB2RGB).astype("uint8")) - + image = blendLayers(image, original_image, BlendType.LUMINOSITY) - + return image @@ -466,6 +466,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if k == 'sd_model_checkpoint': sd_models.reload_model_weights() # make onchange call for changing SD model if k == 'sd_vae': sd_vae.reload_vae_weights() # make onchange call for changing VAE + # Assign sd_model here to ensure that it reflects the model after any changes + p.sd_model = shared.sd_model res = process_images_inner(p) finally: From 5be9387b230794a8c771120577cb213490c940c0 Mon Sep 17 00:00:00 2001 From: Philpax Date: Sun, 25 Dec 2022 21:45:44 +1100 Subject: [PATCH 035/461] fix(api): only begin/end state in lock --- 
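Annotation: the point of this change is that begin()/end() now run while the queue lock is held, so a second request that is still waiting on the lock can no longer reset the progress state of a job that is currently generating. A minimal, self-contained sketch of the resulting pattern; queue_lock, state and process_fn are illustrative stand-ins, and the try/finally is added here only for robustness:

import threading

queue_lock = threading.Lock()

def run_job(state, process_fn):
    # begin/end stay inside the lock, so the progress state always belongs
    # to the request that currently owns the queue
    with queue_lock:
        state.begin()
        try:
            return process_fn()
        finally:
            state.end()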
modules/api/api.py | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 1ceba75d..59b81c93 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -130,14 +130,12 @@ class Api: if populate.sampler_name: populate.sampler_index = None # prevent a warning later on p = StableDiffusionProcessingTxt2Img(**vars(populate)) - # Override object param - - shared.state.begin() with self.queue_lock: + shared.state.begin() processed = process_images(p) + shared.state.end() - shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) @@ -169,12 +167,10 @@ class Api: p.init_images = [decode_base64_to_image(x) for x in init_images] - shared.state.begin() - with self.queue_lock: + shared.state.begin() processed = process_images(p) - - shared.state.end() + shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) From 893933e05ad267778111b4fad6d1ecb80937afdf Mon Sep 17 00:00:00 2001 From: hitomi Date: Sun, 25 Dec 2022 20:49:25 +0800 Subject: [PATCH 036/461] Add memory cache for VAE weights --- modules/sd_vae.py | 31 +++++++++++++++++++++++++------ modules/shared.py | 1 + 2 files changed, 26 insertions(+), 6 deletions(-) diff --git a/modules/sd_vae.py b/modules/sd_vae.py index 3856418e..ac71d62d 100644 --- a/modules/sd_vae.py +++ b/modules/sd_vae.py @@ -1,5 +1,6 @@ import torch import os +import collections from collections import namedtuple from modules import shared, devices, script_callbacks from modules.paths import models_path @@ -30,6 +31,7 @@ base_vae = None loaded_vae_file = None checkpoint_info = None +checkpoints_loaded = collections.OrderedDict() def get_base_vae(model): if base_vae is not None and checkpoint_info == model.sd_checkpoint_info and model: @@ -149,13 +151,30 @@ def load_vae(model, vae_file=None): global first_load, vae_dict, vae_list, loaded_vae_file # save_settings = False + cache_enabled = shared.opts.sd_vae_checkpoint_cache > 0 + if vae_file: - assert os.path.isfile(vae_file), f"VAE file doesn't exist: {vae_file}" - print(f"Loading VAE weights from: {vae_file}") - store_base_vae(model) - vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location) - vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys} - _load_vae_dict(model, vae_dict_1) + if cache_enabled and vae_file in checkpoints_loaded: + # use vae checkpoint cache + print(f"Loading VAE weights [{get_filename(vae_file)}] from cache") + store_base_vae(model) + _load_vae_dict(model, checkpoints_loaded[vae_file]) + else: + assert os.path.isfile(vae_file), f"VAE file doesn't exist: {vae_file}" + print(f"Loading VAE weights from: {vae_file}") + store_base_vae(model) + vae_ckpt = torch.load(vae_file, map_location=shared.weight_load_location) + vae_dict_1 = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss" and k not in vae_ignore_keys} + _load_vae_dict(model, vae_dict_1) + + if cache_enabled: + # cache newly loaded vae + checkpoints_loaded[vae_file] = vae_dict_1.copy() + + # clean up cache if limit is reached + if cache_enabled: + while len(checkpoints_loaded) > shared.opts.sd_vae_checkpoint_cache + 1: # we need to count the current model + checkpoints_loaded.popitem(last=False) # LRU # If vae used is not in dict, update it # It will be removed on refresh though diff --git a/modules/shared.py b/modules/shared.py index d4ddeea0..671d30e1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -356,6 +356,7 @@ 
options_templates.update(options_section(('training', "Training"), { options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": list_checkpoint_tiles()}, refresh=refresh_checkpoints), "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), + "sd_vae_checkpoint_cache": OptionInfo(0, "VAE Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}), "sd_vae": OptionInfo("auto", "SD VAE", gr.Dropdown, lambda: {"choices": sd_vae.vae_list}, refresh=sd_vae.refresh_vae_list), "sd_vae_as_default": OptionInfo(False, "Ignore selected VAE for stable diffusion checkpoints that have their own .vae.pt next to them"), "sd_hypernetwork": OptionInfo("None", "Hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}, refresh=reload_hypernetworks), From 4af3ca5393151d61363c30eef4965e694eeac15e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 26 Dec 2022 10:11:28 +0300 Subject: [PATCH 037/461] make it so that blank ENSD does not break image generation --- modules/processing.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 4a406084..0a9a8f95 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -338,13 +338,14 @@ def slerp(val, low, high): def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, seed_resize_from_h=0, seed_resize_from_w=0, p=None): + eta_noise_seed_delta = opts.eta_noise_seed_delta or 0 xs = [] # if we have multiple seeds, this means we are working with batch size>1; this then # enables the generation of additional tensors with noise that the sampler will use during its processing. # Using those pre-generated tensors instead of simple torch.randn allows a batch with seeds [100, 101] to # produce the same images as with two batches [100], [101]. 
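The "opts.eta_noise_seed_delta or 0" coercion added above is what keeps a cleared ENSD field from breaking generation: a blank setting arrives as None (or possibly an empty string) and would otherwise fail the "> 0" comparison and the later seed + delta arithmetic. A tiny standalone illustration of the same normalisation; the function name is illustrative only:

def normalized_ensd(value):
    # None and '' both collapse to 0, so comparisons and seed arithmetic stay valid
    return value or 0

assert normalized_ensd(None) == 0
assert normalized_ensd('') == 0
assert normalized_ensd(31337) == 31337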
- if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or opts.eta_noise_seed_delta > 0): + if p is not None and p.sampler is not None and (len(seeds) > 1 and opts.enable_batch_seeds or eta_noise_seed_delta > 0): sampler_noises = [[] for _ in range(p.sampler.number_of_needed_noises(p))] else: sampler_noises = None @@ -384,8 +385,8 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see if sampler_noises is not None: cnt = p.sampler.number_of_needed_noises(p) - if opts.eta_noise_seed_delta > 0: - torch.manual_seed(seed + opts.eta_noise_seed_delta) + if eta_noise_seed_delta > 0: + torch.manual_seed(seed + eta_noise_seed_delta) for j in range(cnt): sampler_noises[j].append(devices.randn_without_seed(tuple(noise_shape))) From ae955b0146a52ea2474c79655ede0d361829ef63 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Mon, 26 Dec 2022 09:53:26 -0500 Subject: [PATCH 038/461] fix rgba to rgb when using jpeg output --- modules/images.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/images.py b/modules/images.py index 31d4528d..962a955d 100644 --- a/modules/images.py +++ b/modules/images.py @@ -525,6 +525,9 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality, pnginfo=pnginfo_data) elif extension.lower() in (".jpg", ".jpeg", ".webp"): + if image_to_save.mode == 'RGBA': + image_to_save = image_to_save.convert("RGB") + image_to_save.save(temp_file_path, format=image_format, quality=opts.jpeg_quality) if opts.enable_pnginfo and info is not None: From 4df5009acb6832daef1ff5949404b5aadc8f8fa4 Mon Sep 17 00:00:00 2001 From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com> Date: Mon, 26 Dec 2022 20:49:13 +0000 Subject: [PATCH 039/461] Update sd_samplers.py --- modules/sd_samplers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 177b5338..f4473832 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -462,6 +462,9 @@ class KDiffusionSampler: return extra_params_kwargs def get_sigmas(self, p, steps): + disc = opts.always_discard_next_to_last_sigma or (self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)) + steps += 1 if disc else 0 + if p.sampler_noise_scheduler_override: sigmas = p.sampler_noise_scheduler_override(steps) elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': @@ -469,7 +472,7 @@ class KDiffusionSampler: else: sigmas = self.model_wrap.get_sigmas(steps) - if self.config is not None and self.config.options.get('discard_next_to_last_sigma', False): + if disc: sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) return sigmas From 03f486a2399df0a2b24c7aeea72e64f106a87297 Mon Sep 17 00:00:00 2001 From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com> Date: Mon, 26 Dec 2022 20:49:33 +0000 Subject: [PATCH 040/461] Update shared.py --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index d4ddeea0..5edb316c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -418,6 +418,7 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters" 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), 
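The option added on the next line exposes the trimming that the sampler change above applies when a sampler's config asks for it; with the option enabled, it is applied for every sampler. A minimal illustration of what discarding the next-to-last sigma does to a schedule, using made-up values:

import torch

sigmas = torch.tensor([14.6, 7.0, 3.1, 1.2, 0.4, 0.0])
# drop the second-to-last entry (0.4) while keeping the final 0.0
trimmed = torch.cat([sigmas[:-2], sigmas[-1:]])
# trimmed now holds the values [14.6, 7.0, 3.1, 1.2, 0.0]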
'eta_noise_seed_delta': OptionInfo(0, "Eta noise seed delta", gr.Number, {"precision": 0}), + 'always_discard_next_to_last_sigma': OptionInfo(False, "Always discard next-to-last sigma"), })) options_templates.update(options_section((None, "Hidden options"), { From 5ba04f9ec050a66e918571f07e8863f157f05b44 Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Wed, 21 Dec 2022 13:45:58 +0100 Subject: [PATCH 041/461] Attempting to solve slow loads for `safetensors`. Fixes #5893 --- modules/sd_models.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ecdd91c5..cd938656 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -168,7 +168,10 @@ def get_state_dict_from_checkpoint(pl_sd): def read_state_dict(checkpoint_file, print_global_state=False, map_location=None): _, extension = os.path.splitext(checkpoint_file) if extension.lower() == ".safetensors": - pl_sd = safetensors.torch.load_file(checkpoint_file, device=map_location or shared.weight_load_location) + device = map_location or shared.weight_load_location + if device is None: + device = "cuda:0" if torch.cuda.is_available() else "cpu" + pl_sd = safetensors.torch.load_file(checkpoint_file, device=device) else: pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location) From 5a523d13050a5ede43c473767f29dfe2e391136a Mon Sep 17 00:00:00 2001 From: Nicolas Patry Date: Tue, 27 Dec 2022 11:27:40 +0100 Subject: [PATCH 042/461] Version 0.2.7 Fixes Windows SAFETENSORS_FAST_GPU path. --- requirements_versions.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements_versions.txt b/requirements_versions.txt index c126c8c4..52e98818 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -26,5 +26,5 @@ lark==1.1.2 inflection==0.5.1 GitPython==3.1.27 torchsde==0.2.5 -safetensors==0.2.5 +safetensors==0.2.7 httpcore<=0.15 From 5958bbd244703f7c248a91e86dea5d52acc85505 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Fri, 30 Dec 2022 19:36:36 -0500 Subject: [PATCH 043/461] add additional memory states --- modules/memmon.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/modules/memmon.py b/modules/memmon.py index 9fb9b687..a7060f58 100644 --- a/modules/memmon.py +++ b/modules/memmon.py @@ -71,10 +71,13 @@ class MemUsageMonitor(threading.Thread): def read(self): if not self.disabled: free, total = torch.cuda.mem_get_info() + self.data["free"] = free self.data["total"] = total torch_stats = torch.cuda.memory_stats(self.device) + self.data["active"] = torch_stats["active.all.current"] self.data["active_peak"] = torch_stats["active_bytes.all.peak"] + self.data["reserved"] = torch_stats["reserved_bytes.all.current"] self.data["reserved_peak"] = torch_stats["reserved_bytes.all.peak"] self.data["system_peak"] = total - self.data["min_free"] From d3aa2a48e1e896b6ffafda5367200a4bbd46b0d7 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Fri, 30 Dec 2022 19:38:53 -0500 Subject: [PATCH 044/461] remove unnecessary console message --- modules/sd_hijack_inpainting.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index bb5499b3..06b75772 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -178,7 +178,7 @@ def sample_plms(self, # sampling C, H, W = shape size = (batch_size, C, H, W) - print(f'Data shape for PLMS sampling is {size}') + # print(f'Data shape for PLMS sampling is {size}') # 
remove unnecessary message samples, intermediates = self.plms_sampling(conditioning, size, callback=callback, From 463048344fc036b262aa132584b65ee6e9fec6cf Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Fri, 30 Dec 2022 19:41:47 -0500 Subject: [PATCH 045/461] fix shared state dictionary --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index d4ddeea0..9a13fb60 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -168,7 +168,7 @@ class State: def dict(self): obj = { "skipped": self.skipped, - "interrupted": self.skipped, + "interrupted": self.interrupted, "job": self.job, "job_count": self.job_count, "job_no": self.job_no, From fef98723b2b1c7a9893ead41bbefcb36192babd6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 12:44:26 +0300 Subject: [PATCH 046/461] set sd_model for API later, inside the lock, to prevent multiple requests with different models ending up with incorrect results #5877 #6012 --- modules/api/api.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 59b81c93..11daff0d 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -121,7 +121,6 @@ class Api: def text2imgapi(self, txt2imgreq: StableDiffusionTxt2ImgProcessingAPI): populate = txt2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index), "do_not_save_samples": True, "do_not_save_grid": True @@ -129,9 +128,10 @@ class Api: ) if populate.sampler_name: populate.sampler_index = None # prevent a warning later on - p = StableDiffusionProcessingTxt2Img(**vars(populate)) with self.queue_lock: + p = StableDiffusionProcessingTxt2Img(sd_model=shared.sd_model, **vars(populate)) + shared.state.begin() processed = process_images(p) shared.state.end() @@ -151,7 +151,6 @@ class Api: mask = decode_base64_to_image(mask) populate = img2imgreq.copy(update={ # Override __init__ params - "sd_model": shared.sd_model, "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index), "do_not_save_samples": True, "do_not_save_grid": True, @@ -163,11 +162,11 @@ class Api: args = vars(populate) args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine. 
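As with the txt2img handler just above, construction of the processing object moves inside the queue lock so the request binds whatever checkpoint is current once it actually runs, rather than the model that happened to be loaded when the request arrived. A reduced sketch of that ordering; queue_lock and active_model are illustrative stand-ins for the shared objects, not the repository's own names:

import threading

queue_lock = threading.Lock()
active_model = {"checkpoint": "v1-5.ckpt"}  # stand-in for shared.sd_model

def handle_request(requested_checkpoint=None):
    with queue_lock:
        # any requested settings change (for example a checkpoint switch) is applied first
        if requested_checkpoint:
            active_model["checkpoint"] = requested_checkpoint
        # only now does the request capture the model it will render with
        return f"rendered with {active_model['checkpoint']}"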
- p = StableDiffusionProcessingImg2Img(**args) - - p.init_images = [decode_base64_to_image(x) for x in init_images] with self.queue_lock: + p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args) + p.init_images = [decode_base64_to_image(x) for x in init_images] + shared.state.begin() processed = process_images(p) shared.state.end() From 65be1df7bb55b21a3d76630a397c820218cbd12a Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 31 Dec 2022 07:46:04 -0500 Subject: [PATCH 047/461] initialize result so not to cause exception on empty results --- modules/interrogate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/interrogate.py b/modules/interrogate.py index 46935210..6f761c5a 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -135,7 +135,7 @@ class InterrogateModels: return caption[0] def interrogate(self, pil_image): - res = None + res = "" try: From f34c7341720fb2059992926c9f9ae6ff25f7385b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 18:06:35 +0300 Subject: [PATCH 048/461] alt-diffusion integration --- ...ence.yaml => alt-diffusion-inference.yaml} | 0 .../v1-inference.yaml | 0 modules/sd_hijack.py | 18 +++++----- modules/sd_hijack_clip.py | 14 +++----- modules/sd_hijack_xlmr.py | 34 +++++++++++++++++++ modules/shared.py | 10 +----- 6 files changed, 50 insertions(+), 26 deletions(-) rename configs/{altdiffusion/ad-inference.yaml => alt-diffusion-inference.yaml} (100%) rename v1-inference.yaml => configs/v1-inference.yaml (100%) create mode 100644 modules/sd_hijack_xlmr.py diff --git a/configs/altdiffusion/ad-inference.yaml b/configs/alt-diffusion-inference.yaml similarity index 100% rename from configs/altdiffusion/ad-inference.yaml rename to configs/alt-diffusion-inference.yaml diff --git a/v1-inference.yaml b/configs/v1-inference.yaml similarity index 100% rename from v1-inference.yaml rename to configs/v1-inference.yaml diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index bce23b03..edcbaf52 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -5,7 +5,7 @@ import modules.textual_inversion.textual_inversion from modules import devices, sd_hijack_optimizations, shared, sd_hijack_checkpoint from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts -from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet +from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr from modules.sd_hijack_optimizations import invokeAI_mps_available @@ -68,6 +68,7 @@ def fix_checkpoint(): ldm.modules.diffusionmodules.openaimodel.ResBlock.forward = sd_hijack_checkpoint.ResBlock_forward ldm.modules.diffusionmodules.openaimodel.AttentionBlock.forward = sd_hijack_checkpoint.AttentionBlock_forward + class StableDiffusionModelHijack: fixes = None comments = [] @@ -79,21 +80,22 @@ class StableDiffusionModelHijack: def hijack(self, m): - if shared.text_model_name == "XLMR-Large": + if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: model_embeddings = m.cond_stage_model.roberta.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.word_embeddings, self) - m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - + m.cond_stage_model = sd_hijack_xlmr.FrozenXLMREmbedderWithCustomWords(m.cond_stage_model, self) + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenCLIPEmbedder: model_embeddings = 
m.cond_stage_model.transformer.text_model.embeddings model_embeddings.token_embedding = EmbeddingsWithFixes(model_embeddings.token_embedding, self) m.cond_stage_model = sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - apply_optimizations() + elif type(m.cond_stage_model) == ldm.modules.encoders.modules.FrozenOpenCLIPEmbedder: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - apply_optimizations() - + + apply_optimizations() + self.clip = m.cond_stage_model fix_checkpoint() @@ -109,7 +111,7 @@ class StableDiffusionModelHijack: def undo_hijack(self, m): - if shared.text_model_name == "XLMR-Large": + if type(m.cond_stage_model) == xlmr.BertSeriesModelWithTransformation: m.cond_stage_model = m.cond_stage_model.wrapped elif type(m.cond_stage_model) == sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords: diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 9ea6e1ce..6ec50cca 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -4,7 +4,6 @@ import torch from modules import prompt_parser, devices from modules.shared import opts -import modules.shared as shared def get_target_prompt_token_count(token_count): return math.ceil(max(token_count, 1) / 75) * 75 @@ -177,9 +176,6 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count def forward(self, text): - if shared.text_model_name == "XLMR-Large": - return self.wrapped.encode(text) - use_old = opts.use_old_emphasis_implementation if use_old: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) @@ -257,13 +253,13 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def __init__(self, wrapped, hijack): super().__init__(wrapped, hijack) self.tokenizer = wrapped.tokenizer - if shared.text_model_name == "XLMR-Large": - self.comma_token = None - else : - self.comma_token = [v for k, v in self.tokenizer.get_vocab().items() if k == ','][0] + + vocab = self.tokenizer.get_vocab() + + self.comma_token = vocab.get(',', None) self.token_mults = {} - tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] + tokens_with_parens = [(k, v) for k, v in vocab.items() if '(' in k or ')' in k or '[' in k or ']' in k] for text, ident in tokens_with_parens: mult = 1.0 for c in text: diff --git a/modules/sd_hijack_xlmr.py b/modules/sd_hijack_xlmr.py new file mode 100644 index 00000000..4ac51c38 --- /dev/null +++ b/modules/sd_hijack_xlmr.py @@ -0,0 +1,34 @@ +import open_clip.tokenizer +import torch + +from modules import sd_hijack_clip, devices +from modules.shared import opts + + +class FrozenXLMREmbedderWithCustomWords(sd_hijack_clip.FrozenCLIPEmbedderWithCustomWords): + def __init__(self, wrapped, hijack): + super().__init__(wrapped, hijack) + + self.id_start = wrapped.config.bos_token_id + self.id_end = wrapped.config.eos_token_id + self.id_pad = wrapped.config.pad_token_id + + self.comma_token = self.tokenizer.get_vocab().get(',', None) # alt diffusion doesn't have bits for comma + + def encode_with_transformers(self, tokens): + # there's no CLIP Skip here because all hidden layers have size of 1024 and the last one uses a + # trained layer to 
transform those 1024 into 768 for unet; so you can't choose which transformer + # layer to work with - you have to use the last + + attention_mask = (tokens != self.id_pad).to(device=tokens.device, dtype=torch.int64) + features = self.wrapped(input_ids=tokens, attention_mask=attention_mask) + z = features['projection_state'] + + return z + + def encode_embedding_init_text(self, init_text, nvpt): + embedding_layer = self.wrapped.roberta.embeddings + ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] + embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) + + return embedded diff --git a/modules/shared.py b/modules/shared.py index 2b31e717..715b9169 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -23,7 +23,7 @@ demo = None sd_model_file = os.path.join(script_path, 'model.ckpt') default_sd_model_file = sd_model_file parser = argparse.ArgumentParser() -parser.add_argument("--config", type=str, default=os.path.join(script_path, "v1-inference.yaml"), help="path to config which constructs model",) +parser.add_argument("--config", type=str, default=os.path.join(script_path, "configs/v1-inference.yaml"), help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) @@ -108,14 +108,6 @@ restricted_opts = { "outdir_txt2img_grids", "outdir_save", } -from omegaconf import OmegaConf -config = OmegaConf.load(f"{cmd_opts.config}") -# XLMR-Large -try: - text_model_name = config.model.params.cond_stage_config.params.name - -except : - text_model_name = "stable_diffusion" cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access From f55ac33d446185680604e872ceda2ae858821d5c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Sat, 31 Dec 2022 11:27:02 -0500 Subject: [PATCH 049/461] validate textual inversion embeddings --- modules/sd_models.py | 3 ++ .../textual_inversion/textual_inversion.py | 43 ++++++++++++++++--- modules/ui.py | 2 - 3 files changed, 41 insertions(+), 7 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ecdd91c5..ebd4dff7 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -325,6 +325,9 @@ def load_model(checkpoint_info=None): script_callbacks.model_loaded_callback(sd_model) print("Model loaded.") + + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload = True) # Reload embeddings after model load as they may or may not fit the model + return sd_model diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f6112578..103ace60 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -23,6 +23,8 @@ class Embedding: self.vec = vec self.name = name self.step = step + self.shape = None + self.vectors = 0 self.cached_checksum = None self.sd_checkpoint = None self.sd_checkpoint_name = None @@ -57,8 +59,10 @@ class EmbeddingDatabase: def __init__(self, embeddings_dir): 
self.ids_lookup = {} self.word_embeddings = {} + self.skipped_embeddings = [] self.dir_mtime = None self.embeddings_dir = embeddings_dir + self.expected_shape = -1 def register_embedding(self, embedding, model): @@ -75,14 +79,35 @@ class EmbeddingDatabase: return embedding - def load_textual_inversion_embeddings(self): + def get_expected_shape(self): + expected_shape = -1 # initialize with unknown + idx = torch.tensor(0).to(shared.device) + if expected_shape == -1: + try: # matches sd15 signature + first_embedding = shared.sd_model.cond_stage_model.wrapped.transformer.text_model.embeddings.token_embedding.wrapped(idx) + expected_shape = first_embedding.shape[0] + except: + pass + if expected_shape == -1: + try: # matches sd20 signature + first_embedding = shared.sd_model.cond_stage_model.wrapped.model.token_embedding.wrapped(idx) + expected_shape = first_embedding.shape[0] + except: + pass + if expected_shape == -1: + print('Could not determine expected embeddings shape from model') + return expected_shape + + def load_textual_inversion_embeddings(self, force_reload = False): mt = os.path.getmtime(self.embeddings_dir) - if self.dir_mtime is not None and mt <= self.dir_mtime: + if not force_reload and self.dir_mtime is not None and mt <= self.dir_mtime: return self.dir_mtime = mt self.ids_lookup.clear() self.word_embeddings.clear() + self.skipped_embeddings = [] + self.expected_shape = self.get_expected_shape() def process_file(path, filename): name = os.path.splitext(filename)[0] @@ -122,7 +147,14 @@ class EmbeddingDatabase: embedding.step = data.get('step', None) embedding.sd_checkpoint = data.get('sd_checkpoint', None) embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) - self.register_embedding(embedding, shared.sd_model) + embedding.vectors = vec.shape[0] + embedding.shape = vec.shape[-1] + + if (self.expected_shape == -1) or (self.expected_shape == embedding.shape): + self.register_embedding(embedding, shared.sd_model) + else: + self.skipped_embeddings.append(name) + # print('Skipping embedding {name}: shape was {shape} expected {expected}'.format(name = name, shape = embedding.shape, expected = self.expected_shape)) for fn in os.listdir(self.embeddings_dir): try: @@ -137,8 +169,9 @@ class EmbeddingDatabase: print(traceback.format_exc(), file=sys.stderr) continue - print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.") - print("Embeddings:", ', '.join(self.word_embeddings.keys())) + print("Textual inversion embeddings {num} loaded: {val}".format(num = len(self.word_embeddings), val = ', '.join(self.word_embeddings.keys()))) + if (len(self.skipped_embeddings) > 0): + print("Textual inversion embeddings {num} skipped: {val}".format(num = len(self.skipped_embeddings), val = ', '.join(self.skipped_embeddings))) def find_embedding_at_position(self, tokens, offset): token = tokens[offset] diff --git a/modules/ui.py b/modules/ui.py index 57ee0465..397dd804 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1157,8 +1157,6 @@ def create_ui(): with gr.Column(variant='panel'): submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False) - sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() - with gr.Blocks(analytics_enabled=False) as train_interface: with gr.Row().style(equal_height=False): gr.HTML(value="

See wiki for detailed explanation.
") From bdbe09827b39be63c9c0b3636132ca58da38ebf6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 22:49:09 +0300 Subject: [PATCH 050/461] changed embedding accepted shape detection to use existing code and support the new alt-diffusion model, and reformatted messages a bit #6149 --- .../textual_inversion/textual_inversion.py | 30 ++++--------------- 1 file changed, 6 insertions(+), 24 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 103ace60..66f40367 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -80,23 +80,8 @@ class EmbeddingDatabase: return embedding def get_expected_shape(self): - expected_shape = -1 # initialize with unknown - idx = torch.tensor(0).to(shared.device) - if expected_shape == -1: - try: # matches sd15 signature - first_embedding = shared.sd_model.cond_stage_model.wrapped.transformer.text_model.embeddings.token_embedding.wrapped(idx) - expected_shape = first_embedding.shape[0] - except: - pass - if expected_shape == -1: - try: # matches sd20 signature - first_embedding = shared.sd_model.cond_stage_model.wrapped.model.token_embedding.wrapped(idx) - expected_shape = first_embedding.shape[0] - except: - pass - if expected_shape == -1: - print('Could not determine expected embeddings shape from model') - return expected_shape + vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1) + return vec.shape[1] def load_textual_inversion_embeddings(self, force_reload = False): mt = os.path.getmtime(self.embeddings_dir) @@ -112,8 +97,6 @@ class EmbeddingDatabase: def process_file(path, filename): name = os.path.splitext(filename)[0] - data = [] - if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']: embed_image = Image.open(path) if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text: @@ -150,11 +133,10 @@ class EmbeddingDatabase: embedding.vectors = vec.shape[0] embedding.shape = vec.shape[-1] - if (self.expected_shape == -1) or (self.expected_shape == embedding.shape): + if self.expected_shape == -1 or self.expected_shape == embedding.shape: self.register_embedding(embedding, shared.sd_model) else: self.skipped_embeddings.append(name) - # print('Skipping embedding {name}: shape was {shape} expected {expected}'.format(name = name, shape = embedding.shape, expected = self.expected_shape)) for fn in os.listdir(self.embeddings_dir): try: @@ -169,9 +151,9 @@ class EmbeddingDatabase: print(traceback.format_exc(), file=sys.stderr) continue - print("Textual inversion embeddings {num} loaded: {val}".format(num = len(self.word_embeddings), val = ', '.join(self.word_embeddings.keys()))) - if (len(self.skipped_embeddings) > 0): - print("Textual inversion embeddings {num} skipped: {val}".format(num = len(self.skipped_embeddings), val = ', '.join(self.skipped_embeddings))) + print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") + if len(self.skipped_embeddings) > 0: + print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings)}") def find_embedding_at_position(self, tokens, offset): token = tokens[offset] From f4535f6e4f001314bd155bc6e1b6908e02792b9a Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 31 Dec 2022 23:40:55 +0300 Subject: [PATCH 051/461] make it so that memory/embeddings info is displayed in a 
separate UI element from generation parameters, and is preserved when you change the displayed infotext by clicking on gallery images --- modules/img2img.py | 2 +- modules/processing.py | 5 +++-- modules/txt2img.py | 2 +- modules/ui.py | 31 +++++++++++++++++-------------- 4 files changed, 22 insertions(+), 18 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 81da4b13..ca58b5d8 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -162,4 +162,4 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro if opts.do_not_show_images: processed.images = [] - return processed.images, generation_info_js, plaintext_to_html(processed.info) + return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments) diff --git a/modules/processing.py b/modules/processing.py index 0a9a8f95..42dc19ea 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -239,7 +239,7 @@ class StableDiffusionProcessing(): class Processed: - def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None): + def __init__(self, p: StableDiffusionProcessing, images_list, seed=-1, info="", subseed=None, all_prompts=None, all_negative_prompts=None, all_seeds=None, all_subseeds=None, index_of_first_image=0, infotexts=None, comments=""): self.images = images_list self.prompt = p.prompt self.negative_prompt = p.negative_prompt @@ -247,6 +247,7 @@ class Processed: self.subseed = subseed self.subseed_strength = p.subseed_strength self.info = info + self.comments = comments self.width = p.width self.height = p.height self.sampler_name = p.sampler_name @@ -646,7 +647,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - res = Processed(p, output_images, p.all_seeds[0], infotext() + "".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) + res = Processed(p, output_images, p.all_seeds[0], infotext(), comments="".join(["\n\n" + x for x in comments]), subseed=p.all_subseeds[0], index_of_first_image=index_of_first_image, infotexts=infotexts) if p.scripts is not None: p.scripts.postprocess(p, res) diff --git a/modules/txt2img.py b/modules/txt2img.py index c8f81176..7f61e19a 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -59,4 +59,4 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: if opts.do_not_show_images: processed.images = [] - return processed.images, generation_info_js, plaintext_to_html(processed.info) + return processed.images, generation_info_js, plaintext_to_html(processed.info), plaintext_to_html(processed.comments) diff --git a/modules/ui.py b/modules/ui.py index 397dd804..f550ad00 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -159,7 +159,7 @@ def save_files(js_data, images, do_make_zip, index): zip_file.writestr(filenames[i], f.read()) fullfns.insert(0, zip_filepath) - return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}") + return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}") @@ -593,6 +593,8 @@ Requested path was: {f} with gr.Group(): html_info = gr.HTML() + html_log = gr.HTML() + generation_info = gr.Textbox(visible=False) if tabname == 'txt2img' or tabname == 'img2img': generation_info_button = 
gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button") @@ -615,16 +617,16 @@ Requested path was: {f} ], outputs=[ download_files, - html_info, - html_info, - html_info, + html_log, ] ) else: html_info_x = gr.HTML() html_info = gr.HTML() + html_log = gr.HTML() + parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None) - return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info + return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log def create_ui(): @@ -686,14 +688,14 @@ def create_ui(): with gr.Group(): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - txt2img_gallery, generation_info, html_info = create_output_panel("txt2img", opts.outdir_txt2img_samples) + txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) txt2img_args = dict( - fn=wrap_gradio_gpu_call(modules.txt2img.txt2img), + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img, extra_outputs=[None, '', '']), _js="submit", inputs=[ txt2img_prompt, @@ -720,7 +722,8 @@ def create_ui(): outputs=[ txt2img_gallery, generation_info, - html_info + html_info, + html_log, ], show_progress=False, ) @@ -799,7 +802,6 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as img2img_interface: img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = create_toprow(is_img2img=True) - with gr.Row(elem_id='img2img_progress_row'): img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -883,7 +885,7 @@ def create_ui(): with gr.Group(): custom_inputs = modules.scripts.scripts_img2img.setup_ui() - img2img_gallery, generation_info, html_info = create_output_panel("img2img", opts.outdir_img2img_samples) + img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt) connect_reuse_seed(seed, reuse_seed, generation_info, dummy_component, is_subseed=False) @@ -915,7 +917,7 @@ def create_ui(): ) img2img_args = dict( - fn=wrap_gradio_gpu_call(modules.img2img.img2img), + fn=wrap_gradio_gpu_call(modules.img2img.img2img, extra_outputs=[None, '', '']), _js="submit_img2img", inputs=[ dummy_component, @@ -954,7 +956,8 @@ def create_ui(): outputs=[ img2img_gallery, generation_info, - html_info + html_info, + html_log, ], show_progress=False, ) @@ -1078,10 +1081,10 @@ def create_ui(): with gr.Group(): upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False) - result_images, html_info_x, html_info = create_output_panel("extras", opts.outdir_extras_samples) + result_images, html_info_x, html_info, html_log = create_output_panel("extras", opts.outdir_extras_samples) submit.click( - fn=wrap_gradio_gpu_call(modules.extras.run_extras), + fn=wrap_gradio_gpu_call(modules.extras.run_extras, extra_outputs=[None, '']), _js="get_extras_tab_index", inputs=[ dummy_component, From 
360feed9b55fb03060c236773867b08b4265645d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 00:38:58 +0300 Subject: [PATCH 052/461] HAPPY NEW YEAR make save to zip into its own button instead of a checkbox --- modules/ui.py | 30 ++++++++++++++++++++++-------- style.css | 6 ++++++ 2 files changed, 28 insertions(+), 8 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f550ad00..279b5110 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -570,13 +570,14 @@ Requested path was: {f} generation_info = None with gr.Column(): - with gr.Row(): + with gr.Row(elem_id=f"image_buttons_{tabname}"): + open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder') + if tabname != "extras": save = gr.Button('Save', elem_id=f'save_{tabname}') + save_zip = gr.Button('Zip', elem_id=f'save_zip_{tabname}') buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"]) - button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder' - open_folder_button = gr.Button(folder_symbol, elem_id=button_id) open_folder_button.click( fn=lambda: open_folder(opts.outdir_samples or outdir), @@ -585,9 +586,6 @@ Requested path was: {f} ) if tabname != "extras": - with gr.Row(): - do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False) - with gr.Row(): download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) @@ -608,11 +606,11 @@ Requested path was: {f} save.click( fn=wrap_gradio_call(save_files), - _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]", + _js="(x, y, z, w) => [x, y, false, selected_gallery_index()]", inputs=[ generation_info, result_gallery, - do_make_zip, + html_info, html_info, ], outputs=[ @@ -620,6 +618,22 @@ Requested path was: {f} html_log, ] ) + + save_zip.click( + fn=wrap_gradio_call(save_files), + _js="(x, y, z, w) => [x, y, true, selected_gallery_index()]", + inputs=[ + generation_info, + result_gallery, + html_info, + html_info, + ], + outputs=[ + download_files, + html_log, + ] + ) + else: html_info_x = gr.HTML() html_info = gr.HTML() diff --git a/style.css b/style.css index 3ad78006..f245f674 100644 --- a/style.css +++ b/style.css @@ -568,6 +568,12 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h font-size: 95%; } +#image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{ + min-width: auto; + padding-left: 0.5em; + padding-right: 0.5em; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. 
If you change anything above, you need to make sure it is RTL compliant by just running From 29a3a7eb13478297bc7093971b48827ab8246f45 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 01:19:10 +0300 Subject: [PATCH 053/461] show sampler selection in dropdown, add option selection to revert to old radio group --- modules/shared.py | 1 + modules/ui.py | 22 +++++++++++++++------- 2 files changed, 16 insertions(+), 7 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 715b9169..948b9542 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -406,6 +406,7 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), + "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"), 'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index 279b5110..c7b8ea5d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -643,6 +643,19 @@ Requested path was: {f} return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log +def create_sampler_and_steps_selection(choices, tabname): + if opts.samplers_in_dropdown: + with gr.Row(elem_id=f"sampler_selection_{tabname}"): + sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) + else: + with gr.Group(elem_id=f"sampler_selection_{tabname}"): + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) + sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + + return steps, sampler_index + + def create_ui(): import modules.img2img import modules.txt2img @@ -660,9 +673,6 @@ def create_ui(): dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False) - - - with gr.Row(elem_id='txt2img_progress_row'): with gr.Column(scale=1): pass @@ -674,8 +684,7 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='panel', elem_id="txt2img_settings"): - steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20) - sampler_index = gr.Radio(label='Sampling method', elem_id="txt2img_sampling", choices=[x.name for x in samplers], value=samplers[0].name, type="index") + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") with gr.Group(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) @@ -875,8 +884,7 @@ def create_ui(): with gr.Row(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent 
upscale)"], type="index", value="Just resize") - steps = gr.Slider(minimum=1, maximum=150, step=1, label="Sampling Steps", value=20) - sampler_index = gr.Radio(label='Sampling method', choices=[x.name for x in samplers_for_img2img], value=samplers_for_img2img[0].name, type="index") + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") with gr.Group(): width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") From 210449b374d522c94a67fe54289a9eb515933a9f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 02:41:15 +0300 Subject: [PATCH 054/461] fix 'RuntimeError: Expected all tensors to be on the same device' error preventing models from loading on lowvram/medvram. --- modules/sd_hijack_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 6ec50cca..ca92b142 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -298,6 +298,6 @@ class FrozenCLIPEmbedderWithCustomWords(FrozenCLIPEmbedderWithCustomWordsBase): def encode_embedding_init_text(self, init_text, nvpt): embedding_layer = self.wrapped.transformer.text_model.embeddings ids = self.wrapped.tokenizer(init_text, max_length=nvpt, return_tensors="pt", add_special_tokens=False)["input_ids"] - embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) + embedded = embedding_layer.token_embedding.wrapped(ids.to(embedding_layer.token_embedding.wrapped.weight.device)).squeeze(0) return embedded From a939e82a0b982517aa212197a0e5f6d11daec7d0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 03:24:58 +0300 Subject: [PATCH 055/461] fix weird padding for sampler dropdown in chrome --- style.css | 5 ----- 1 file changed, 5 deletions(-) diff --git a/style.css b/style.css index f245f674..4b98b84d 100644 --- a/style.css +++ b/style.css @@ -245,11 +245,6 @@ input[type="range"]{ margin: 0.5em 0 -0.3em 0; } -#txt2img_sampling label{ - padding-left: 0.6em; - padding-right: 0.6em; -} - #mask_bug_info { text-align: center; display: block; From 16b9661d2741b241c3964fcbd56559c078b84822 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 09:51:37 +0300 Subject: [PATCH 056/461] change karras scheduler sigmas to values recommended by SD from old 0.1 to 10 with an option to revert to old --- modules/sd_samplers.py | 4 +++- modules/shared.py | 6 +++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 177b5338..e904d860 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -465,7 +465,9 @@ class KDiffusionSampler: if p.sampler_noise_scheduler_override: sigmas = p.sampler_noise_scheduler_override(steps) elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device) + sigma_min, sigma_max = (0.1, 10) if opts.use_old_karras_scheduler_sigmas else (self.model_wrap.sigmas[0].item(), self.model_wrap.sigmas[-1].item()) + + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=sigma_min, sigma_max=sigma_max, device=shared.device) else: sigmas = self.model_wrap.get_sigmas(steps) diff --git a/modules/shared.py b/modules/shared.py index 948b9542..7f430b93 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -368,13 +368,17 @@ 
options_templates.update(options_section(('sd', "Stable Diffusion"), { "img2img_background_color": OptionInfo("#ffffff", "With img2img, fill image's transparent parts with this color.", gr.ColorPicker, {}), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Increase coherency by padding from the last comma within n tokens when using more than 75 tokens", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1 }), 'CLIP_stop_at_last_layers': OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}), "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}), })) +options_templates.update(options_section(('compatibility', "Compatibility"), { + "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), + "use_old_karras_scheduler_sigmas": OptionInfo(False, "Use old karras scheduler sigmas (0.1 to 10)."), +})) + options_templates.update(options_section(('interrogate', "Interrogate Options"), { "interrogate_keep_models_in_memory": OptionInfo(False, "Interrogate: keep models in VRAM"), "interrogate_use_builtin_artists": OptionInfo(True, "Interrogate: use artists from artists.csv"), From 11d432d92d63660c516540dcb48faac87669b4f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 10:35:38 +0300 Subject: [PATCH 057/461] add refresh buttons to checkpoint merger --- modules/ui.py | 6 ++++++ style.css | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index c7b8ea5d..4cc2ce4f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1167,8 +1167,14 @@ def create_ui(): with gr.Row(): primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary model (A)") + create_refresh_button(primary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_A") + secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary model (B)") + create_refresh_button(secondary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_B") + tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") + create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") + custom_name = gr.Textbox(label="Custom Name (Optional)") interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3) interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method") 
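For reference, the three refresh buttons added above all go through the existing create_refresh_button helper. A minimal sketch of that pattern (illustrative only, not part of the patch; "my_list_fn" and "my_refresh_fn" are placeholder names):

    my_dropdown = gr.Dropdown(my_list_fn(), label="My choices")
    create_refresh_button(
        my_dropdown,                        # component whose choices get updated
        my_refresh_fn,                      # callable that re-scans the underlying data
        lambda: {"choices": my_list_fn()},  # kwargs re-applied to the component after refreshing
        "refresh_my_dropdown",              # elem_id; the style.css rule below sizes these buttons by id
    )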
diff --git a/style.css b/style.css index 4b98b84d..516ef7bf 100644 --- a/style.css +++ b/style.css @@ -496,7 +496,7 @@ input[type="range"]{ padding: 0; } -#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization{ +#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization, #refresh_checkpoint_A, #refresh_checkpoint_B, #refresh_checkpoint_C{ max-width: 2.5em; min-width: 2.5em; height: 2.4em; From 76f256fe8f844641f4e9b41f35c7dd2cba5090d6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 11:08:39 +0300 Subject: [PATCH 058/461] Bump gradio version #YOLO --- modules/ui_tempdir.py | 3 ++- requirements.txt | 2 +- requirements_versions.txt | 2 +- 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 07210d14..8d519310 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -15,7 +15,8 @@ Savedfile = namedtuple("Savedfile", ["name"]) def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(os.path.dirname(already_saved_as))} + shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(already_saved_as)} + file_obj = Savedfile(already_saved_as) return file_obj diff --git a/requirements.txt b/requirements.txt index 5bed694e..e2c3876b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,7 +5,7 @@ fairscale==0.4.4 fonts font-roboto gfpgan -gradio==3.9 +gradio==3.15.0 invisible-watermark numpy omegaconf diff --git a/requirements_versions.txt b/requirements_versions.txt index c126c8c4..836523ba 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -3,7 +3,7 @@ transformers==4.19.2 accelerate==0.12.0 basicsr==1.4.2 gfpgan==1.3.8 -gradio==3.9 +gradio==3.15.0 numpy==1.23.3 Pillow==9.2.0 realesrgan==0.3.0 From b46b97fa297b3a4a654da77cf98a775a2bcab4c7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 11:38:17 +0300 Subject: [PATCH 059/461] more fixes for gradio update --- modules/generation_parameters_copypaste.py | 2 +- modules/ui_tempdir.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index fbd91300..54b3372d 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -38,7 +38,7 @@ def quote(text): def image_from_url_text(filedata): if type(filedata) == dict and filedata["is_file"]: filename = filedata["name"] - is_in_right_dir = any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in shared.demo.temp_dirs) + is_in_right_dir = any([filename in fileset for fileset in shared.demo.temp_file_sets]) assert is_in_right_dir, 'trying to open image file outside of allowed directories' return Image.open(filename) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 8d519310..363d449d 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -45,7 +45,7 @@ def on_tmpdir_changed(): os.makedirs(shared.opts.temp_dir, exist_ok=True) - shared.demo.temp_dirs = shared.demo.temp_dirs | {os.path.abspath(shared.opts.temp_dir)} + shared.demo.temp_file_sets[0] = 
shared.demo.temp_file_sets[0] | {os.path.abspath(shared.opts.temp_dir)} def cleanup_tmpdr(): From e5f1a37cb9b537d95b2df47c96b4a4f7242fd294 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 13:08:40 +0300 Subject: [PATCH 060/461] make refresh buttons look more nice --- modules/ui.py | 6 +++--- modules/ui_components.py | 18 ++++++++++++++++++ style.css | 28 +++++++++++++++++++++------- 3 files changed, 42 insertions(+), 10 deletions(-) create mode 100644 modules/ui_components.py diff --git a/modules/ui.py b/modules/ui.py index 4cc2ce4f..32fa80d1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -19,7 +19,7 @@ import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, ui_components from modules.paths import script_path from modules.shared import opts, cmd_opts, restricted_opts @@ -532,7 +532,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele return gr.update(**(args or {})) - refresh_button = gr.Button(value=refresh_symbol, elem_id=elem_id) + refresh_button = ui_components.ToolButton(value=refresh_symbol, elem_id=elem_id) refresh_button.click( fn=refresh, inputs=[], @@ -1476,7 +1476,7 @@ def create_ui(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: - with gr.Row(variant="compact"): + with ui_components.FormRow(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: diff --git a/modules/ui_components.py b/modules/ui_components.py new file mode 100644 index 00000000..d0519d2d --- /dev/null +++ b/modules/ui_components.py @@ -0,0 +1,18 @@ +import gradio as gr + + +class ToolButton(gr.Button, gr.components.FormComponent): + """Small button with single emoji as text, fits inside gradio forms""" + + def __init__(self, **kwargs): + super().__init__(variant="tool", **kwargs) + + def get_block_name(self): + return "button" + + +class FormRow(gr.Row, gr.components.FormComponent): + """Same as gr.Row but fits inside gradio forms""" + + def get_block_name(self): + return "row" diff --git a/style.css b/style.css index 516ef7bf..f168571e 100644 --- a/style.css +++ b/style.css @@ -496,13 +496,6 @@ input[type="range"]{ padding: 0; } -#refresh_sd_model_checkpoint, #refresh_sd_vae, #refresh_sd_hypernetwork, #refresh_train_hypernetwork_name, #refresh_train_embedding_name, #refresh_localization, #refresh_checkpoint_A, #refresh_checkpoint_B, #refresh_checkpoint_C{ - max-width: 2.5em; - min-width: 2.5em; - height: 2.4em; -} - - canvas[key="mask"] { z-index: 12 !important; filter: invert(); @@ -569,6 +562,27 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h padding-right: 0.5em; } +.gr-form{ + background-color: white; +} + +.dark .gr-form{ + background-color: rgb(31 41 55 / var(--tw-bg-opacity)); +} + +.gr-button-tool{ + max-width: 2.5em; + min-width: 2.5em !important; + height: 2.4em; + margin: 0.55em 0; +} + +#quicksettings .gr-button-tool{ + margin: 0; +} + + + /* The following handles localization for right-to-left (RTL) languages like Arabic. 
The rtl media type will only be activated by the logic in javascript/localization.js. If you change anything above, you need to make sure it is RTL compliant by just running From 5f12b23b8bb7fca585a3a1e844881d06f171364e Mon Sep 17 00:00:00 2001 From: AlUlkesh <99896447+AlUlkesh@users.noreply.github.com> Date: Wed, 28 Dec 2022 22:18:19 +0100 Subject: [PATCH 061/461] Adding image numbers on grids New grid option in settings enables adding of image numbers on grids. This makes identifying the images, especially in larger batches, much easier. Revert "Adding image numbers on grids" This reverts commit 3530c283b4b1d3a3cab40efbffe4cf2697938b6f. Implements Callback for image grid loop Necessary to make "Add image's number to its picture in the grid" extension possible. --- modules/images.py | 1 + modules/script_callbacks.py | 20 ++++++++++++++++++++ 2 files changed, 21 insertions(+) diff --git a/modules/images.py b/modules/images.py index 31d4528d..5afd3891 100644 --- a/modules/images.py +++ b/modules/images.py @@ -43,6 +43,7 @@ def image_grid(imgs, batch_size=1, rows=None): grid = Image.new('RGB', size=(cols * w, rows * h), color='black') for i, img in enumerate(imgs): + script_callbacks.image_grid_loop_callback(img) grid.paste(img, box=(i % cols * w, i // cols * h)) return grid diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 8e22f875..0c854407 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -51,6 +51,11 @@ class UiTrainTabParams: self.txt2img_preview_params = txt2img_preview_params +class ImageGridLoopParams: + def __init__(self, img): + self.img = img + + ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) callback_map = dict( callbacks_app_started=[], @@ -63,6 +68,7 @@ callback_map = dict( callbacks_cfg_denoiser=[], callbacks_before_component=[], callbacks_after_component=[], + callbacks_image_grid_loop=[], ) @@ -154,6 +160,12 @@ def after_component_callback(component, **kwargs): except Exception: report_exception(c, 'after_component_callback') +def image_grid_loop_callback(component, **kwargs): + for c in callback_map['callbacks_image_grid_loop']: + try: + c.callback(component, **kwargs) + except Exception: + report_exception(c, 'image_grid_loop') def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] @@ -255,3 +267,11 @@ def on_before_component(callback): def on_after_component(callback): """register a function to be called after a component is created. See on_before_component for more.""" add_callback(callback_map['callbacks_after_component'], callback) + + +def on_image_grid_loop(callback): + """register a function to be called inside the image grid loop. + The callback is called with one argument: + - params: ImageGridLoopParams - parameters to be used inside the image grid loop. 
+ """ + add_callback(callback_map['callbacks_image_grid_loop'], callback) From 524d532b387732d4d32f237e792c7f201a934400 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 14:07:40 +0300 Subject: [PATCH 062/461] moved roll artist to built-in extensions --- .../roll-artist/scripts/roll-artist.py | 50 +++++++++++++++++++ modules/ui.py | 37 ++------------ 2 files changed, 53 insertions(+), 34 deletions(-) create mode 100644 extensions-builtin/roll-artist/scripts/roll-artist.py diff --git a/extensions-builtin/roll-artist/scripts/roll-artist.py b/extensions-builtin/roll-artist/scripts/roll-artist.py new file mode 100644 index 00000000..c3bc1fd0 --- /dev/null +++ b/extensions-builtin/roll-artist/scripts/roll-artist.py @@ -0,0 +1,50 @@ +import random + +from modules import script_callbacks, shared +import gradio as gr + +art_symbol = '\U0001f3a8' # 🎨 +global_prompt = None +related_ids = {"txt2img_prompt", "txt2img_clear_prompt", "img2img_prompt", "img2img_clear_prompt" } + + +def roll_artist(prompt): + allowed_cats = set([x for x in shared.artist_db.categories() if len(shared.opts.random_artist_categories)==0 or x in shared.opts.random_artist_categories]) + artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats]) + + return prompt + ", " + artist.name if prompt != '' else artist.name + + +def add_roll_button(prompt): + roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0) + + roll.click( + fn=roll_artist, + _js="update_txt2img_tokens", + inputs=[ + prompt, + ], + outputs=[ + prompt, + ] + ) + + +def after_component(component, **kwargs): + global global_prompt + + elem_id = kwargs.get('elem_id', None) + if elem_id not in related_ids: + return + + if elem_id == "txt2img_prompt": + global_prompt = component + elif elem_id == "txt2img_clear_prompt": + add_roll_button(global_prompt) + elif elem_id == "img2img_prompt": + global_prompt = component + elif elem_id == "img2img_clear_prompt": + add_roll_button(global_prompt) + + +script_callbacks.on_after_component(after_component) diff --git a/modules/ui.py b/modules/ui.py index 32fa80d1..27da2c2c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -80,7 +80,6 @@ css_hide_progressbar = """ # Important that they exactly match script.js for tooltip to work. 
random_symbol = '\U0001f3b2\ufe0f' # 🎲️ reuse_symbol = '\u267b\ufe0f' # ♻️ -art_symbol = '\U0001f3a8' # 🎨 paste_symbol = '\u2199\ufe0f' # ↙ folder_symbol = '\U0001f4c2' # 📂 refresh_symbol = '\U0001f504' # 🔄 @@ -234,13 +233,6 @@ def check_progress_call_initial(id_part): return check_progress_call(id_part) -def roll_artist(prompt): - allowed_cats = set([x for x in shared.artist_db.categories() if len(opts.random_artist_categories)==0 or x in opts.random_artist_categories]) - artist = random.choice([x for x in shared.artist_db.artists if x.category in allowed_cats]) - - return prompt + ", " + artist.name if prompt != '' else artist.name - - def visit(x, func, path=""): if hasattr(x, 'children'): for c in x.children: @@ -403,7 +395,6 @@ def create_toprow(is_img2img): ) with gr.Column(scale=1, elem_id="roll_col"): - roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0) paste = gr.Button(value=paste_symbol, elem_id="paste") save_style = gr.Button(value=save_style_symbol, elem_id="style_create") prompt_style_apply = gr.Button(value=apply_style_symbol, elem_id="style_apply") @@ -452,7 +443,7 @@ def create_toprow(is_img2img): prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys()))) prompt_style2.save_to_config = True - return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button + return prompt, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button def setup_progressbar(progressbar, preview, id_part, textinfo=None): @@ -668,7 +659,7 @@ def create_ui(): modules.scripts.scripts_txt2img.initialize_scripts(is_img2img=False) with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) + txt2img_prompt, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _,txt2img_prompt_style_apply, txt2img_save_style, txt2img_paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) txt_prompt_img = gr.File(label="", elem_id="txt2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -771,16 +762,6 @@ def create_ui(): outputs=[hr_options], ) - roll.click( - fn=roll_artist, - _js="update_txt2img_tokens", - inputs=[ - txt2img_prompt, - ], - outputs=[ - txt2img_prompt, - ] - ) txt2img_paste_fields = [ (txt2img_prompt, "Prompt"), @@ -823,7 +804,7 @@ def create_ui(): modules.scripts.scripts_img2img.initialize_scripts(is_img2img=True) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = create_toprow(is_img2img=True) + img2img_prompt, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, img2img_paste,token_counter, token_button = 
create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): img2img_prompt_img = gr.File(label="", elem_id="img2img_prompt_image", file_count="single", type="bytes", visible=False) @@ -999,18 +980,6 @@ def create_ui(): outputs=[img2img_prompt], ) - - roll.click( - fn=roll_artist, - _js="update_img2img_tokens", - inputs=[ - img2img_prompt, - ], - outputs=[ - img2img_prompt, - ] - ) - prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)] style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] From e672cfb07418a1a3130d3bf21c14a0d3819f81fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 1 Jan 2023 18:37:37 +0300 Subject: [PATCH 063/461] rework of callback for #6094 --- modules/images.py | 12 +++++++----- modules/script_callbacks.py | 26 +++++++++++++++----------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/modules/images.py b/modules/images.py index 719aaf3b..f84fd485 100644 --- a/modules/images.py +++ b/modules/images.py @@ -39,12 +39,14 @@ def image_grid(imgs, batch_size=1, rows=None): cols = math.ceil(len(imgs) / rows) - w, h = imgs[0].size - grid = Image.new('RGB', size=(cols * w, rows * h), color='black') + params = script_callbacks.ImageGridLoopParams(imgs, cols, rows) + script_callbacks.image_grid_callback(params) - for i, img in enumerate(imgs): - script_callbacks.image_grid_loop_callback(img) - grid.paste(img, box=(i % cols * w, i // cols * h)) + w, h = imgs[0].size + grid = Image.new('RGB', size=(params.cols * w, params.rows * h), color='black') + + for i, img in enumerate(params.imgs): + grid.paste(img, box=(i % params.cols * w, i // params.cols * h)) return grid diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index 0c854407..de69fd9f 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -52,8 +52,10 @@ class UiTrainTabParams: class ImageGridLoopParams: - def __init__(self, img): - self.img = img + def __init__(self, imgs, cols, rows): + self.imgs = imgs + self.cols = cols + self.rows = rows ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) @@ -68,7 +70,7 @@ callback_map = dict( callbacks_cfg_denoiser=[], callbacks_before_component=[], callbacks_after_component=[], - callbacks_image_grid_loop=[], + callbacks_image_grid=[], ) @@ -160,12 +162,14 @@ def after_component_callback(component, **kwargs): except Exception: report_exception(c, 'after_component_callback') -def image_grid_loop_callback(component, **kwargs): - for c in callback_map['callbacks_image_grid_loop']: + +def image_grid_callback(params: ImageGridLoopParams): + for c in callback_map['callbacks_image_grid']: try: - c.callback(component, **kwargs) + c.callback(params) except Exception: - report_exception(c, 'image_grid_loop') + report_exception(c, 'image_grid') + def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] @@ -269,9 +273,9 @@ def on_after_component(callback): add_callback(callback_map['callbacks_after_component'], callback) -def on_image_grid_loop(callback): - """register a function to be called inside the image grid loop. +def on_image_grid(callback): + """register a function to be called before making an image grid. The callback is called with one argument: - - params: ImageGridLoopParams - parameters to be used inside the image grid loop. 
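For illustration, an extension could consume this reworked hook as follows (a sketch only, not part of the patch; "single_row_grid" is a placeholder name):

    from modules import script_callbacks

    def single_row_grid(params):
        # params is an ImageGridLoopParams; imgs, cols and rows may be modified in place
        params.cols = len(params.imgs)
        params.rows = 1

    script_callbacks.on_image_grid(single_row_grid)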
+ - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified. """ - add_callback(callback_map['callbacks_image_grid_loop'], callback) + add_callback(callback_map['callbacks_image_grid'], callback) From a005fccddd5a37c57f1afe5234660b59b9a41508 Mon Sep 17 00:00:00 2001 From: me <25877290+Kryptortio@users.noreply.github.com> Date: Sun, 1 Jan 2023 14:51:12 +0100 Subject: [PATCH 064/461] Add a lot more elem_id/HTML id, modified some that were duplicates for seed section --- modules/generation_parameters_copypaste.py | 2 +- modules/ui.py | 252 ++++++++++----------- style.css | 12 +- 3 files changed, 133 insertions(+), 133 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 54b3372d..8e7f0df0 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -93,7 +93,7 @@ def integrate_settings_paste_fields(component_dict): def create_buttons(tabs_list): buttons = {} for tab in tabs_list: - buttons[tab] = gr.Button(f"Send to {tab}") + buttons[tab] = gr.Button(f"Send to {tab}", elem_id=f"{tab}_tab") return buttons diff --git a/modules/ui.py b/modules/ui.py index 27da2c2c..7070ea15 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -272,17 +272,17 @@ def interrogate_deepbooru(image): return gr_show(True) if prompt is None else prompt -def create_seed_inputs(): +def create_seed_inputs(target_interface): with gr.Row(): with gr.Box(): - with gr.Row(elem_id='seed_row'): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1) + with gr.Row(elem_id=target_interface + '_seed_row'): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') seed.style(container=False) - random_seed = gr.Button(random_symbol, elem_id='random_seed') - reuse_seed = gr.Button(reuse_symbol, elem_id='reuse_seed') + random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') + reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') - with gr.Box(elem_id='subseed_show_box'): - seed_checkbox = gr.Checkbox(label='Extra', elem_id='subseed_show', value=False) + with gr.Box(elem_id=target_interface + '_subseed_show_box'): + seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) # Components to show/hide based on the 'Extra' checkbox seed_extras = [] @@ -290,17 +290,17 @@ def create_seed_inputs(): with gr.Row(visible=False) as seed_extra_row_1: seed_extras.append(seed_extra_row_1) with gr.Box(): - with gr.Row(elem_id='subseed_row'): - subseed = gr.Number(label='Variation seed', value=-1) + with gr.Row(elem_id=target_interface + '_subseed_row'): + subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') subseed.style(container=False) - random_subseed = gr.Button(random_symbol, elem_id='random_subseed') - reuse_subseed = gr.Button(reuse_symbol, elem_id='reuse_subseed') - subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01) + random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') + reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') + subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') with gr.Row(visible=False) as seed_extra_row_2: seed_extras.append(seed_extra_row_2) - 
seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0) - seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0) + seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w') + seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h') random_seed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[seed]) random_subseed.click(fn=lambda: -1, show_progress=False, inputs=[], outputs=[subseed]) @@ -678,28 +678,28 @@ def create_ui(): steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") with gr.Group(): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512) + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1) - tiling = gr.Checkbox(label='Tiling', value=False) - enable_hr = gr.Checkbox(label='Highres. fix', value=False) + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Highres. fix', value=False, elem_id="txt2img_enable_hr") with gr.Row(visible=False) as hr_options: - firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0) - firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7) + firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0, elem_id="txt2img_firstphase_width") + firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0, elem_id="txt2img_firstphase_height") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") with gr.Row(equal_height=True): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1) - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1) + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0) + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - with gr.Group(): + with gr.Group(elem_id="txt2img_script_container"): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() 
txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) @@ -821,10 +821,10 @@ def create_ui(): with gr.Column(variant='panel', elem_id="img2img_settings"): with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode: - with gr.TabItem('img2img', id='img2img'): + with gr.TabItem('img2img', id='img2img', elem_id="img2img_img2img_tab"): init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool, image_mode="RGBA").style(height=480) - with gr.TabItem('Inpaint', id='inpaint'): + with gr.TabItem('Inpaint', id='inpaint', elem_id="img2img_inpaint_tab"): init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_inpaint_tool, image_mode="RGBA").style(height=480) init_img_with_mask_orig = gr.State(None) @@ -843,24 +843,24 @@ def create_ui(): init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask") with gr.Row(): - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4) - mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") + mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch, elem_id="img2img_mask_alpha") with gr.Row(): mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode") - inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index") + inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index") + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") with gr.Row(): - inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False) - inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32) + inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, elem_id="img2img_inpaint_full_res") + inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") - with gr.TabItem('Batch img2img', id='batch'): + with gr.TabItem('Batch img2img', id='batch', elem_id="img2img_batch_tab"): hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' gr.HTML(f"
Process images in a directory on the same machine where the server is running.
Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}
") - img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs) - img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs) + img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") + img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") with gr.Row(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") @@ -872,20 +872,20 @@ def create_ui(): height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1) - tiling = gr.Checkbox(label='Tiling', value=False) + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") with gr.Row(): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1) - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1) + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") with gr.Group(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0) - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75) + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs() + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - with gr.Group(): + with gr.Group(elem_id="img2img_script_container"): custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) @@ -1032,45 +1032,45 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='panel'): with gr.Tabs(elem_id="mode_extras"): - with gr.TabItem('Single Image'): - extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil") + with gr.TabItem('Single Image', elem_id="extras_single_tab"): + extras_image = gr.Image(label="Source", source="upload", interactive=True, type="pil", elem_id="extras_image") - with gr.TabItem('Batch Process'): - image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file") + with gr.TabItem('Batch Process', elem_id="extras_batch_process_tab"): + image_batch = gr.File(label="Batch Process", file_count="multiple", interactive=True, type="file", elem_id="extras_image_batch") - with gr.TabItem('Batch from Directory'): - extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same 
machine where the server is running.") - extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.") - show_extras_results = gr.Checkbox(label='Show result images', value=True) + with gr.TabItem('Batch from Directory', elem_id="extras_batch_directory_tab"): + extras_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, placeholder="A directory on the same machine where the server is running.", elem_id="extras_batch_input_dir") + extras_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, placeholder="Leave blank to save images to the default path.", elem_id="extras_batch_output_dir") + show_extras_results = gr.Checkbox(label='Show result images', value=True, elem_id="extras_show_extras_results") submit = gr.Button('Generate', elem_id="extras_generate", variant='primary') with gr.Tabs(elem_id="extras_resize_mode"): - with gr.TabItem('Scale by'): - upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4) - with gr.TabItem('Scale to'): + with gr.TabItem('Scale by', elem_id="extras_scale_by_tab"): + upscaling_resize = gr.Slider(minimum=1.0, maximum=8.0, step=0.05, label="Resize", value=4, elem_id="extras_upscaling_resize") + with gr.TabItem('Scale to', elem_id="extras_scale_to_tab"): with gr.Group(): with gr.Row(): - upscaling_resize_w = gr.Number(label="Width", value=512, precision=0) - upscaling_resize_h = gr.Number(label="Height", value=512, precision=0) - upscaling_crop = gr.Checkbox(label='Crop to fit', value=True) + upscaling_resize_w = gr.Number(label="Width", value=512, precision=0, elem_id="extras_upscaling_resize_w") + upscaling_resize_h = gr.Number(label="Height", value=512, precision=0, elem_id="extras_upscaling_resize_h") + upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop") with gr.Group(): extras_upscaler_1 = gr.Radio(label='Upscaler 1', elem_id="extras_upscaler_1", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") with gr.Group(): extras_upscaler_2 = gr.Radio(label='Upscaler 2', elem_id="extras_upscaler_2", choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") - extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1) + extras_upscaler_2_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="Upscaler 2 visibility", value=1, elem_id="extras_upscaler_2_visibility") with gr.Group(): - gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan) + gfpgan_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="GFPGAN visibility", value=0, interactive=modules.gfpgan_model.have_gfpgan, elem_id="extras_gfpgan_visibility") with gr.Group(): - codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer) - codeformer_weight = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer) + codeformer_visibility = gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer visibility", value=0, interactive=modules.codeformer_model.have_codeformer, elem_id="extras_codeformer_visibility") + codeformer_weight 
= gr.Slider(minimum=0.0, maximum=1.0, step=0.001, label="CodeFormer weight (0 = maximum effect, 1 = minimum effect)", value=0, interactive=modules.codeformer_model.have_codeformer, elem_id="extras_codeformer_weight") with gr.Group(): - upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False) + upscale_before_face_fix = gr.Checkbox(label='Upscale Before Restoring Faces', value=False, elem_id="extras_upscale_before_face_fix") result_images, html_info_x, html_info, html_log = create_output_panel("extras", opts.outdir_extras_samples) @@ -1117,7 +1117,7 @@ def create_ui(): with gr.Column(variant='panel'): html = gr.HTML() - generation_info = gr.Textbox(visible=False) + generation_info = gr.Textbox(visible=False, elem_id="pnginfo_generation_info") html2 = gr.HTML() with gr.Row(): buttons = parameters_copypaste.create_buttons(["txt2img", "img2img", "inpaint", "extras"]) @@ -1144,13 +1144,13 @@ def create_ui(): tertiary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_tertiary_model_name", label="Tertiary model (C)") create_refresh_button(tertiary_model_name, modules.sd_models.list_models, lambda: {"choices": modules.sd_models.checkpoint_tiles()}, "refresh_checkpoint_C") - custom_name = gr.Textbox(label="Custom Name (Optional)") - interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3) - interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method") + custom_name = gr.Textbox(label="Custom Name (Optional)", elem_id="modelmerger_custom_name") + interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Multiplier (M) - set to 0 to get model A', value=0.3, elem_id="modelmerger_interp_amount") + interp_method = gr.Radio(choices=["Weighted sum", "Add difference"], value="Weighted sum", label="Interpolation Method", elem_id="modelmerger_interp_method") with gr.Row(): - checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format") - save_as_half = gr.Checkbox(value=False, label="Save as float16") + checkpoint_format = gr.Radio(choices=["ckpt", "safetensors"], value="ckpt", label="Checkpoint format", elem_id="modelmerger_checkpoint_format") + save_as_half = gr.Checkbox(value=False, label="Save as float16", elem_id="modelmerger_save_as_half") modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary') @@ -1165,58 +1165,58 @@ def create_ui(): with gr.Tabs(elem_id="train_tabs"): with gr.Tab(label="Create embedding"): - new_embedding_name = gr.Textbox(label="Name") - initialization_text = gr.Textbox(label="Initialization text", value="*") - nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1) - overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding") + new_embedding_name = gr.Textbox(label="Name", elem_id="train_new_embedding_name") + initialization_text = gr.Textbox(label="Initialization text", value="*", elem_id="train_initialization_text") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1, elem_id="train_nvpt") + overwrite_old_embedding = gr.Checkbox(value=False, label="Overwrite Old Embedding", elem_id="train_overwrite_old_embedding") with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") with gr.Column(): - create_embedding = gr.Button(value="Create embedding", variant='primary') + create_embedding = 
gr.Button(value="Create embedding", variant='primary', elem_id="train_create_embedding") with gr.Tab(label="Create hypernetwork"): - new_hypernetwork_name = gr.Textbox(label="Name") - new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"]) - new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'") - new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys) - new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"]) - new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization") - new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout") - overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork") + new_hypernetwork_name = gr.Textbox(label="Name", elem_id="train_new_hypernetwork_name") + new_hypernetwork_sizes = gr.CheckboxGroup(label="Modules", value=["768", "320", "640", "1280"], choices=["768", "1024", "320", "640", "1280"], elem_id="train_new_hypernetwork_sizes") + new_hypernetwork_layer_structure = gr.Textbox("1, 2, 1", label="Enter hypernetwork layer structure", placeholder="1st and last digit must be 1. ex:'1, 2, 1'", elem_id="train_new_hypernetwork_layer_structure") + new_hypernetwork_activation_func = gr.Dropdown(value="linear", label="Select activation function of hypernetwork. Recommended : Swish / Linear(none)", choices=modules.hypernetworks.ui.keys, elem_id="train_new_hypernetwork_activation_func") + new_hypernetwork_initialization_option = gr.Dropdown(value = "Normal", label="Select Layer weights initialization. 
Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise", choices=["Normal", "KaimingUniform", "KaimingNormal", "XavierUniform", "XavierNormal"], elem_id="train_new_hypernetwork_initialization_option") + new_hypernetwork_add_layer_norm = gr.Checkbox(label="Add layer normalization", elem_id="train_new_hypernetwork_add_layer_norm") + new_hypernetwork_use_dropout = gr.Checkbox(label="Use dropout", elem_id="train_new_hypernetwork_use_dropout") + overwrite_old_hypernetwork = gr.Checkbox(value=False, label="Overwrite Old Hypernetwork", elem_id="train_overwrite_old_hypernetwork") with gr.Row(): with gr.Column(scale=3): gr.HTML(value="") with gr.Column(): - create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary') + create_hypernetwork = gr.Button(value="Create hypernetwork", variant='primary', elem_id="train_create_hypernetwork") with gr.Tab(label="Preprocess images"): - process_src = gr.Textbox(label='Source directory') - process_dst = gr.Textbox(label='Destination directory') - process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) - process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512) - preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"]) + process_src = gr.Textbox(label='Source directory', elem_id="train_process_src") + process_dst = gr.Textbox(label='Destination directory', elem_id="train_process_dst") + process_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_process_width") + process_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_process_height") + preprocess_txt_action = gr.Dropdown(label='Existing Caption txt Action', value="ignore", choices=["ignore", "copy", "prepend", "append"], elem_id="train_preprocess_txt_action") with gr.Row(): - process_flip = gr.Checkbox(label='Create flipped copies') - process_split = gr.Checkbox(label='Split oversized images') - process_focal_crop = gr.Checkbox(label='Auto focal point crop') - process_caption = gr.Checkbox(label='Use BLIP for caption') - process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True) + process_flip = gr.Checkbox(label='Create flipped copies', elem_id="train_process_flip") + process_split = gr.Checkbox(label='Split oversized images', elem_id="train_process_split") + process_focal_crop = gr.Checkbox(label='Auto focal point crop', elem_id="train_process_focal_crop") + process_caption = gr.Checkbox(label='Use BLIP for caption', elem_id="train_process_caption") + process_caption_deepbooru = gr.Checkbox(label='Use deepbooru for caption', visible=True, elem_id="train_process_caption_deepbooru") with gr.Row(visible=False) as process_split_extra_row: - process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05) - process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05) + process_split_threshold = gr.Slider(label='Split image threshold', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_split_threshold") + process_overlap_ratio = gr.Slider(label='Split image overlap ratio', value=0.2, minimum=0.0, maximum=0.9, step=0.05, elem_id="train_process_overlap_ratio") with gr.Row(visible=False) as process_focal_crop_row: - process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', 
value=0.9, minimum=0.0, maximum=1.0, step=0.05) - process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05) - process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05) - process_focal_crop_debug = gr.Checkbox(label='Create debug image') + process_focal_crop_face_weight = gr.Slider(label='Focal point face weight', value=0.9, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_face_weight") + process_focal_crop_entropy_weight = gr.Slider(label='Focal point entropy weight', value=0.15, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_entropy_weight") + process_focal_crop_edges_weight = gr.Slider(label='Focal point edges weight', value=0.5, minimum=0.0, maximum=1.0, step=0.05, elem_id="train_process_focal_crop_edges_weight") + process_focal_crop_debug = gr.Checkbox(label='Create debug image', elem_id="train_process_focal_crop_debug") with gr.Row(): with gr.Column(scale=3): @@ -1224,8 +1224,8 @@ def create_ui(): with gr.Column(): with gr.Row(): - interrupt_preprocessing = gr.Button("Interrupt") - run_preprocess = gr.Button(value="Preprocess", variant='primary') + interrupt_preprocessing = gr.Button("Interrupt", elem_id="train_interrupt_preprocessing") + run_preprocess = gr.Button(value="Preprocess", variant='primary', elem_id="train_run_preprocess") process_split.change( fn=lambda show: gr_show(show), @@ -1248,31 +1248,31 @@ def create_ui(): train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()]) create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name") with gr.Row(): - embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005") - hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001") + embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") + hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - batch_size = gr.Number(label='Batch size', value=1, precision=0) - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0) - dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") - log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") - template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt")) - training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512) - training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512) - steps = gr.Number(label='Max steps', value=100000, precision=0) - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) - 
save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) - preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) from txt2img tab when making previews', value=False) + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") + template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"), elem_id="train_template_file") + training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") + training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") + steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") + preview_from_txt2img = gr.Checkbox(label='Read parameters (prompt, etc...) 
from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") with gr.Row(): - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False) - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0) + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") with gr.Row(): - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random']) + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") with gr.Row(): - interrupt_training = gr.Button(value="Interrupt") - train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary') - train_embedding = gr.Button(value="Train Embedding", variant='primary') + interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") params = script_callbacks.UiTrainTabParams(txt2img_preview_params) @@ -1490,7 +1490,7 @@ def create_ui(): return gr.update(value=value), opts.dumpjson() with gr.Blocks(analytics_enabled=False) as settings_interface: - settings_submit = gr.Button(value="Apply settings", variant='primary') + settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") result = gr.HTML() settings_cols = 3 @@ -1541,8 +1541,8 @@ def create_ui(): download_localization = gr.Button(value='Download localization template', elem_id="download_localization") with gr.Row(): - reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary') - restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") + restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary', elem_id="settings_restart_gradio") request_notifications.click( fn=lambda: None, diff --git a/style.css b/style.css index f168571e..924d4ae7 100644 --- a/style.css +++ b/style.css @@ -73,7 +73,7 @@ margin-right: auto; } -#random_seed, #random_subseed, #reuse_seed, #reuse_subseed, #open_folder{ +[id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{ min-width: auto; flex-grow: 0; padding-left: 0.25em; @@ -84,27 +84,27 @@ display: none; } -#seed_row, #subseed_row{ +[id$=_seed_row], [id$=_subseed_row]{ gap: 0.5rem; } -#subseed_show_box{ +[id$=_subseed_show_box]{ min-width: auto; flex-grow: 0; } -#subseed_show_box > div{ +[id$=_subseed_show_box] > div{ border: 0; height: 100%; } -#subseed_show{ +[id$=_subseed_show]{ min-width: auto; flex-grow: 0; padding: 0; } -#subseed_show label{ +[id$=_subseed_show] label{ height: 100%; } From 311354c0bb8930ea939d6aa6b3edd50c69301320 Mon Sep 17 00:00:00 2001 From: AUTOMATIC 
<16777216c@gmail.com> Date: Mon, 2 Jan 2023 00:38:09 +0300 Subject: [PATCH 065/461] fix the issue with training on SD2.0 --- modules/sd_models.py | 2 ++ modules/textual_inversion/textual_inversion.py | 3 +-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ebd4dff7..bff8d6c9 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -228,6 +228,8 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"): model.sd_model_checkpoint = checkpoint_file model.sd_checkpoint_info = checkpoint_info + model.logvar = model.logvar.to(devices.device) # fix for training + sd_vae.delete_base_vae() sd_vae.clear_loaded_vae() vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 66f40367..1e5722e7 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -282,7 +282,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ return embedding, filename scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - # dataset loading may take a while, so input validations and early returns should be done before this + # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed @@ -310,7 +310,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ loss_step = 0 _loss_step = 0 #internal - last_saved_file = "" last_saved_image = "" forced_filename = "" From b5819d9bf1794071139c640b5f1e72c84a0e051a Mon Sep 17 00:00:00 2001 From: Philpax Date: Mon, 2 Jan 2023 10:17:33 +1100 Subject: [PATCH 066/461] feat(api): add /sdapi/v1/embeddings --- modules/api/api.py | 8 ++++++++ modules/api/models.py | 3 +++ 2 files changed, 11 insertions(+) diff --git a/modules/api/api.py b/modules/api/api.py index 11daff0d..30bf3dac 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -100,6 +100,7 @@ class Api: self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=List[PromptStyleItem]) self.add_api_route("/sdapi/v1/artist-categories", self.get_artists_categories, methods=["GET"], response_model=List[str]) self.add_api_route("/sdapi/v1/artists", self.get_artists, methods=["GET"], response_model=List[ArtistItem]) + self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=EmbeddingsResponse) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=CreateResponse) self.add_api_route("/sdapi/v1/create/hypernetwork", self.create_hypernetwork, methods=["POST"], response_model=CreateResponse) @@ -327,6 +328,13 @@ class Api: def get_artists(self): return [{"name":x[0], "score":x[1], "category":x[2]} for x in shared.artist_db.artists] + def get_embeddings(self): + db = sd_hijack.model_hijack.embedding_db + return { + "loaded": sorted(db.word_embeddings.keys()), + "skipped": sorted(db.skipped_embeddings), + } + def refresh_checkpoints(self): shared.refresh_checkpoints() diff --git a/modules/api/models.py b/modules/api/models.py index c446ce7a..a8472dc9 100644 --- a/modules/api/models.py +++ b/modules/api/models.py 
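As a usage note for the endpoint introduced in this patch, here is a minimal sketch of querying it from a script; the host, port, and use of the requests library are assumptions (any HTTP client against a webui started with --api works), not part of the patch.

    import requests

    # Assumes a local webui launched with --api on the default port.
    resp = requests.get("http://127.0.0.1:7860/sdapi/v1/embeddings")
    resp.raise_for_status()
    data = resp.json()

    # The response carries "loaded" and "skipped" collections of embeddings
    # (keyed by embedding name once the follow-up patch adds per-embedding metadata).
    print(len(data["loaded"]), "loaded,", len(data["skipped"]), "skipped")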
@@ -249,3 +249,6 @@ class ArtistItem(BaseModel): score: float = Field(title="Score") category: str = Field(title="Category") +class EmbeddingsResponse(BaseModel): + loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file From c65909ad16a1962129114c6251de092f49479b06 Mon Sep 17 00:00:00 2001 From: Philpax Date: Mon, 2 Jan 2023 12:21:22 +1100 Subject: [PATCH 067/461] feat(api): return more data for embeddings --- modules/api/api.py | 17 +++++++++++++++-- modules/api/models.py | 11 +++++++++-- modules/textual_inversion/textual_inversion.py | 8 ++++---- 3 files changed, 28 insertions(+), 8 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 30bf3dac..9c670f00 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -330,9 +330,22 @@ class Api: def get_embeddings(self): db = sd_hijack.model_hijack.embedding_db + + def convert_embedding(embedding): + return { + "step": embedding.step, + "sd_checkpoint": embedding.sd_checkpoint, + "sd_checkpoint_name": embedding.sd_checkpoint_name, + "shape": embedding.shape, + "vectors": embedding.vectors, + } + + def convert_embeddings(embeddings): + return {embedding.name: convert_embedding(embedding) for embedding in embeddings.values()} + return { - "loaded": sorted(db.word_embeddings.keys()), - "skipped": sorted(db.skipped_embeddings), + "loaded": convert_embeddings(db.word_embeddings), + "skipped": convert_embeddings(db.skipped_embeddings), } def refresh_checkpoints(self): diff --git a/modules/api/models.py b/modules/api/models.py index a8472dc9..4a632c68 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -249,6 +249,13 @@ class ArtistItem(BaseModel): score: float = Field(title="Score") category: str = Field(title="Category") +class EmbeddingItem(BaseModel): + step: Optional[int] = Field(title="Step", description="The number of steps that were used to train this embedding, if available") + sd_checkpoint: Optional[str] = Field(title="SD Checkpoint", description="The hash of the checkpoint this embedding was trained on, if available") + sd_checkpoint_name: Optional[str] = Field(title="SD Checkpoint Name", description="The name of the checkpoint this embedding was trained on, if available. 
Note that this is the name that was used by the trainer; for a stable identifier, use `sd_checkpoint` instead") + shape: int = Field(title="Shape", description="The length of each individual vector in the embedding") + vectors: int = Field(title="Vectors", description="The number of vectors in the embedding") + class EmbeddingsResponse(BaseModel): - loaded: List[str] = Field(title="Loaded", description="Embeddings loaded for the current model") - skipped: List[str] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file + loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") + skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 1e5722e7..fd253477 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -59,7 +59,7 @@ class EmbeddingDatabase: def __init__(self, embeddings_dir): self.ids_lookup = {} self.word_embeddings = {} - self.skipped_embeddings = [] + self.skipped_embeddings = {} self.dir_mtime = None self.embeddings_dir = embeddings_dir self.expected_shape = -1 @@ -91,7 +91,7 @@ class EmbeddingDatabase: self.dir_mtime = mt self.ids_lookup.clear() self.word_embeddings.clear() - self.skipped_embeddings = [] + self.skipped_embeddings.clear() self.expected_shape = self.get_expected_shape() def process_file(path, filename): @@ -136,7 +136,7 @@ class EmbeddingDatabase: if self.expected_shape == -1 or self.expected_shape == embedding.shape: self.register_embedding(embedding, shared.sd_model) else: - self.skipped_embeddings.append(name) + self.skipped_embeddings[name] = embedding for fn in os.listdir(self.embeddings_dir): try: @@ -153,7 +153,7 @@ class EmbeddingDatabase: print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") if len(self.skipped_embeddings) > 0: - print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings)}") + print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}") def find_embedding_at_position(self, tokens, offset): token = tokens[offset] From ef27a18b6b7cb1a8eebdc9b2e88d25baf2c2414d Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 19:42:10 +0300 Subject: [PATCH 068/461] Hires fix rework --- modules/generation_parameters_copypaste.py | 32 ++++++++++ modules/images.py | 24 ++++++-- modules/processing.py | 68 +++++++++------------- modules/shared.py | 7 ++- modules/txt2img.py | 6 +- modules/ui.py | 15 +++-- scripts/xy_grid.py | 4 +- 7 files changed, 96 insertions(+), 60 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 8e7f0df0..d6fa822b 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -1,5 +1,6 @@ import base64 import io +import math import os import re from pathlib import Path @@ -164,6 +165,35 @@ def find_hypernetwork_key(hypernet_name, hypernet_hash=None): return None +def restore_old_hires_fix_params(res): + """for infotexts that specify old First pass size parameter, 
convert it into + width, height, and hr scale""" + + firstpass_width = res.get('First pass size-1', None) + firstpass_height = res.get('First pass size-2', None) + + if firstpass_width is None or firstpass_height is None: + return + + firstpass_width, firstpass_height = int(firstpass_width), int(firstpass_height) + width = int(res.get("Size-1", 512)) + height = int(res.get("Size-2", 512)) + + if firstpass_width == 0 or firstpass_height == 0: + # old algorithm for auto-calculating first pass size + desired_pixel_count = 512 * 512 + actual_pixel_count = width * height + scale = math.sqrt(desired_pixel_count / actual_pixel_count) + firstpass_width = math.ceil(scale * width / 64) * 64 + firstpass_height = math.ceil(scale * height / 64) * 64 + + hr_scale = width / firstpass_width if firstpass_width > 0 else height / firstpass_height + + res['Size-1'] = firstpass_width + res['Size-2'] = firstpass_height + res['Hires upscale'] = hr_scale + + def parse_generation_parameters(x: str): """parses generation parameters string, the one you see in text field under the picture in UI: ``` @@ -221,6 +251,8 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hypernet_hash = res.get("Hypernet hash", None) res["Hypernet"] = find_hypernetwork_key(hypernet_name, hypernet_hash) + restore_old_hires_fix_params(res) + return res diff --git a/modules/images.py b/modules/images.py index f84fd485..c3a5fc8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -230,16 +230,32 @@ def draw_prompt_matrix(im, width, height, all_prompts): return draw_grid_annotations(im, width, height, hor_texts, ver_texts) -def resize_image(resize_mode, im, width, height): +def resize_image(resize_mode, im, width, height, upscaler_name=None): + """ + Resizes an image with the specified resize_mode, width, and height. + + Args: + resize_mode: The mode to use when resizing the image. + 0: Resize the image to the specified width and height. + 1: Resize the image to fill the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, cropping the excess. + 2: Resize the image to fit within the specified width and height, maintaining the aspect ratio, and then center the image within the dimensions, filling empty with data from image. + im: The image to resize. + width: The width to resize the image to. + height: The height to resize the image to. + upscaler_name: The name of the upscaler to use. If not provided, defaults to opts.upscaler_for_img2img. 
+ """ + + upscaler_name = upscaler_name or opts.upscaler_for_img2img + def resize(im, w, h): - if opts.upscaler_for_img2img is None or opts.upscaler_for_img2img == "None" or im.mode == 'L': + if upscaler_name is None or upscaler_name == "None" or im.mode == 'L': return im.resize((w, h), resample=LANCZOS) scale = max(w / im.width, h / im.height) if scale > 1.0: - upscalers = [x for x in shared.sd_upscalers if x.name == opts.upscaler_for_img2img] - assert len(upscalers) > 0, f"could not find upscaler named {opts.upscaler_for_img2img}" + upscalers = [x for x in shared.sd_upscalers if x.name == upscaler_name] + assert len(upscalers) > 0, f"could not find upscaler named {upscaler_name}" upscaler = upscalers[0] im = upscaler.scaler.upscale(im, scale, upscaler.data_path) diff --git a/modules/processing.py b/modules/processing.py index 42dc19ea..4654570c 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -658,14 +658,18 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool=False, denoising_strength: float=0.75, firstphase_width: int=0, firstphase_height: int=0, **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength - self.firstphase_width = firstphase_width - self.firstphase_height = firstphase_height - self.truncate_x = 0 - self.truncate_y = 0 + self.hr_scale = hr_scale + self.hr_upscaler = hr_upscaler + + if firstphase_width != 0 or firstphase_height != 0: + print("firstphase_width/firstphase_height no longer supported; use hr_scale", file=sys.stderr) + self.hr_scale = self.width / firstphase_width + self.width = firstphase_width + self.height = firstphase_height def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: @@ -674,47 +678,29 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 - self.extra_generation_params["First pass size"] = f"{self.firstphase_width}x{self.firstphase_height}" - - if self.firstphase_width == 0 or self.firstphase_height == 0: - desired_pixel_count = 512 * 512 - actual_pixel_count = self.width * self.height - scale = math.sqrt(desired_pixel_count / actual_pixel_count) - self.firstphase_width = math.ceil(scale * self.width / 64) * 64 - self.firstphase_height = math.ceil(scale * self.height / 64) * 64 - firstphase_width_truncated = int(scale * self.width) - firstphase_height_truncated = int(scale * self.height) - - else: - - width_ratio = self.width / self.firstphase_width - height_ratio = self.height / self.firstphase_height - - if width_ratio > height_ratio: - firstphase_width_truncated = self.firstphase_width - firstphase_height_truncated = self.firstphase_width * self.height / self.width - else: - firstphase_width_truncated = self.firstphase_height * self.width / self.height - firstphase_height_truncated = self.firstphase_height - - self.truncate_x = int(self.firstphase_width - firstphase_width_truncated) // opt_f - self.truncate_y = int(self.firstphase_height - firstphase_height_truncated) // opt_f + self.extra_generation_params["Hires upscale"] = self.hr_scale + if self.hr_upscaler is not None: + self.extra_generation_params["Hires upscaler"] = self.hr_upscaler def sample(self, conditioning, 
unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) + latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_default_mode + if self.enable_hr and latent_scale_mode is None: + assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}" + + x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) + if not self.enable_hr: - x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x)) return samples - x = create_random_tensors([opt_C, self.firstphase_height // opt_f, self.firstphase_width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) - samples = self.sampler.sample(self, x, conditioning, unconditional_conditioning, image_conditioning=self.txt2img_image_conditioning(x, self.firstphase_width, self.firstphase_height)) + target_width = int(self.width * self.hr_scale) + target_height = int(self.height * self.hr_scale) - samples = samples[:, :, self.truncate_y//2:samples.shape[2]-self.truncate_y//2, self.truncate_x//2:samples.shape[3]-self.truncate_x//2] - - """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" def save_intermediate(image, index): + """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" + if not opts.save or self.do_not_save_samples or not opts.save_images_before_highres_fix: return @@ -723,11 +709,11 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix") - if opts.use_scale_latent_for_hires_fix: + if latent_scale_mode is not None: for i in range(samples.shape[0]): save_intermediate(samples, i) - samples = torch.nn.functional.interpolate(samples, size=(self.height // opt_f, self.width // opt_f), mode="bilinear") + samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode) # Avoid making the inpainting conditioning unless necessary as # this does need some extra compute to decode / encode the image again. 
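To make the old "First pass size" to "Hires upscale" conversion added in restore_old_hires_fix_params above concrete, here is a standalone sketch of the same arithmetic; the 1024x1024 example values are illustrative only.

    import math

    # Old infotext: "Size: 1024x1024, First pass size: 0x0" (auto first-pass size).
    width, height = 1024, 1024
    desired_pixel_count = 512 * 512
    scale = math.sqrt(desired_pixel_count / (width * height))   # 0.5
    firstpass_width = math.ceil(scale * width / 64) * 64        # 512
    firstpass_height = math.ceil(scale * height / 64) * 64      # 512
    hr_scale = width / firstpass_width                          # 2.0

    # The parser then rewrites Size-1/Size-2 to the first-pass dimensions
    # and stores the ratio as "Hires upscale".
    print(firstpass_width, firstpass_height, hr_scale)

In other words, an old-style infotext pastes back as a 512x512 first pass upscaled by 2.0, which reproduces the original 1024x1024 output under the reworked hires fix.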
@@ -747,7 +733,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): save_intermediate(image, i) - image = images.resize_image(0, image, self.width, self.height) + image = images.resize_image(0, image, target_width, target_height, upscaler_name=self.hr_upscaler) image = np.array(image).astype(np.float32) / 255.0 image = np.moveaxis(image, 2, 0) batch_images.append(image) @@ -764,7 +750,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) + noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self) # GC now before running the next img2img to prevent running out of memory x = None diff --git a/modules/shared.py b/modules/shared.py index 7f430b93..b65559ee 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -327,7 +327,6 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "ESRGAN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for ESRGAN upscalers. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), "realesrgan_enabled_models": OptionInfo(["R-ESRGAN 4x+", "R-ESRGAN 4x+ Anime6B"], "Select which Real-ESRGAN models to show in the web UI. (Requires restart)", gr.CheckboxGroup, lambda: {"choices": realesrgan_models_names()}), "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), - "use_scale_latent_for_hires_fix": OptionInfo(False, "Upscale latent space image when doing hires. 
fix"), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { @@ -545,6 +544,12 @@ opts = Options() if os.path.exists(config_filename): opts.load(config_filename) +latent_upscale_default_mode = "Latent" +latent_upscale_modes = { + "Latent": "bilinear", + "Latent (nearest)": "nearest", +} + sd_upscalers = [] sd_model = None diff --git a/modules/txt2img.py b/modules/txt2img.py index 7f61e19a..e189a899 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -8,7 +8,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, firstphase_width: int, firstphase_height: int, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -33,8 +33,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: tiling=tiling, enable_hr=enable_hr, denoising_strength=denoising_strength if enable_hr else None, - firstphase_width=firstphase_width if enable_hr else None, - firstphase_height=firstphase_height if enable_hr else None, + hr_scale=hr_scale, + hr_upscaler=hr_upscaler, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/ui.py b/modules/ui.py index 7070ea15..27cd9ddd 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -684,11 +684,11 @@ def create_ui(): with gr.Row(): restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Highres. fix', value=False, elem_id="txt2img_enable_hr") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") with gr.Row(visible=False) as hr_options: - firstphase_width = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass width", value=0, elem_id="txt2img_firstphase_width") - firstphase_height = gr.Slider(minimum=0, maximum=1024, step=8, label="Firstpass height", value=0, elem_id="txt2img_firstphase_height") + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") with gr.Row(equal_height=True): @@ -729,8 +729,8 @@ def create_ui(): width, enable_hr, denoising_strength, - firstphase_width, - firstphase_height, + hr_scale, + hr_upscaler, ] + custom_inputs, outputs=[ @@ -762,7 +762,6 @@ def create_ui(): outputs=[hr_options], ) - txt2img_paste_fields = [ (txt2img_prompt, "Prompt"), (txt2img_negative_prompt, "Negative prompt"), @@ -781,8 +780,8 @@ def create_ui(): (denoising_strength, "Denoising strength"), (enable_hr, lambda d: "Denoising strength" in d), (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), - (firstphase_width, "First pass size-1"), - (firstphase_height, "First pass size-2"), + (hr_scale, "Hires upscale"), + (hr_upscaler, "Hires upscaler"), *modules.scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 3e0b2805..f92f9776 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -202,7 +202,7 @@ axis_options = [ AxisOption("Eta", float, apply_field("eta"), format_value_add_label, None), AxisOption("Clip skip", int, apply_clip_skip, format_value_add_label, None), AxisOption("Denoising", float, apply_field("denoising_strength"), format_value_add_label, None), - AxisOption("Upscale latent space for hires.", str, apply_upscale_latent_space, format_value_add_label, None), + AxisOption("Hires upscaler", str, apply_field("hr_upscaler"), format_value_add_label, None), AxisOption("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight"), format_value_add_label, None), AxisOption("VAE", str, apply_vae, format_value_add_label, None), AxisOption("Styles", str, apply_styles, format_value_add_label, None), @@ -267,7 +267,6 @@ class SharedSettingsStackHelper(object): self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers self.hypernetwork = opts.sd_hypernetwork self.model = shared.sd_model - self.use_scale_latent_for_hires_fix = opts.use_scale_latent_for_hires_fix self.vae = opts.sd_vae def __exit__(self, exc_type, exc_value, tb): @@ -278,7 +277,6 @@ class SharedSettingsStackHelper(object): hypernetwork.apply_strength() opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers - opts.data["use_scale_latent_for_hires_fix"] = self.use_scale_latent_for_hires_fix re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*") From 4dbde228ff48dbb105241b1ed25c21ce3f87d182 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 20:01:16 +0300 Subject: [PATCH 069/461] make it possible to use fractional values for SD upscale. 
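The practical effect of this change is that the SD upscale scale factor no longer has to be a whole number; a quick sketch of the dimension arithmetic (example sizes are illustrative, PIL is already a dependency of the upscaler code):

    from PIL import Image

    img = Image.new("RGB", (512, 768))  # stand-in for the image being upscaled
    scale = 1.5                         # fractional values are now accepted
    dest_w = int(img.width * scale)     # 768
    dest_h = int(img.height * scale)    # 1152
    print(dest_w, dest_h)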
--- modules/upscaler.py | 6 +++--- scripts/sd_upscale.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/upscaler.py b/modules/upscaler.py index c4e6e6bd..231680cb 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -53,10 +53,10 @@ class Upscaler: def do_upscale(self, img: PIL.Image, selected_model: str): return img - def upscale(self, img: PIL.Image, scale: int, selected_model: str = None): + def upscale(self, img: PIL.Image, scale, selected_model: str = None): self.scale = scale - dest_w = img.width * scale - dest_h = img.height * scale + dest_w = int(img.width * scale) + dest_h = int(img.height * scale) for i in range(3): shape = (img.width, img.height) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index e8c80a6c..9739545c 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -19,7 +19,7 @@ class Script(scripts.Script): def ui(self, is_img2img): info = gr.HTML("
Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64) - scale_factor = gr.Slider(minimum=1, maximum=4, step=1, label='Scale Factor', value=2) + scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0) upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") return [info, overlap, upscaler_index, scale_factor] From 84dd7e8e2495c4fc2997e97f8267aa831eb90d11 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 20:30:02 +0300 Subject: [PATCH 070/461] error out with a readable message in chwewckpoint merger for incompatible tensor shapes (ie when trying to merge SD1.5 with SD2.0) --- modules/extras.py | 2 ++ modules/ui.py | 2 +- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/extras.py b/modules/extras.py index 68939dea..5e270250 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -303,6 +303,8 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier) result_is_inpainting_model = True else: + assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}' + theta_0[key] = theta_func2(a, b, multiplier) if save_as_half: diff --git a/modules/ui.py b/modules/ui.py index 27cd9ddd..67a51888 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1663,7 +1663,7 @@ def create_ui(): print("Error loading/saving model file:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) modules.sd_models.list_models() # to remove the potentially missing models from the list - return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)] + return [f"Error merging checkpoints: {e}"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(4)] return results modelmerger_merge.click( From 8d12a729b8b036cb765cf2d87576d5ae256135c8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 20:46:51 +0300 Subject: [PATCH 071/461] fix possible error with accessing nonexistent setting --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 67a51888..9350a80f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -491,7 +491,7 @@ def apply_setting(key, value): return valtype = type(opts.data_labels[key].default) - oldval = opts.data[key] + oldval = opts.data.get(key, None) opts.data[key] = valtype(value) if valtype != type(None) else value if oldval != value and opts.data_labels[key].onchange is not None: opts.data_labels[key].onchange() From 251ecee6949c36e9df1d99a950b3e1af2b5fa2b6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 2 Jan 2023 22:44:46 +0300 Subject: [PATCH 072/461] make "send to" buttons send actual dimension of the sent image rather than fields --- javascript/ui.js | 4 +- modules/generation_parameters_copypaste.py | 60 +++++++++++++++------- 2 files changed, 43 insertions(+), 21 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 587dd782..d0c054d9 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -19,7 +19,7 @@ function selected_gallery_index(){ function extract_image_from_gallery(gallery){ if(gallery.length == 1){ - return gallery[0] + return [gallery[0]] } index = 
selected_gallery_index() @@ -28,7 +28,7 @@ function extract_image_from_gallery(gallery){ return [null] } - return gallery[index]; + return [gallery[index]]; } function args_to_array(args){ diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d6fa822b..ec60319a 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -103,35 +103,57 @@ def bind_buttons(buttons, send_image, send_generate_info): bind_list.append([buttons, send_image, send_generate_info]) +def send_image_and_dimensions(x): + if isinstance(x, Image.Image): + img = x + else: + img = image_from_url_text(x) + + if shared.opts.send_size and isinstance(img, Image.Image): + w = img.width + h = img.height + else: + w = gr.update() + h = gr.update() + + return img, w, h + + def run_bind(): - for buttons, send_image, send_generate_info in bind_list: + for buttons, source_image_component, send_generate_info in bind_list: for tab in buttons: button = buttons[tab] - if send_image and paste_fields[tab]["init_img"]: - if type(send_image) == gr.Gallery: - button.click( - fn=lambda x: image_from_url_text(x), - _js="extract_image_from_gallery", - inputs=[send_image], - outputs=[paste_fields[tab]["init_img"]], - ) - else: - button.click( - fn=lambda x: x, - inputs=[send_image], - outputs=[paste_fields[tab]["init_img"]], - ) + destination_image_component = paste_fields[tab]["init_img"] + fields = paste_fields[tab]["fields"] - if send_generate_info and paste_fields[tab]["fields"] is not None: + destination_width_component = next(iter([field for field, name in fields if name == "Size-1"] if fields else []), None) + destination_height_component = next(iter([field for field, name in fields if name == "Size-2"] if fields else []), None) + + if source_image_component and destination_image_component: + if isinstance(source_image_component, gr.Gallery): + func = send_image_and_dimensions if destination_width_component else image_from_url_text + jsfunc = "extract_image_from_gallery" + else: + func = send_image_and_dimensions if destination_width_component else lambda x: x + jsfunc = None + + button.click( + fn=func, + _js=jsfunc, + inputs=[source_image_component], + outputs=[destination_image_component, destination_width_component, destination_height_component] if destination_width_component else [destination_image_component], + ) + + if send_generate_info and fields is not None: if send_generate_info in paste_fields: - paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (['Size-1', 'Size-2'] if shared.opts.send_size else []) + (["Seed"] if shared.opts.send_seed else []) + paste_field_names = ['Prompt', 'Negative prompt', 'Steps', 'Face restoration'] + (["Seed"] if shared.opts.send_seed else []) button.click( fn=lambda *x: x, inputs=[field for field, name in paste_fields[send_generate_info]["fields"] if name in paste_field_names], - outputs=[field for field, name in paste_fields[tab]["fields"] if name in paste_field_names], + outputs=[field for field, name in fields if name in paste_field_names], ) else: - connect_paste(button, paste_fields[tab]["fields"], send_generate_info) + connect_paste(button, fields, send_generate_info) button.click( fn=None, From 1d7a31def8b5f4c348e2dd07536ac56cb4350614 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 06:21:53 +0300 Subject: [PATCH 073/461] make edit fields for sliders not get hidden by slider's label when there's not enough space --- style.css | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/style.css b/style.css index 924d4ae7..77551dd7 100644 --- a/style.css +++ b/style.css @@ -509,7 +509,7 @@ canvas[key="mask"] { position: absolute; right: 0.5em; top: -0.6em; - z-index: 200; + z-index: 400; width: 8em; } #quicksettings .gr-box > div > div > input.gr-text-input { From 269f6e867651cadef40d2c939a79d13291280bcd Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 07:20:20 +0300 Subject: [PATCH 074/461] change settings UI to use vertical tabs --- modules/ui.py | 45 +++++++++++++++++---------------------------- style.css | 27 +++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 28 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 9350a80f..f8c973ba 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1489,41 +1489,34 @@ def create_ui(): return gr.update(value=value), opts.dumpjson() with gr.Blocks(analytics_enabled=False) as settings_interface: - settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") - result = gr.HTML() + with gr.Row(): + settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") + restart_gradio = gr.Button(value='Restart UI', variant='primary', elem_id="settings_restart_gradio") - settings_cols = 3 - items_per_col = int(len(opts.data_labels) * 0.9 / settings_cols) + result = gr.HTML(elem_id="settings_result") quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")] quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings') quicksettings_list = [] - cols_displayed = 0 - items_displayed = 0 previous_section = None - column = None - with gr.Row(elem_id="settings").style(equal_height=False): + current_tab = None + with gr.Tabs(elem_id="settings"): for i, (k, item) in enumerate(opts.data_labels.items()): section_must_be_skipped = item.section[0] is None if previous_section != item.section and not section_must_be_skipped: - if cols_displayed < settings_cols and (items_displayed >= items_per_col or previous_section is None): - if column is not None: - column.__exit__() + elem_id, text = item.section - column = gr.Column(variant='panel') - column.__enter__() + if current_tab is not None: + current_tab.__exit__() - items_displayed = 0 - cols_displayed += 1 + current_tab = gr.TabItem(elem_id="settings_{}".format(elem_id), label=text) + current_tab.__enter__() previous_section = item.section - elem_id, text = item.section - gr.HTML(elem_id="settings_header_text_{}".format(elem_id), value='
{}
'.format(text)) - if k in quicksettings_names and not shared.cmd_opts.freeze_settings: quicksettings_list.append((i, k, item)) components.append(dummy_component) @@ -1533,15 +1526,14 @@ def create_ui(): component = create_setting_component(k) component_dict[k] = component components.append(component) - items_displayed += 1 - with gr.Row(): - request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") - download_localization = gr.Button(value='Download localization template', elem_id="download_localization") + if current_tab is not None: + current_tab.__exit__() - with gr.Row(): - reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") - restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary', elem_id="settings_restart_gradio") + with gr.TabItem("Actions"): + request_notifications = gr.Button(value='Request browser notifications', elem_id="request_notifications") + download_localization = gr.Button(value='Download localization template', elem_id="download_localization") + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") request_notifications.click( fn=lambda: None, @@ -1578,9 +1570,6 @@ def create_ui(): outputs=[], ) - if column is not None: - column.__exit__() - interfaces = [ (txt2img_interface, "txt2img", "txt2img"), (img2img_interface, "img2img", "img2img"), diff --git a/style.css b/style.css index 77551dd7..7df4d960 100644 --- a/style.css +++ b/style.css @@ -241,6 +241,33 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s z-index: 200; } +#settings{ + display: block; +} + +#settings > div{ + border: none; + margin-left: 10em; +} + +#settings > div.flex-wrap{ + float: left; + display: block; + margin-left: 0; + width: 10em; +} + +#settings > div.flex-wrap button{ + display: block; + border: none; + text-align: left; +} + +#settings_result{ + height: 1.4em; + margin: 0 1.2em; +} + input[type="range"]{ margin: 0.5em 0 -0.3em 0; } From 18c03cdeac6272734b0c09afd3fbe47d1372dd07 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 09:04:29 +0300 Subject: [PATCH 075/461] styling rework to make things more compact --- modules/ui.py | 127 ++++++++++++++++++++------------------- modules/ui_components.py | 7 +++ style.css | 35 ++++++----- 3 files changed, 92 insertions(+), 77 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f8c973ba..f787b518 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -19,7 +19,8 @@ import numpy as np from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call -from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru, ui_components +from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru +from modules.ui_components import FormRow, FormGroup, ToolButton from modules.paths import script_path from modules.shared import opts, cmd_opts, restricted_opts @@ -273,31 +274,27 @@ def interrogate_deepbooru(image): def create_seed_inputs(target_interface): - with gr.Row(): - with gr.Box(): - with gr.Row(elem_id=target_interface + '_seed_row'): - seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, 
elem_id=target_interface + '_seed') - seed.style(container=False) - random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') - reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') + with FormRow(elem_id=target_interface + '_seed_row'): + seed = (gr.Textbox if cmd_opts.use_textbox_seed else gr.Number)(label='Seed', value=-1, elem_id=target_interface + '_seed') + seed.style(container=False) + random_seed = gr.Button(random_symbol, elem_id=target_interface + '_random_seed') + reuse_seed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_seed') - with gr.Box(elem_id=target_interface + '_subseed_show_box'): + with gr.Group(elem_id=target_interface + '_subseed_show_box'): seed_checkbox = gr.Checkbox(label='Extra', elem_id=target_interface + '_subseed_show', value=False) # Components to show/hide based on the 'Extra' checkbox seed_extras = [] - with gr.Row(visible=False) as seed_extra_row_1: + with FormRow(visible=False, elem_id=target_interface + '_subseed_row') as seed_extra_row_1: seed_extras.append(seed_extra_row_1) - with gr.Box(): - with gr.Row(elem_id=target_interface + '_subseed_row'): - subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') - subseed.style(container=False) - random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') - reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') + subseed = gr.Number(label='Variation seed', value=-1, elem_id=target_interface + '_subseed') + subseed.style(container=False) + random_subseed = gr.Button(random_symbol, elem_id=target_interface + '_random_subseed') + reuse_subseed = gr.Button(reuse_symbol, elem_id=target_interface + '_reuse_subseed') subseed_strength = gr.Slider(label='Variation strength', value=0.0, minimum=0, maximum=1, step=0.01, elem_id=target_interface + '_subseed_strength') - with gr.Row(visible=False) as seed_extra_row_2: + with FormRow(visible=False) as seed_extra_row_2: seed_extras.append(seed_extra_row_2) seed_resize_from_w = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from width", value=0, elem_id=target_interface + '_seed_resize_from_w') seed_resize_from_h = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize seed from height", value=0, elem_id=target_interface + '_seed_resize_from_h') @@ -523,7 +520,7 @@ def create_refresh_button(refresh_component, refresh_method, refreshed_args, ele return gr.update(**(args or {})) - refresh_button = ui_components.ToolButton(value=refresh_symbol, elem_id=elem_id) + refresh_button = ToolButton(value=refresh_symbol, elem_id=elem_id) refresh_button.click( fn=refresh, inputs=[], @@ -636,11 +633,11 @@ Requested path was: {f} def create_sampler_and_steps_selection(choices, tabname): if opts.samplers_in_dropdown: - with gr.Row(elem_id=f"sampler_selection_{tabname}"): + with FormRow(elem_id=f"sampler_selection_{tabname}"): sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) else: - with gr.Group(elem_id=f"sampler_selection_{tabname}"): + with FormGroup(elem_id=f"sampler_selection_{tabname}"): steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x 
in choices], value=choices[0].name, type="index") @@ -677,29 +674,29 @@ def create_ui(): with gr.Column(variant='panel', elem_id="txt2img_settings"): steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - with gr.Group(): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - - with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - - with gr.Row(visible=False) as hr_options: - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") - - with gr.Row(equal_height=True): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - with gr.Group(elem_id="txt2img_script_container"): + with FormRow(elem_id="txt2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. 
fix', value=False, elem_id="txt2img_enable_hr") + + with FormRow(visible=False) as hr_options: + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormGroup(elem_id="txt2img_script_container"): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) @@ -816,7 +813,7 @@ def create_ui(): img2img_preview = gr.Image(elem_id='img2img_preview', visible=False) setup_progressbar(progressbar, img2img_preview, 'img2img') - with gr.Row().style(equal_height=False): + with FormRow().style(equal_height=False): with gr.Column(variant='panel', elem_id="img2img_settings"): with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode: @@ -841,19 +838,23 @@ def create_ui(): init_img_inpaint = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_base") init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", visible=False, elem_id="img_inpaint_mask") - with gr.Row(): + with FormRow(): mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id="img2img_mask_blur") mask_alpha = gr.Slider(label="Mask transparency", interactive=use_color_sketch, visible=use_color_sketch, elem_id="img2img_mask_alpha") - with gr.Row(): - mask_mode = gr.Radio(label="Mask mode", show_label=False, choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode") - inpainting_mask_invert = gr.Radio(label='Masking mode', show_label=False, choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") + with FormRow(): + mask_mode = gr.Radio(label="Mask source", choices=["Draw mask", "Upload mask"], type="index", value="Draw mask", elem_id="mask_mode") + inpainting_mask_invert = gr.Radio(label='Mask mode', choices=['Inpaint masked', 'Inpaint not masked'], value='Inpaint masked', type="index", elem_id="img2img_mask_mode") - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") + with FormRow(): + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='original', type="index", elem_id="img2img_inpainting_fill") - with gr.Row(): - inpaint_full_res = gr.Checkbox(label='Inpaint at full resolution', value=False, elem_id="img2img_inpaint_full_res") - inpaint_full_res_padding = gr.Slider(label='Inpaint at full resolution padding, pixels', minimum=0, maximum=256, step=4, value=32, elem_id="img2img_inpaint_full_res_padding") + with FormRow(): + with gr.Column(): + inpaint_full_res = gr.Radio(label="Inpaint area", choices=["Whole picture", "Only masked"], type="index", value="Whole picture", elem_id="img2img_inpaint_full_res") + + with gr.Column(scale=4): + inpaint_full_res_padding = gr.Slider(label='Only masked padding, pixels', minimum=0, maximum=256, step=4, value=32, 
elem_id="img2img_inpaint_full_res_padding") with gr.TabItem('Batch img2img', id='batch', elem_id="img2img_batch_tab"): hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' @@ -861,30 +862,30 @@ def create_ui(): img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir") img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir") - with gr.Row(): - resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", show_label=False, choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") + with FormRow(): + resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - with gr.Group(): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - with gr.Row(): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") - - with gr.Row(): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - - with gr.Group(): + with FormGroup(): cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - with gr.Group(elem_id="img2img_script_container"): + with FormRow(elem_id="img2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + with FormGroup(elem_id="img2img_script_container"): custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) @@ -1444,7 +1445,7 @@ def create_ui(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, info.component_args, "refresh_" + key) else: - with ui_components.FormRow(): + with FormRow(): res = comp(label=info.label, value=fun(), elem_id=elem_id, **(args or {})) create_refresh_button(res, info.refresh, 
info.component_args, "refresh_" + key) else: diff --git a/modules/ui_components.py b/modules/ui_components.py index d0519d2d..91eb0e3d 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -16,3 +16,10 @@ class FormRow(gr.Row, gr.components.FormComponent): def get_block_name(self): return "row" + + +class FormGroup(gr.Group, gr.components.FormComponent): + """Same as gr.Row but fits inside gradio forms""" + + def get_block_name(self): + return "group" diff --git a/style.css b/style.css index 7df4d960..86a265f6 100644 --- a/style.css +++ b/style.css @@ -74,7 +74,8 @@ } [id$=_random_seed], [id$=_random_subseed], [id$=_reuse_seed], [id$=_reuse_subseed], #open_folder{ - min-width: auto; + min-width: 2.3em; + height: 2.5em; flex-grow: 0; padding-left: 0.25em; padding-right: 0.25em; @@ -86,6 +87,7 @@ [id$=_seed_row], [id$=_subseed_row]{ gap: 0.5rem; + padding: 0.6em; } [id$=_subseed_show_box]{ @@ -206,24 +208,24 @@ button{ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ position: absolute; - top: -0.6em; + top: -0.5em; line-height: 1.2em; - padding: 0 0.5em; - margin: 0; + padding: 0; + margin: 0 0.5em; background-color: white; - border-top: 1px solid #eee; - border-left: 1px solid #eee; - border-right: 1px solid #eee; + box-shadow: 0 0 5px 5px white; z-index: 300; } .dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ background-color: rgb(31, 41, 55); - border-top: 1px solid rgb(55 65 81); - border-left: 1px solid rgb(55 65 81); - border-right: 1px solid rgb(55 65 81); + box-shadow: 0 0 5px 5px rgb(31, 41, 55); +} + +#txt2img_column_batch, #img2img_column_batch{ + min-width: min(13.5em, 100%) !important; } #settings fieldset span.text-gray-500, #settings .gr-block.gr-box span.text-gray-500, #settings label.block span{ @@ -232,10 +234,6 @@ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block s margin-right: 8em; } -.gr-panel div.flex-col div.justify-between label span{ - margin: 0; -} - #settings .gr-panel div.flex-col div.justify-between div{ position: relative; z-index: 200; @@ -609,6 +607,15 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h } +#img2img_settings > div.gr-form, #txt2img_settings > div.gr-form { + padding-top: 0.9em; +} + +#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form{ + border: none; + padding-bottom: 0.5em; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. 
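As a usage note for the FormRow/FormGroup helpers used throughout the styling rework above: per their docstrings they are drop-in replacements for gr.Row/gr.Group that fit inside gradio forms, so neighbouring inputs keep the compact form styling. A minimal sketch, assuming it is run from the webui root so modules.ui_components is importable; the labels are illustrative.

    import gradio as gr
    from modules.ui_components import FormRow, FormGroup

    with gr.Blocks() as demo:
        with FormGroup():
            with FormRow():
                # Both checkboxes share one row inside the surrounding form.
                restore_faces = gr.Checkbox(label="Restore faces", value=False)
                tiling = gr.Checkbox(label="Tiling", value=False)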
From 2bc86712ec16cada01a2353f1d978c1aabc84dbb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 09:13:35 +0300 Subject: [PATCH 076/461] make quicksettings UI elements appear in same order as they are listed in the setting --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f787b518..d7b911da 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1497,7 +1497,7 @@ def create_ui(): result = gr.HTML(elem_id="settings_result") quicksettings_names = [x.strip() for x in opts.quicksettings.split(",")] - quicksettings_names = set(x for x in quicksettings_names if x != 'quicksettings') + quicksettings_names = {x: i for i, x in enumerate(quicksettings_names) if x != 'quicksettings'} quicksettings_list = [] @@ -1604,7 +1604,7 @@ def create_ui(): with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: with gr.Row(elem_id="quicksettings"): - for i, k, item in quicksettings_list: + for i, k, item in sorted(quicksettings_list, key=lambda x: quicksettings_names.get(x[1], x[0])): component = create_setting_component(k, is_quicksettings=True) component_dict[k] = component From 9d4eff097deff6153c4023f158bd9fbd4f3e88b3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 10:01:06 +0300 Subject: [PATCH 077/461] add a button to show all setting pages --- javascript/ui.js | 11 +++++++++++ modules/ui.py | 2 ++ 2 files changed, 13 insertions(+) diff --git a/javascript/ui.js b/javascript/ui.js index d0c054d9..34406f3f 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -188,6 +188,17 @@ onUiUpdate(function(){ img2img_textarea = gradioApp().querySelector("#img2img_prompt > label > textarea"); img2img_textarea?.addEventListener("input", () => update_token_counter("img2img_token_button")); } + + show_all_pages = gradioApp().getElementById('settings_show_all_pages') + settings_tabs = gradioApp().querySelector('#settings div') + if(show_all_pages && settings_tabs){ + settings_tabs.appendChild(show_all_pages) + show_all_pages.onclick = function(){ + gradioApp().querySelectorAll('#settings > div').forEach(function(elem){ + elem.style.display = "block"; + }) + } + } }) let txt2img_textarea, img2img_textarea = undefined; diff --git a/modules/ui.py b/modules/ui.py index d7b911da..2c92c422 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1536,6 +1536,8 @@ def create_ui(): download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") + gr.Button(value="Show all pages", elem_id="settings_show_all_pages") + request_notifications.click( fn=lambda: None, inputs=[], From a1cf55a9d1c82f8e56c00d549bca5c8fa069f412 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 10:39:21 +0300 Subject: [PATCH 078/461] add option to reorder items in main UI --- modules/shared.py | 13 ++++++ modules/ui.py | 112 +++++++++++++++++++++++++++++++--------------- 2 files changed, 88 insertions(+), 37 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index b65559ee..23657a93 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -109,6 +109,17 @@ restricted_opts = { "outdir_save", } +ui_reorder_categories = [ + "sampler", + "dimensions", + "cfg", + "seed", + "checkboxes", + "hires_fix", + "batch", + "scripts", +] + 
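A rough illustration of what the new ui_reorder option does with the categories listed above (an illustrative sketch only; the ordered_ui_categories() helper added to modules/ui.py below is the actual implementation): categories named in the comma-separated setting are placed first, in the order given, while the rest keep their default relative order.

ui_reorder_categories = ["sampler", "dimensions", "cfg", "seed", "checkboxes", "hires_fix", "batch", "scripts"]

def reordered(ui_reorder_setting):
    # categories the user lists sort first, by their position in the setting;
    # everything else keeps its default position, offset past the user's entries
    user_order = {name.strip(): i for i, name in enumerate(ui_reorder_setting.split(","))}
    return sorted(ui_reorder_categories, key=lambda cat: user_order.get(cat, ui_reorder_categories.index(cat) + 1000))

print(reordered("seed, cfg"))
# -> ['seed', 'cfg', 'sampler', 'dimensions', 'checkboxes', 'hires_fix', 'batch', 'scripts']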
cmd_opts.disable_extension_access = (cmd_opts.share or cmd_opts.listen or cmd_opts.server_name) and not cmd_opts.enable_insecure_extension_access devices.device, devices.device_interrogate, devices.device_gfpgan, devices.device_esrgan, devices.device_codeformer = \ @@ -410,7 +421,9 @@ options_templates.update(options_section(('ui', "User interface"), { "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), + "dimensions_and_batch_together": OptionInfo(True, "Show Witdth/Height and Batch sliders in same row"), 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"), + 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/ing2img UI item order"), 'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) diff --git a/modules/ui.py b/modules/ui.py index 2c92c422..f2e7c0d6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -644,6 +644,13 @@ def create_sampler_and_steps_selection(choices, tabname): return steps, sampler_index +def ordered_ui_categories(): + user_order = {x.strip(): i for i, x in enumerate(shared.opts.ui_reorder.split(","))} + + for i, category in sorted(enumerate(shared.ui_reorder_categories), key=lambda x: user_order.get(x[1], x[0] + 1000)): + yield category + + def create_ui(): import modules.img2img import modules.txt2img @@ -672,32 +679,48 @@ def create_ui(): with gr.Row().style(equal_height=False): with gr.Column(variant='panel', elem_id="txt2img_settings"): - steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers, "txt2img") - with FormRow(): - with gr.Column(elem_id="txt2img_column_size", scale=4): - width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - with gr.Column(elem_id="txt2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="txt2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="txt2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="txt2img_height") - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') 
+ elif category == "cfg": + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="txt2img_cfg_scale") - with FormRow(elem_id="txt2img_checkboxes"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") - enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('txt2img') - with FormRow(visible=False) as hr_options: - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + elif category == "checkboxes": + with FormRow(elem_id="txt2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") + enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") - with FormGroup(elem_id="txt2img_script_container"): - custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + elif category == "hires_fix": + with FormRow(visible=False, elem_id="txt2img_hires_fix") as hr_options: + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="txt2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="txt2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="txt2img_batch_size") + + elif category == "scripts": + with FormGroup(elem_id="txt2img_script_container"): + custom_inputs = modules.scripts.scripts_txt2img.setup_ui() txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) @@ -865,28 +888,43 @@ def create_ui(): with FormRow(): resize_mode = gr.Radio(label="Resize mode", elem_id="resize_mode", choices=["Just resize", "Crop and resize", "Resize and fill", "Just resize (latent upscale)"], type="index", value="Just resize") - steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") + for category in ordered_ui_categories(): + if category == "sampler": + steps, sampler_index = create_sampler_and_steps_selection(samplers_for_img2img, "img2img") - with FormRow(): - with gr.Column(elem_id="img2img_column_size", scale=4): - width = 
gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") - height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with gr.Column(elem_id="img2img_column_batch"): - batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") - batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + elif category == "dimensions": + with FormRow(): + with gr.Column(elem_id="img2img_column_size", scale=4): + width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="img2img_width") + height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="img2img_height") - with FormGroup(): - cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") + if opts.dimensions_and_batch_together: + with gr.Column(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") - seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') + elif category == "cfg": + with FormGroup(): + cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=7.0, elem_id="img2img_cfg_scale") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.75, elem_id="img2img_denoising_strength") - with FormRow(elem_id="img2img_checkboxes"): - restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") - tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + elif category == "seed": + seed, reuse_seed, subseed, reuse_subseed, subseed_strength, seed_resize_from_h, seed_resize_from_w, seed_checkbox = create_seed_inputs('img2img') - with FormGroup(elem_id="img2img_script_container"): - custom_inputs = modules.scripts.scripts_img2img.setup_ui() + elif category == "checkboxes": + with FormRow(elem_id="img2img_checkboxes"): + restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="img2img_restore_faces") + tiling = gr.Checkbox(label='Tiling', value=False, elem_id="img2img_tiling") + + elif category == "batch": + if not opts.dimensions_and_batch_together: + with FormRow(elem_id="img2img_column_batch"): + batch_count = gr.Slider(minimum=1, step=1, label='Batch count', value=1, elem_id="img2img_batch_count") + batch_size = gr.Slider(minimum=1, maximum=8, step=1, label='Batch size', value=1, elem_id="img2img_batch_size") + + elif category == "scripts": + with FormGroup(elem_id="img2img_script_container"): + custom_inputs = modules.scripts.scripts_img2img.setup_ui() img2img_gallery, generation_info, html_info, html_log = create_output_panel("img2img", opts.outdir_img2img_samples) parameters_copypaste.bind_buttons({"img2img": img2img_paste}, None, img2img_prompt) From fda1ed184381fdf8aa81be4f64e77787f3fac1b2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 12:01:32 +0300 Subject: 
[PATCH 079/461] some minor improvements for dark mode UI --- style.css | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/style.css b/style.css index 86a265f6..7296ce91 100644 --- a/style.css +++ b/style.css @@ -208,20 +208,20 @@ button{ fieldset span.text-gray-500, .gr-block.gr-box span.text-gray-500, label.block span{ position: absolute; - top: -0.5em; + top: -0.7em; line-height: 1.2em; padding: 0; margin: 0 0.5em; background-color: white; - box-shadow: 0 0 5px 5px white; + box-shadow: 6px 0 6px 0px white, -6px 0 6px 0px white; z-index: 300; } .dark fieldset span.text-gray-500, .dark .gr-block.gr-box span.text-gray-500, .dark label.block span{ background-color: rgb(31, 41, 55); - box-shadow: 0 0 5px 5px rgb(31, 41, 55); + box-shadow: 6px 0 6px 0px rgb(31, 41, 55), -6px 0 6px 0px rgb(31, 41, 55); } #txt2img_column_batch, #img2img_column_batch{ From 9a3b0ee960b0c61c4f60e3081ae6f2098533d393 Mon Sep 17 00:00:00 2001 From: hithereai <121192995+hithereai@users.noreply.github.com> Date: Tue, 3 Jan 2023 11:22:06 +0200 Subject: [PATCH 080/461] update req.txt The old 'opencv-python' package is very limiting in terms of optical flow - so I propose a package change to 'opencv-contrib-python', which has more cv2.optflow methods. These are needed for optical flow trickery in auto1111 and its extensions, and it cannot be installed by an extension as only a single package of opencv needs to be installed for optical flow to work properly. Change of the main one is Inevitable. --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index e2c3876b..4f09385f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -9,7 +9,7 @@ gradio==3.15.0 invisible-watermark numpy omegaconf -opencv-python +opencv-contrib-python requests piexif Pillow From c0ee1488702d5a6ae35fbf7e0422f9f685394920 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 14:18:48 +0300 Subject: [PATCH 081/461] add support for running with gradio 3.9 installed --- modules/generation_parameters_copypaste.py | 4 ++-- modules/ui_tempdir.py | 23 ++++++++++++++++++++-- 2 files changed, 23 insertions(+), 4 deletions(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index ec60319a..d94f11a3 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -7,7 +7,7 @@ from pathlib import Path import gradio as gr from modules.shared import script_path -from modules import shared +from modules import shared, ui_tempdir import tempfile from PIL import Image @@ -39,7 +39,7 @@ def quote(text): def image_from_url_text(filedata): if type(filedata) == dict and filedata["is_file"]: filename = filedata["name"] - is_in_right_dir = any([filename in fileset for fileset in shared.demo.temp_file_sets]) + is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) assert is_in_right_dir, 'trying to open image file outside of allowed directories' return Image.open(filename) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 363d449d..21945235 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -1,6 +1,7 @@ import os import tempfile from collections import namedtuple +from pathlib import Path import gradio as gr @@ -12,10 +13,28 @@ from modules import shared Savedfile = namedtuple("Savedfile", ["name"]) +def register_tmp_file(gradio, filename): + if hasattr(gradio, 'temp_file_sets'): # gradio 3.15 + gradio.temp_file_sets[0] 
= gradio.temp_file_sets[0] | {os.path.abspath(filename)} + + if hasattr(gradio, 'temp_dirs'): # gradio 3.9 + gradio.temp_dirs = gradio.temp_dirs | {os.path.abspath(os.path.dirname(filename))} + + +def check_tmp_file(gradio, filename): + if hasattr(gradio, 'temp_file_sets'): + return any([filename in fileset for fileset in gradio.temp_file_sets]) + + if hasattr(gradio, 'temp_dirs'): + return any(Path(temp_dir).resolve() in Path(filename).resolve().parents for temp_dir in gradio.temp_dirs) + + return False + + def save_pil_to_file(pil_image, dir=None): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): - shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(already_saved_as)} + register_tmp_file(shared.demo, already_saved_as) file_obj = Savedfile(already_saved_as) return file_obj @@ -45,7 +64,7 @@ def on_tmpdir_changed(): os.makedirs(shared.opts.temp_dir, exist_ok=True) - shared.demo.temp_file_sets[0] = shared.demo.temp_file_sets[0] | {os.path.abspath(shared.opts.temp_dir)} + register_tmp_file(shared.demo, os.path.join(shared.opts.temp_dir, "x")) def cleanup_tmpdr(): From bddebe09edeb6a18f2c06986d5658a7be3a563ea Mon Sep 17 00:00:00 2001 From: Shondoit Date: Tue, 3 Jan 2023 10:26:37 +0100 Subject: [PATCH 082/461] Save Optimizer next to TI embedding Also add check to load only .PT and .BIN files as embeddings. (since we add .optim files in the same directory) --- modules/shared.py | 2 +- .../textual_inversion/textual_inversion.py | 40 +++++++++++++++---- 2 files changed, 33 insertions(+), 9 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 23657a93..c541d18c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -355,7 +355,7 @@ options_templates.update(options_section(('system', "System"), { options_templates.update(options_section(('training', "Training"), { "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), - "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training can be resumed with HN itself and matching optim file."), + "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. 
Training of embedding or HN can be resumed with the matching optim file."), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fd253477..16176e90 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -28,6 +28,7 @@ class Embedding: self.cached_checksum = None self.sd_checkpoint = None self.sd_checkpoint_name = None + self.optimizer_state_dict = None def save(self, filename): embedding_data = { @@ -41,6 +42,13 @@ class Embedding: torch.save(embedding_data, filename) + if shared.opts.save_optimizer_state and self.optimizer_state_dict is not None: + optimizer_saved_dict = { + 'hash': self.checksum(), + 'optimizer_state_dict': self.optimizer_state_dict, + } + torch.save(optimizer_saved_dict, filename + '.optim') + def checksum(self): if self.cached_checksum is not None: return self.cached_checksum @@ -95,9 +103,10 @@ class EmbeddingDatabase: self.expected_shape = self.get_expected_shape() def process_file(path, filename): - name = os.path.splitext(filename)[0] + name, ext = os.path.splitext(filename) + ext = ext.upper() - if os.path.splitext(filename.upper())[-1] in ['.PNG', '.WEBP', '.JXL', '.AVIF']: + if ext in ['.PNG', '.WEBP', '.JXL', '.AVIF']: embed_image = Image.open(path) if hasattr(embed_image, 'text') and 'sd-ti-embedding' in embed_image.text: data = embedding_from_b64(embed_image.text['sd-ti-embedding']) @@ -105,8 +114,10 @@ class EmbeddingDatabase: else: data = extract_image_data_embed(embed_image) name = data.get('name', name) - else: + elif ext in ['.BIN', '.PT']: data = torch.load(path, map_location="cpu") + else: + return # textual inversion embeddings if 'string_to_param' in data: @@ -300,6 +311,20 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ embedding.vec.requires_grad = True optimizer = torch.optim.AdamW([embedding.vec], lr=scheduler.learn_rate, weight_decay=0.0) + if shared.opts.save_optimizer_state: + optimizer_state_dict = None + if os.path.exists(filename + '.optim'): + optimizer_saved_dict = torch.load(filename + '.optim', map_location='cpu') + if embedding.checksum() == optimizer_saved_dict.get('hash', None): + optimizer_state_dict = optimizer_saved_dict.get('optimizer_state_dict', None) + + if optimizer_state_dict is not None: + optimizer.load_state_dict(optimizer_state_dict) + print("Loaded existing optimizer from checkpoint") + else: + print("No saved optimizer exists in checkpoint") + + scaler = torch.cuda.amp.GradScaler() batch_size = ds.batch_size @@ -366,9 +391,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ # Before saving, change name to match current checkpoint. 
embedding_name_every = f'{embedding_name}-{steps_done}' last_saved_file = os.path.join(embedding_dir, f'{embedding_name_every}.pt') - #if shared.opts.save_optimizer_state: - #embedding.optimizer_state_dict = optimizer.state_dict() - save_embedding(embedding, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True) + save_embedding(embedding, optimizer, checkpoint, embedding_name_every, last_saved_file, remove_cached_checksum=True) embedding_yet_to_be_embedded = True write_loss(log_directory, "textual_inversion_loss.csv", embedding.step, steps_per_epoch, { @@ -458,7 +481,7 @@ Last saved image: {html.escape(last_saved_image)}

""" filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') - save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True) + save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True) except Exception: print(traceback.format_exc(), file=sys.stderr) pass @@ -470,7 +493,7 @@ Last saved image: {html.escape(last_saved_image)}
return embedding, filename -def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cached_checksum=True): +def save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, remove_cached_checksum=True): old_embedding_name = embedding.name old_sd_checkpoint = embedding.sd_checkpoint if hasattr(embedding, "sd_checkpoint") else None old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None @@ -481,6 +504,7 @@ def save_embedding(embedding, checkpoint, embedding_name, filename, remove_cache if remove_cached_checksum: embedding.cached_checksum = None embedding.name = embedding_name + embedding.optimizer_state_dict = optimizer.state_dict() embedding.save(filename) except: embedding.sd_checkpoint = old_sd_checkpoint From e9fb9bb0c25f59109a816fc53c385bed58965c24 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 17:40:20 +0300 Subject: [PATCH 083/461] fix hires fix not working in API when user does not specify upscaler --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 4654570c..a172af0b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -685,7 +685,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) - latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_default_mode + latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest") if self.enable_hr and latent_scale_mode is None: assert len([x for x in shared.sd_upscalers if x.name == self.hr_upscaler]) > 0, f"could not find upscaler named {self.hr_upscaler}" From aaa4c2aacbb6523077334093c81bd475d757f7a1 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 3 Jan 2023 09:45:16 -0500 Subject: [PATCH 084/461] add api logging --- modules/api/api.py | 24 +++++++++++++++++++++++- modules/shared.py | 1 + 2 files changed, 24 insertions(+), 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index 9c670f00..53135470 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -1,11 +1,12 @@ import base64 import io import time +import datetime import uvicorn from threading import Lock from io import BytesIO from gradio.processing_utils import decode_base64_to_file -from fastapi import APIRouter, Depends, FastAPI, HTTPException +from fastapi import APIRouter, Depends, FastAPI, HTTPException, Request, Response from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest @@ -67,6 +68,26 @@ def encode_pil_to_base64(image): bytes_data = output_bytes.getvalue() return base64.b64encode(bytes_data) +def init_api_middleware(app: FastAPI): + @app.middleware("http") + async def log_and_time(req: Request, call_next): + ts = time.time() + res: Response = await call_next(req) + duration = str(round(time.time() - ts, 4)) + res.headers["X-Process-Time"] = duration + if shared.cmd_opts.api_log: + print('API {t} {code} {prot}/{ver} {method} {p} {cli} {duration}'.format( + t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"), + code = res.status_code, + ver = req.scope.get('http_version', 
'0.0'), + cli = req.scope.get('client', ('0:0.0.0', 0))[0], + prot = req.scope.get('scheme', 'err'), + method = req.scope.get('method', 'err'), + p = req.scope.get('path', 'err'), + duration = duration, + )) + return res + class Api: def __init__(self, app: FastAPI, queue_lock: Lock): @@ -78,6 +99,7 @@ class Api: self.router = APIRouter() self.app = app + init_api_middleware(self.app) self.queue_lock = queue_lock self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) diff --git a/modules/shared.py b/modules/shared.py index 23657a93..2a03d716 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -82,6 +82,7 @@ parser.add_argument('--vae-path', type=str, help='Path to Variational Autoencode parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)") parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--api-log", action='store_true', help="use api-log=True to enable logging of all API requests") parser.add_argument("--nowebui", action='store_true', help="use api=True to launch the API instead of the webui") parser.add_argument("--ui-debug-mode", action='store_true', help="Don't load model to quickly launch UI") parser.add_argument("--device-id", type=str, help="Select the default CUDA device to use (export CUDA_VISIBLE_DEVICES=0,1,etc might be needed before)", default=None) From 1d9dc48efda2e8da6d13fc62e65500198a9b041c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 3 Jan 2023 10:21:51 -0500 Subject: [PATCH 085/461] init job and add info to model merge --- modules/extras.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index 5e270250..7e222313 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -242,6 +242,9 @@ def run_pnginfo(image): def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_name, interp_method, multiplier, save_as_half, custom_name, checkpoint_format): + shared.state.begin() + shared.state.job = 'model-merge' + def weighted_sum(theta0, theta1, alpha): return ((1 - alpha) * theta0) + (alpha * theta1) @@ -263,8 +266,11 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam theta_func1, theta_func2 = theta_funcs[interp_method] if theta_func1 and not tertiary_model_info: + shared.state.textinfo = "Failed: Interpolation method requires a tertiary model." + shared.state.end() return ["Failed: Interpolation method requires a tertiary model."] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)] + shared.state.textinfo = f"Loading {secondary_model_info.filename}..." print(f"Loading {secondary_model_info.filename}...") theta_1 = sd_models.read_state_dict(secondary_model_info.filename, map_location='cpu') @@ -281,6 +287,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam theta_1[key] = torch.zeros_like(theta_1[key]) del theta_2 + shared.state.textinfo = f"Loading {primary_model_info.filename}..." 
print(f"Loading {primary_model_info.filename}...") theta_0 = sd_models.read_state_dict(primary_model_info.filename, map_location='cpu') @@ -291,6 +298,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam a = theta_0[key] b = theta_1[key] + shared.state.textinfo = f'Merging layer {key}' # this enables merging an inpainting model (A) with another one (B); # where normal model would have 4 channels, for latenst space, inpainting model would # have another 4 channels for unmasked picture's latent space, plus one channel for mask, for a total of 9 @@ -303,8 +311,6 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam theta_0[key][:, 0:4, :, :] = theta_func2(a[:, 0:4, :, :], b, multiplier) result_is_inpainting_model = True else: - assert a.shape == b.shape, f'Incompatible shapes for layer {key}: A is {a.shape}, and B is {b.shape}' - theta_0[key] = theta_func2(a, b, multiplier) if save_as_half: @@ -332,6 +338,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam output_modelname = os.path.join(ckpt_dir, filename) + shared.state.textinfo = f"Saving to {output_modelname}..." print(f"Saving to {output_modelname}...") _, extension = os.path.splitext(output_modelname) @@ -343,4 +350,7 @@ def run_modelmerger(primary_model_name, secondary_model_name, tertiary_model_nam sd_models.list_models() print("Checkpoint saved.") + shared.state.textinfo = "Checkpoint saved to " + output_modelname + shared.state.end() + return ["Checkpoint saved to " + output_modelname] + [gr.Dropdown.update(choices=sd_models.checkpoint_tiles()) for _ in range(4)] From 192ddc04d6de0d780f73aa5fbaa8c66cd4642e1c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 3 Jan 2023 10:34:51 -0500 Subject: [PATCH 086/461] add job info to modules --- modules/extras.py | 17 +++++++++++++---- modules/hypernetworks/hypernetwork.py | 1 + modules/textual_inversion/preprocess.py | 1 + modules/textual_inversion/textual_inversion.py | 1 + 4 files changed, 16 insertions(+), 4 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index 7e222313..d665440a 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -58,6 +58,9 @@ cached_images: LruCache = LruCache(max_size=5) def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_dir, show_extras_results, gfpgan_visibility, codeformer_visibility, codeformer_weight, upscaling_resize, upscaling_resize_w, upscaling_resize_h, upscaling_crop, extras_upscaler_1, extras_upscaler_2, extras_upscaler_2_visibility, upscale_first: bool, save_output: bool = True): devices.torch_gc() + shared.state.begin() + shared.state.job = 'extras' + imageArr = [] # Also keep track of original file names imageNameArr = [] @@ -94,6 +97,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ # Extra operation definitions def run_gfpgan(image: Image.Image, info: str) -> Tuple[Image.Image, str]: + shared.state.job = 'extras-gfpgan' restored_img = modules.gfpgan_model.gfpgan_fix_faces(np.array(image, dtype=np.uint8)) res = Image.fromarray(restored_img) @@ -104,6 +108,7 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ return (res, info) def run_codeformer(image: Image.Image, info: str) -> Tuple[Image.Image, str]: + shared.state.job = 'extras-codeformer' restored_img = modules.codeformer_model.codeformer.restore(np.array(image, dtype=np.uint8), w=codeformer_weight) res = Image.fromarray(restored_img) @@ -114,6 +119,7 @@ def 
run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ return (res, info) def upscale(image, scaler_index, resize, mode, resize_w, resize_h, crop): + shared.state.job = 'extras-upscale' upscaler = shared.sd_upscalers[scaler_index] res = upscaler.scaler.upscale(image, resize, upscaler.data_path) if mode == 1 and crop: @@ -180,6 +186,9 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ for image, image_name in zip(imageArr, imageNameArr): if image is None: return outputs, "Please select an input image.", '' + + shared.state.textinfo = f'Processing image {image_name}' + existing_pnginfo = image.info or {} image = image.convert("RGB") @@ -193,6 +202,10 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ else: basename = '' + if opts.enable_pnginfo: # append info before save + image.info = existing_pnginfo + image.info["extras"] = info + if save_output: # Add upscaler name as a suffix. suffix = f"-{shared.sd_upscalers[extras_upscaler_1].name}" if shared.opts.use_upscaler_name_as_suffix else "" @@ -203,10 +216,6 @@ def run_extras(extras_mode, resize_mode, image, image_folder, input_dir, output_ images.save_image(image, path=outpath, basename=basename, seed=None, prompt=None, extension=opts.samples_format, info=info, short_filename=True, no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=None, suffix=suffix) - if opts.enable_pnginfo: - image.info = existing_pnginfo - image.info["extras"] = info - if extras_mode != 2 or show_extras_results : outputs.append(image) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 109e8078..450fecac 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -417,6 +417,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, shared.loaded_hypernetwork = Hypernetwork() shared.loaded_hypernetwork.load(path) + shared.state.job = "train-hypernetwork" shared.state.textinfo = "Initializing hypernetwork training..." shared.state.job_count = steps diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 56b9b2eb..feb876c6 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -124,6 +124,7 @@ def preprocess_work(process_src, process_dst, process_width, process_height, pre files = listfiles(src) + shared.state.job = "preprocess" shared.state.textinfo = "Preprocessing..." shared.state.job_count = len(files) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index fd253477..2c1251d6 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -245,6 +245,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ create_image_every = create_image_every or 0 validate_train_inputs(embedding_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_embedding_every, create_image_every, log_directory, name="embedding") + shared.state.job = "train-embedding" shared.state.textinfo = "Initializing textual inversion training..." 
shared.state.job_count = steps From 2d5a5076bb2a0c05cc27d75a1bcadab7f32a46d0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 18:38:21 +0300 Subject: [PATCH 087/461] Make it so that upscalers are not repeated when restarting UI. --- modules/modelloader.py | 20 ++++++++++++++++++++ webui.py | 14 +++++++------- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/modules/modelloader.py b/modules/modelloader.py index e647f6fa..6a1a7ac8 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -123,6 +123,23 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): pass +builtin_upscaler_classes = [] +forbidden_upscaler_classes = set() + + +def list_builtin_upscalers(): + load_upscalers() + + builtin_upscaler_classes.clear() + builtin_upscaler_classes.extend(Upscaler.__subclasses__()) + + +def forbid_loaded_nonbuiltin_upscalers(): + for cls in Upscaler.__subclasses__(): + if cls not in builtin_upscaler_classes: + forbidden_upscaler_classes.add(cls) + + def load_upscalers(): # We can only do this 'magic' method to dynamically load upscalers if they are referenced, # so we'll try to import any _model.py files before looking in __subclasses__ @@ -139,6 +156,9 @@ def load_upscalers(): datas = [] commandline_options = vars(shared.cmd_opts) for cls in Upscaler.__subclasses__(): + if cls in forbidden_upscaler_classes: + continue + name = cls.__name__ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" scaler = cls(commandline_options.get(cmd_name, None)) diff --git a/webui.py b/webui.py index 3aee8792..c7d55a97 100644 --- a/webui.py +++ b/webui.py @@ -1,4 +1,5 @@ import os +import sys import threading import time import importlib @@ -55,8 +56,8 @@ def initialize(): gfpgan.setup_model(cmd_opts.gfpgan_models_path) shared.face_restorers.append(modules.face_restoration.FaceRestoration()) + modelloader.list_builtin_upscalers() modules.scripts.load_scripts() - modelloader.load_upscalers() modules.sd_vae.refresh_vae_list() @@ -169,23 +170,22 @@ def webui(): modules.script_callbacks.app_started_callback(shared.demo, app) wait_on_server(shared.demo) + print('Restarting UI...') sd_samplers.set_samplers() - print('Reloading extensions') extensions.list_extensions() localization.list_localizations(cmd_opts.localizations_dir) - print('Reloading custom scripts') + modelloader.forbid_loaded_nonbuiltin_upscalers() modules.scripts.reload_scripts() modelloader.load_upscalers() - print('Reloading modules: modules.ui') - importlib.reload(modules.ui) - print('Refreshing Model List') + for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: + importlib.reload(module) + modules.sd_models.list_models() - print('Restarting Gradio') if __name__ == "__main__": From 8f96f9289981a66741ba770d14f3d27ce335a0fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 18:39:14 +0300 Subject: [PATCH 088/461] call script callbacks for reloaded model after loading embeddings --- modules/sd_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index bff8d6c9..b98b05fc 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -324,12 +324,12 @@ def load_model(checkpoint_info=None): sd_model.eval() shared.sd_model = sd_model + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) # Reload embeddings after model load as they may or may not fit the model + 
script_callbacks.model_loaded_callback(sd_model) print("Model loaded.") - sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload = True) # Reload embeddings after model load as they may or may not fit the model - return sd_model From cec209981ee988536c2521297baf9bc1b256005f Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 3 Jan 2023 10:58:52 -0500 Subject: [PATCH 089/461] log only sdapi --- modules/api/api.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 53135470..78751c57 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -68,22 +68,23 @@ def encode_pil_to_base64(image): bytes_data = output_bytes.getvalue() return base64.b64encode(bytes_data) -def init_api_middleware(app: FastAPI): +def api_middleware(app: FastAPI): @app.middleware("http") async def log_and_time(req: Request, call_next): ts = time.time() res: Response = await call_next(req) duration = str(round(time.time() - ts, 4)) res.headers["X-Process-Time"] = duration - if shared.cmd_opts.api_log: - print('API {t} {code} {prot}/{ver} {method} {p} {cli} {duration}'.format( + endpoint = req.scope.get('path', 'err') + if shared.cmd_opts.api_log and endpoint.startswith('/sdapi'): + print('API {t} {code} {prot}/{ver} {method} {endpoint} {cli} {duration}'.format( t = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S.%f"), code = res.status_code, ver = req.scope.get('http_version', '0.0'), cli = req.scope.get('client', ('0:0.0.0', 0))[0], prot = req.scope.get('scheme', 'err'), method = req.scope.get('method', 'err'), - p = req.scope.get('path', 'err'), + endpoint = endpoint, duration = duration, )) return res From d8d206c1685d1e7027d4af82ed18d106f41d1cc4 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Tue, 3 Jan 2023 11:01:04 -0500 Subject: [PATCH 090/461] add state to interrogate --- modules/interrogate.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/interrogate.py b/modules/interrogate.py index 6f761c5a..738d8ff7 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -136,7 +136,8 @@ class InterrogateModels: def interrogate(self, pil_image): res = "" - + shared.state.begin() + shared.state.job = 'interrogate' try: if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: @@ -177,5 +178,6 @@ class InterrogateModels: res += "" self.unload() + shared.state.end() return res From 82cfc227d735c140447d5b8dca29a71ee9bde127 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 20:23:17 +0300 Subject: [PATCH 091/461] added licenses screen to settings added footer removed unused inpainting code --- README.md | 2 + html/footer.html | 9 + html/licenses.html | 392 ++++++++++++++++++++++++++++++++ modules/sd_hijack_inpainting.py | 232 ------------------- modules/ui.py | 15 +- style.css | 11 + 6 files changed, 427 insertions(+), 234 deletions(-) create mode 100644 html/footer.html create mode 100644 html/licenses.html diff --git a/README.md b/README.md index 556000fb..88250a6b 100644 --- a/README.md +++ b/README.md @@ -127,6 +127,8 @@ Here's how to add code to this repo: [Contributing](https://github.com/AUTOMATIC The documentation was moved from this README over to the project's [wiki](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki). ## Credits +Licenses for borrowed code can be found in `Settings -> Licenses` screen, and also in `html/licenses.html` file. 
+ - Stable Diffusion - https://github.com/CompVis/stable-diffusion, https://github.com/CompVis/taming-transformers - k-diffusion - https://github.com/crowsonkb/k-diffusion.git - GFPGAN - https://github.com/TencentARC/GFPGAN.git diff --git a/html/footer.html b/html/footer.html new file mode 100644 index 00000000..a8f2adf7 --- /dev/null +++ b/html/footer.html @@ -0,0 +1,9 @@ +
+ API +  •  + Github +  •  + Gradio +  •  + Reload UI +
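How these static fragments are wired into the UI is not shown in this hunk (the modules/ui.py part of the commit is elsewhere); a plausible sketch, with load_html as an assumed helper name rather than the literal change, is to read the file from the html/ directory and render it with a gr.HTML block:

import os
import gradio as gr

def load_html(filename):
    # read a static fragment such as html/footer.html or html/licenses.html
    with open(os.path.join("html", filename), encoding="utf8") as file:
        return file.read()

with gr.Blocks() as demo:
    gr.HTML(load_html("footer.html"), elem_id="footer")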
diff --git a/html/licenses.html b/html/licenses.html new file mode 100644 index 00000000..9eeaa072 --- /dev/null +++ b/html/licenses.html @@ -0,0 +1,392 @@ + + +

+<h2>CodeFormer</h2>

+<small>Parts of CodeFormer code had to be copied to be compatible with GFPGAN.</small>
+<pre>
+S-Lab License 1.0
+
+Copyright 2022 S-Lab
+
+Redistribution and use for non-commercial purpose in source and
+binary forms, with or without modification, are permitted provided
+that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright
+   notice, this list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright
+   notice, this list of conditions and the following disclaimer in
+   the documentation and/or other materials provided with the
+   distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived
+   from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+In the event that redistribution and/or use for commercial purpose in
+source or binary forms, with or without modification is required,
+please contact the contributor(s) of the work.
+
+ + +

+<h2>ESRGAN</h2>

+<small>Code for architecture and reading models copied.</small>
+<pre>
+MIT License
+
+Copyright (c) 2021 victorca25
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

+<h2>Real-ESRGAN</h2>

+<small>Some code is copied to support ESRGAN models.</small>
+<pre>
+BSD 3-Clause License
+
+Copyright (c) 2021, Xintao Wang
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice, this
+   list of conditions and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+   this list of conditions and the following disclaimer in the documentation
+   and/or other materials provided with the distribution.
+
+3. Neither the name of the copyright holder nor the names of its
+   contributors may be used to endorse or promote products derived from
+   this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ +

+<h2>InvokeAI</h2>

+<small>Some code for compatibility with OSX is taken from lstein's repository.</small>
+<pre>
+MIT License
+
+Copyright (c) 2022 InvokeAI Team
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

+<h2>LDSR</h2>

+<small>Code added by contributors, most likely copied from this repository.</small>
+<pre>
+MIT License
+
+Copyright (c) 2022 Machine Vision and Learning Group, LMU Munich
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

+<h2>CLIP Interrogator</h2>

+<small>Some small amounts of code borrowed and reworked.</small>
+<pre>
+MIT License
+
+Copyright (c) 2022 pharmapsychotic
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ +

+<h2>SwinIR</h2>

+<small>Code added by contributors, most likely copied from this repository.</small>
+<pre>
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [2021] [SwinIR Authors]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+
+ diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 06b75772..3c214a35 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -12,191 +12,6 @@ from ldm.models.diffusion.ddpm import LatentDiffusion from ldm.models.diffusion.plms import PLMSSampler from ldm.models.diffusion.ddim import DDIMSampler, noise_like -# ================================================================================================= -# Monkey patch DDIMSampler methods from RunwayML repo directly. -# Adapted from: -# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddim.py -# ================================================================================================= -@torch.no_grad() -def sample_ddim(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... - **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): - ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - print(f'Data shape for DDIM sampling is {size}, eta {eta}') - - samples, intermediates = self.ddim_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - -@torch.no_grad() -def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, - temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, - unconditional_guidance_scale=1., unconditional_conditioning=None): - b, *_, device = *x.shape, x.device - - if unconditional_conditioning is None or unconditional_guidance_scale == 1.: - e_t = self.model.apply_model(x, t, c) - else: - x_in = torch.cat([x] * 2) - t_in = torch.cat([t] * 2) - if isinstance(c, dict): - assert isinstance(unconditional_conditioning, dict) - c_in = dict() - for k in c: - if isinstance(c[k], list): - c_in[k] = [ - torch.cat([unconditional_conditioning[k][i], c[k][i]]) - for i in range(len(c[k])) - ] - else: - c_in[k] = torch.cat([unconditional_conditioning[k], c[k]]) - else: - c_in = torch.cat([unconditional_conditioning, c]) - e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2) - e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond) - - if score_corrector is not None: - assert self.model.parameterization == "eps" - e_t = 
score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs) - - alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas - alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev - sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas - sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas - # select parameters corresponding to the currently considered timestep - a_t = torch.full((b, 1, 1, 1), alphas[index], device=device) - a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device) - sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device) - sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device) - - # current prediction for x_0 - pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt() - if quantize_denoised: - pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0) - # direction pointing to x_t - dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t - noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature - if noise_dropout > 0.: - noise = torch.nn.functional.dropout(noise, p=noise_dropout) - x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise - return x_prev, pred_x0 - - -# ================================================================================================= -# Monkey patch PLMSSampler methods. -# This one was not actually patched correctly in the RunwayML repo, but we can replicate the changes. -# Adapted from: -# https://github.com/CompVis/stable-diffusion/blob/main/ldm/models/diffusion/plms.py -# ================================================================================================= -@torch.no_grad() -def sample_plms(self, - S, - batch_size, - shape, - conditioning=None, - callback=None, - normals_sequence=None, - img_callback=None, - quantize_x0=False, - eta=0., - mask=None, - x0=None, - temperature=1., - noise_dropout=0., - score_corrector=None, - corrector_kwargs=None, - verbose=True, - x_T=None, - log_every_t=100, - unconditional_guidance_scale=1., - unconditional_conditioning=None, - # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ... 
- **kwargs - ): - if conditioning is not None: - if isinstance(conditioning, dict): - ctmp = conditioning[list(conditioning.keys())[0]] - while isinstance(ctmp, list): - ctmp = ctmp[0] - cbs = ctmp.shape[0] - if cbs != batch_size: - print(f"Warning: Got {cbs} conditionings but batch-size is {batch_size}") - else: - if conditioning.shape[0] != batch_size: - print(f"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}") - - self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose) - # sampling - C, H, W = shape - size = (batch_size, C, H, W) - # print(f'Data shape for PLMS sampling is {size}') # remove unnecessary message - - samples, intermediates = self.plms_sampling(conditioning, size, - callback=callback, - img_callback=img_callback, - quantize_denoised=quantize_x0, - mask=mask, x0=x0, - ddim_use_original_steps=False, - noise_dropout=noise_dropout, - temperature=temperature, - score_corrector=score_corrector, - corrector_kwargs=corrector_kwargs, - x_T=x_T, - log_every_t=log_every_t, - unconditional_guidance_scale=unconditional_guidance_scale, - unconditional_conditioning=unconditional_conditioning, - ) - return samples, intermediates - @torch.no_grad() def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False, @@ -280,44 +95,6 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F return x_prev, pred_x0, e_t -# ================================================================================================= -# Monkey patch LatentInpaintDiffusion to load the checkpoint with a proper config. -# Adapted from: -# https://github.com/runwayml/stable-diffusion/blob/main/ldm/models/diffusion/ddpm.py -# ================================================================================================= - -@torch.no_grad() -def get_unconditional_conditioning(self, batch_size, null_label=None): - if null_label is not None: - xc = null_label - if isinstance(xc, ListConfig): - xc = list(xc) - if isinstance(xc, dict) or isinstance(xc, list): - c = self.get_learned_conditioning(xc) - else: - if hasattr(xc, "to"): - xc = xc.to(self.device) - c = self.get_learned_conditioning(xc) - else: - # todo: get null label from cond_stage_model - raise NotImplementedError() - c = repeat(c, "1 ... 
-> b ...", b=batch_size).to(self.device) - return c - - -class LatentInpaintDiffusion(LatentDiffusion): - def __init__( - self, - concat_keys=("mask", "masked_image"), - masked_image_key="masked_image", - *args, - **kwargs, - ): - super().__init__(*args, **kwargs) - self.masked_image_key = masked_image_key - assert self.masked_image_key in concat_keys - self.concat_keys = concat_keys - def should_hijack_inpainting(checkpoint_info): ckpt_basename = os.path.basename(checkpoint_info.filename).lower() @@ -326,15 +103,6 @@ def should_hijack_inpainting(checkpoint_info): def do_inpainting_hijack(): - # most of this stuff seems to no longer be needed because it is already included into SD2.0 # p_sample_plms is needed because PLMS can't work with dicts as conditionings - # this file should be cleaned up later if everything turns out to work fine - - # ldm.models.diffusion.ddpm.get_unconditional_conditioning = get_unconditional_conditioning - # ldm.models.diffusion.ddpm.LatentInpaintDiffusion = LatentInpaintDiffusion - - # ldm.models.diffusion.ddim.DDIMSampler.p_sample_ddim = p_sample_ddim - # ldm.models.diffusion.ddim.DDIMSampler.sample = sample_ddim ldm.models.diffusion.plms.PLMSSampler.p_sample_plms = p_sample_plms - # ldm.models.diffusion.plms.PLMSSampler.sample = sample_plms diff --git a/modules/ui.py b/modules/ui.py index f2e7c0d6..d941cb5f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1529,8 +1529,10 @@ def create_ui(): with gr.Blocks(analytics_enabled=False) as settings_interface: with gr.Row(): - settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") - restart_gradio = gr.Button(value='Restart UI', variant='primary', elem_id="settings_restart_gradio") + with gr.Column(scale=6): + settings_submit = gr.Button(value="Apply settings", variant='primary', elem_id="settings_submit") + with gr.Column(): + restart_gradio = gr.Button(value='Reload UI', variant='primary', elem_id="settings_restart_gradio") result = gr.HTML(elem_id="settings_result") @@ -1574,6 +1576,11 @@ def create_ui(): download_localization = gr.Button(value='Download localization template', elem_id="download_localization") reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary', elem_id="settings_reload_script_bodies") + if os.path.exists("html/licenses.html"): + with open("html/licenses.html", encoding="utf8") as file: + with gr.TabItem("Licenses"): + gr.HTML(file.read(), elem_id="licenses") + gr.Button(value="Show all pages", elem_id="settings_show_all_pages") request_notifications.click( @@ -1659,6 +1666,10 @@ def create_ui(): if os.path.exists(os.path.join(script_path, "notification.mp3")): audio_notification = gr.Audio(interactive=False, value=os.path.join(script_path, "notification.mp3"), elem_id="audio_notification", visible=False) + if os.path.exists("html/footer.html"): + with open("html/footer.html", encoding="utf8") as file: + gr.HTML(file.read(), elem_id="footer") + text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False) settings_submit.click( fn=wrap_gradio_call(run_settings, extra_outputs=[gr.update()]), diff --git a/style.css b/style.css index 7296ce91..2116ec3c 100644 --- a/style.css +++ b/style.css @@ -616,6 +616,17 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h padding-bottom: 0.5em; } +footer { + display: none !important; +} + +#footer{ + text-align: center; +} + +#footer div{ + display: inline-block; +} /* The following 
handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. From 7c89f3718f9f078113833a88a86f02d3205855b4 Mon Sep 17 00:00:00 2001 From: MMaker Date: Tue, 3 Jan 2023 12:46:48 -0500 Subject: [PATCH 092/461] Add image paste fallback Fixes Firefox pasting support (and possibly other browsers) --- javascript/dragdrop.js | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/javascript/dragdrop.js b/javascript/dragdrop.js index 3ed1cb3c..fe008924 100644 --- a/javascript/dragdrop.js +++ b/javascript/dragdrop.js @@ -9,11 +9,19 @@ function dropReplaceImage( imgWrap, files ) { return; } + const tmpFile = files[0]; + imgWrap.querySelector('.modify-upload button + button, .touch-none + div button + button')?.click(); const callback = () => { const fileInput = imgWrap.querySelector('input[type="file"]'); if ( fileInput ) { - fileInput.files = files; + if ( files.length === 0 ) { + files = new DataTransfer(); + files.items.add(tmpFile); + fileInput.files = files.files; + } else { + fileInput.files = files; + } fileInput.dispatchEvent(new Event('change')); } }; From 3e22e294135ed0327ce9d9738655ff03c53df3c0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 3 Jan 2023 21:49:24 +0300 Subject: [PATCH 093/461] fix broken send to extras button --- modules/generation_parameters_copypaste.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index d94f11a3..4baf4d9a 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -37,7 +37,10 @@ def quote(text): def image_from_url_text(filedata): - if type(filedata) == dict and filedata["is_file"]: + if type(filedata) == list and len(filedata) > 0 and type(filedata[0]) == dict and filedata[0].get("is_file", False): + filedata = filedata[0] + + if type(filedata) == dict and filedata.get("is_file", False): filename = filedata["name"] is_in_right_dir = ui_tempdir.check_tmp_file(shared.demo, filename) assert is_in_right_dir, 'trying to open image file outside of allowed directories' From 917b5bd8d0cd47c9dc241c1852ccd440a8c61668 Mon Sep 17 00:00:00 2001 From: Max Weber Date: Tue, 3 Jan 2023 18:19:56 -0700 Subject: [PATCH 094/461] ui: save dropdown sampling method to the ui-config --- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index d941cb5f..bfc93634 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -635,6 +635,7 @@ def create_sampler_and_steps_selection(choices, tabname): if opts.samplers_in_dropdown: with FormRow(elem_id=f"sampler_selection_{tabname}"): sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") + sampler_index.save_to_config = True steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) else: with FormGroup(elem_id=f"sampler_selection_{tabname}"): From 4fc81542077af73610279ad7b6b26e38718a0f81 Mon Sep 17 00:00:00 2001 From: Gerschel Date: Tue, 3 Jan 2023 23:25:34 -0800 Subject: [PATCH 095/461] better targetting, class tabs was autoassigned I moved a preset manager into quicksettings, this function was targeting my component instead of the tabs. 
This is because class tabs is autoassigned, while element id #tabs is not, this allows a tabbed component to live in the quicksettings. --- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 9748ec90..0e117d06 100644 --- a/script.js +++ b/script.js @@ -4,7 +4,7 @@ function gradioApp() { } function get_uiCurrentTab() { - return gradioApp().querySelector('.tabs button:not(.border-transparent)') + return gradioApp().querySelector('#tabs button:not(.border-transparent)') } function get_uiCurrentTabContent() { From e5b7ee910e7bb88f08e8876b5732cb034c6fe529 Mon Sep 17 00:00:00 2001 From: MMaker Date: Wed, 4 Jan 2023 04:22:01 -0500 Subject: [PATCH 096/461] fix: Save full res of intermediate step --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index a172af0b..93e75ba6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -705,7 +705,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): return if not isinstance(image, Image.Image): - image = sd_samplers.sample_to_image(image, index) + image = sd_samplers.sample_to_image(image, index, approximation=0) images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix") From 02d7abf5141431b9a3a8a189bb3136c71abd5e79 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 12:35:07 +0300 Subject: [PATCH 097/461] helpful error message when trying to load 2.0 without config failing to load model weights from settings won't break generation for currently loaded model anymore --- modules/errors.py | 25 +++++++++++++++++++++++-- modules/sd_models.py | 24 +++++++++++++++++------- modules/shared.py | 9 +++++++-- webui.py | 12 ++++++++++-- 4 files changed, 57 insertions(+), 13 deletions(-) diff --git a/modules/errors.py b/modules/errors.py index 372dc51a..a668c014 100644 --- a/modules/errors.py +++ b/modules/errors.py @@ -2,9 +2,30 @@ import sys import traceback +def print_error_explanation(message): + lines = message.strip().split("\n") + max_len = max([len(x) for x in lines]) + + print('=' * max_len, file=sys.stderr) + for line in lines: + print(line, file=sys.stderr) + print('=' * max_len, file=sys.stderr) + + +def display(e: Exception, task): + print(f"{task or 'error'}: {type(e).__name__}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + message = str(e) + if "copying a param with shape torch.Size([640, 1024]) from checkpoint, the shape in current model is torch.Size([640, 768])" in message: + print_error_explanation(""" +The most likely cause of this is you are trying to load Stable Diffusion 2.0 model without specifying its connfig file. +See https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#stable-diffusion-20 for how to solve this. 
+ """) + + def run(code, task): try: code() except Exception as e: - print(f"{task}: {type(e).__name__}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) + display(task, e) diff --git a/modules/sd_models.py b/modules/sd_models.py index b98b05fc..6846b74a 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -278,6 +278,7 @@ def enable_midas_autodownload(): midas.api.load_model = load_model_wrapper + def load_model(checkpoint_info=None): from modules import lowvram, sd_hijack checkpoint_info = checkpoint_info or select_checkpoint() @@ -312,6 +313,7 @@ def load_model(checkpoint_info=None): sd_config.model.params.unet_config.params.use_fp16 = False sd_model = instantiate_from_config(sd_config.model) + load_model_weights(sd_model, checkpoint_info) if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: @@ -336,10 +338,12 @@ def load_model(checkpoint_info=None): def reload_model_weights(sd_model=None, info=None): from modules import lowvram, devices, sd_hijack checkpoint_info = info or select_checkpoint() - + if not sd_model: sd_model = shared.sd_model + current_checkpoint_info = sd_model.sd_checkpoint_info + if sd_model.sd_model_checkpoint == checkpoint_info.filename: return @@ -356,13 +360,19 @@ def reload_model_weights(sd_model=None, info=None): sd_hijack.model_hijack.undo_hijack(sd_model) - load_model_weights(sd_model, checkpoint_info) + try: + load_model_weights(sd_model, checkpoint_info) + except Exception as e: + print("Failed to load checkpoint, restoring previous") + load_model_weights(sd_model, current_checkpoint_info) + raise + finally: + sd_hijack.model_hijack.hijack(sd_model) + script_callbacks.model_loaded_callback(sd_model) - sd_hijack.model_hijack.hijack(sd_model) - script_callbacks.model_loaded_callback(sd_model) - - if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: - sd_model.to(devices.device) + if not shared.cmd_opts.lowvram and not shared.cmd_opts.medvram: + sd_model.to(devices.device) print("Weights loaded.") + return sd_model diff --git a/modules/shared.py b/modules/shared.py index 23657a93..7588c47b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -14,7 +14,7 @@ import modules.interrogate import modules.memmon import modules.styles import modules.devices as devices -from modules import localization, sd_vae, extensions, script_loading +from modules import localization, sd_vae, extensions, script_loading, errors from modules.paths import models_path, script_path, sd_path @@ -494,7 +494,12 @@ class Options: return False if self.data_labels[key].onchange is not None: - self.data_labels[key].onchange() + try: + self.data_labels[key].onchange() + except Exception as e: + errors.display(e, f"changing setting {key} to {value}") + setattr(self, key, oldval) + return False return True diff --git a/webui.py b/webui.py index c7d55a97..13375e71 100644 --- a/webui.py +++ b/webui.py @@ -9,7 +9,7 @@ from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware -from modules import import_hook +from modules import import_hook, errors from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call from modules.paths import script_path @@ -61,7 +61,15 @@ def initialize(): modelloader.load_upscalers() modules.sd_vae.refresh_vae_list() - modules.sd_models.load_model() + + try: + modules.sd_models.load_model() + except Exception as e: + errors.display(e, "loading stable diffusion model") + print("", file=sys.stderr) + print("Stable diffusion model failed to 
load, exiting", file=sys.stderr) + exit(1) + shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights())) shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False) From 8d8a05a3bbb50fdfeab51679a919d2487bd97976 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 12:47:42 +0300 Subject: [PATCH 098/461] find configs for models at runtime rather than when starting --- modules/sd_hijack_inpainting.py | 5 ++++- modules/sd_models.py | 31 ++++++++++++++++++------------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/modules/sd_hijack_inpainting.py b/modules/sd_hijack_inpainting.py index 3c214a35..31d2c898 100644 --- a/modules/sd_hijack_inpainting.py +++ b/modules/sd_hijack_inpainting.py @@ -97,8 +97,11 @@ def p_sample_plms(self, x, c, t, index, repeat_noise=False, use_original_steps=F def should_hijack_inpainting(checkpoint_info): + from modules import sd_models + ckpt_basename = os.path.basename(checkpoint_info.filename).lower() - cfg_basename = os.path.basename(checkpoint_info.config).lower() + cfg_basename = os.path.basename(sd_models.find_checkpoint_config(checkpoint_info)).lower() + return "inpainting" in ckpt_basename and not "inpainting" in cfg_basename diff --git a/modules/sd_models.py b/modules/sd_models.py index 6846b74a..6dca4ddf 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -20,7 +20,7 @@ from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inp model_dir = "Stable-diffusion" model_path = os.path.abspath(os.path.join(models_path, model_dir)) -CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config']) +CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name']) checkpoints_list = {} checkpoints_loaded = collections.OrderedDict() @@ -48,6 +48,14 @@ def checkpoint_tiles(): return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key) +def find_checkpoint_config(info): + config = os.path.splitext(info.filename)[0] + ".yaml" + if os.path.exists(config): + return config + + return shared.cmd_opts.config + + def list_models(): checkpoints_list.clear() model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"]) @@ -73,7 +81,7 @@ def list_models(): if os.path.exists(cmd_ckpt): h = model_hash(cmd_ckpt) title, short_model_name = modeltitle(cmd_ckpt, h) - checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config) + checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name) shared.opts.data['sd_model_checkpoint'] = title elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file: print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr) @@ -81,12 +89,7 @@ def list_models(): h = model_hash(filename) title, short_model_name = modeltitle(filename, h) - basename, _ = os.path.splitext(filename) - config = basename + ".yaml" - if not os.path.exists(config): - config = shared.cmd_opts.config - - checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config) + checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name) def 
get_closet_checkpoint_match(searchString): @@ -282,9 +285,10 @@ def enable_midas_autodownload(): def load_model(checkpoint_info=None): from modules import lowvram, sd_hijack checkpoint_info = checkpoint_info or select_checkpoint() + checkpoint_config = find_checkpoint_config(checkpoint_info) - if checkpoint_info.config != shared.cmd_opts.config: - print(f"Loading config from: {checkpoint_info.config}") + if checkpoint_config != shared.cmd_opts.config: + print(f"Loading config from: {checkpoint_config}") if shared.sd_model: sd_hijack.model_hijack.undo_hijack(shared.sd_model) @@ -292,7 +296,7 @@ def load_model(checkpoint_info=None): gc.collect() devices.torch_gc() - sd_config = OmegaConf.load(checkpoint_info.config) + sd_config = OmegaConf.load(checkpoint_config) if should_hijack_inpainting(checkpoint_info): # Hardcoded config for now... @@ -302,7 +306,7 @@ def load_model(checkpoint_info=None): sd_config.model.params.finetune_keys = None # Create a "fake" config with a different name so that we know to unload it when switching models. - checkpoint_info = checkpoint_info._replace(config=checkpoint_info.config.replace(".yaml", "-inpainting.yaml")) + checkpoint_info = checkpoint_info._replace(config=checkpoint_config.replace(".yaml", "-inpainting.yaml")) if not hasattr(sd_config.model.params, "use_ema"): sd_config.model.params.use_ema = False @@ -343,11 +347,12 @@ def reload_model_weights(sd_model=None, info=None): sd_model = shared.sd_model current_checkpoint_info = sd_model.sd_checkpoint_info + checkpoint_config = find_checkpoint_config(current_checkpoint_info) if sd_model.sd_model_checkpoint == checkpoint_info.filename: return - if sd_model.sd_checkpoint_info.config != checkpoint_info.config or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info): + if checkpoint_config != find_checkpoint_config(checkpoint_info) or should_hijack_inpainting(checkpoint_info) != should_hijack_inpainting(sd_model.sd_checkpoint_info): del sd_model checkpoints_loaded.clear() load_model(checkpoint_info) From 96cf15bedecbed97ef9b70b8413d543a9aee5adf Mon Sep 17 00:00:00 2001 From: MMaker Date: Wed, 4 Jan 2023 05:12:06 -0500 Subject: [PATCH 099/461] Add new latent upscale modes --- modules/shared.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 7588c47b..a10f69a9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -564,8 +564,11 @@ if os.path.exists(config_filename): latent_upscale_default_mode = "Latent" latent_upscale_modes = { - "Latent": "bilinear", - "Latent (nearest)": "nearest", + "Latent": {"mode": "bilinear", "antialias": False}, + "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, + "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, + "Latent (bicubic, antialiased)": {"mode": "bicubic", "antialias": True}, + "Latent (nearest)": {"mode": "nearest", "antialias": False}, } sd_upscalers = [] From 15fd0b8bc4734ea85bca1acfb12b51465ab9817d Mon Sep 17 00:00:00 2001 From: MMaker Date: Wed, 4 Jan 2023 05:12:54 -0500 Subject: [PATCH 100/461] Update processing.py --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index a172af0b..7c72b56a 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -713,7 +713,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): for i in range(samples.shape[0]): save_intermediate(samples, i) - samples = 
torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode) + samples = torch.nn.functional.interpolate(samples, size=(target_height // opt_f, target_width // opt_f), mode=latent_scale_mode["mode"], antialias=latent_scale_mode["antialias"]) # Avoid making the inpainting conditioning unless necessary as # this does need some extra compute to decode / encode the image again. From 4ec6470a1a2d9430b91266426f995e48f59564e1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 13:26:23 +0300 Subject: [PATCH 101/461] fix checkpoint list API --- modules/api/api.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 9c670f00..2b1f180c 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -18,7 +18,7 @@ from modules.textual_inversion.textual_inversion import create_embedding, train_ from modules.textual_inversion.preprocess import preprocess from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork from PIL import PngImagePlugin,Image -from modules.sd_models import checkpoints_list +from modules.sd_models import checkpoints_list, find_checkpoint_config from modules.realesrgan_model import get_realesrgan_models from modules import devices from typing import List @@ -303,7 +303,7 @@ class Api: return upscalers def get_sd_models(self): - return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": x.config} for x in checkpoints_list.values()] + return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()] def get_hypernetworks(self): return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks] From b2151b934fe0a3613570c6abd7615d3788fd1c8f Mon Sep 17 00:00:00 2001 From: MMaker Date: Wed, 4 Jan 2023 05:36:18 -0500 Subject: [PATCH 102/461] Rename bicubic antialiased option Comma was causing the the value in PNG info to be quoted, which causes the upscaler dropdown option to be blank when sending to UI --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index a10f69a9..c1b20081 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -567,7 +567,7 @@ latent_upscale_modes = { "Latent": {"mode": "bilinear", "antialias": False}, "Latent (antialiased)": {"mode": "bilinear", "antialias": True}, "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, - "Latent (bicubic, antialiased)": {"mode": "bicubic", "antialias": True}, + "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, "Latent (nearest)": {"mode": "nearest", "antialias": False}, } From 3bd737767b071878ea980e94b8705f603bcf545e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 14:20:32 +0300 Subject: [PATCH 103/461] disable broken API logging --- modules/api/api.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/api/api.py b/modules/api/api.py index a6c1d6ed..6267afdc 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -100,7 +100,6 @@ class Api: self.router = APIRouter() self.app = app - init_api_middleware(self.app) self.queue_lock = queue_lock self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], 
response_model=ImageToImageResponse) From 0cd6399b8b1699b8b7acad6f0ad2988111fe618e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 14:29:13 +0300 Subject: [PATCH 104/461] fix broken inpainting model --- modules/sd_models.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 6dca4ddf..a568823d 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -305,9 +305,6 @@ def load_model(checkpoint_info=None): sd_config.model.params.unet_config.params.in_channels = 9 sd_config.model.params.finetune_keys = None - # Create a "fake" config with a different name so that we know to unload it when switching models. - checkpoint_info = checkpoint_info._replace(config=checkpoint_config.replace(".yaml", "-inpainting.yaml")) - if not hasattr(sd_config.model.params, "use_ema"): sd_config.model.params.use_ema = False From 11b8160a086c434d5baf4971edda46e6d2126800 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 4 Jan 2023 06:36:57 -0500 Subject: [PATCH 105/461] fix typo --- modules/api/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/api/api.py b/modules/api/api.py index 6267afdc..48a70a44 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -101,6 +101,7 @@ class Api: self.router = APIRouter() self.app = app self.queue_lock = queue_lock + api_middleware(self.app) self.add_api_route("/sdapi/v1/txt2img", self.text2imgapi, methods=["POST"], response_model=TextToImageResponse) self.add_api_route("/sdapi/v1/img2img", self.img2imgapi, methods=["POST"], response_model=ImageToImageResponse) self.add_api_route("/sdapi/v1/extra-single-image", self.extras_single_image_api, methods=["POST"], response_model=ExtrasSingleImageResponse) From 642142556d8ecdea9beb86d7618b628b1803ab98 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 15:09:53 +0300 Subject: [PATCH 106/461] use commandline-supplied cuda device name instead of cuda:0 for safetensors PR that doesn't fix anything --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index ee918f24..76a89e88 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -173,7 +173,7 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None if extension.lower() == ".safetensors": device = map_location or shared.weight_load_location if device is None: - device = "cuda:0" if torch.cuda.is_available() else "cpu" + device = devices.get_cuda_device_string() if torch.cuda.is_available() else "cpu" pl_sd = safetensors.torch.load_file(checkpoint_file, device=device) else: pl_sd = torch.load(checkpoint_file, map_location=map_location or shared.weight_load_location) From 21ee77db314ede7ccbb18787962347c09a4df0c7 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 4 Jan 2023 08:04:38 -0500 Subject: [PATCH 107/461] add cross-attention info --- modules/sd_hijack.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index edcbaf52..fa2cd4bb 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -35,26 +35,35 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu ldm.modules.diffusionmodules.openaimodel.th = sd_hijack_unet.th + + optimization_method = None if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and (6, 0) <= 
torch.cuda.get_device_capability(shared.device) <= (9, 0)): print("Applying xformers cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward + optimization_method = 'xformers' elif cmd_opts.opt_split_attention_v1: print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 + optimization_method = 'V1' elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()): if not invokeAI_mps_available and shared.device.type == 'mps': print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.") print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 + optimization_method = 'V1' else: print("Applying cross attention optimization (InvokeAI).") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI + optimization_method = 'InvokeAI' elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): print("Applying cross attention optimization (Doggettx).") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward + optimization_method = 'Doggettx' + + return optimization_method def undo_optimizations(): @@ -75,6 +84,7 @@ class StableDiffusionModelHijack: layers = None circular_enabled = False clip = None + optimization_method = None embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir) @@ -94,7 +104,7 @@ class StableDiffusionModelHijack: m.cond_stage_model.model.token_embedding = EmbeddingsWithFixes(m.cond_stage_model.model.token_embedding, self) m.cond_stage_model = sd_hijack_open_clip.FrozenOpenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) - apply_optimizations() + self.optimization_method = apply_optimizations() self.clip = m.cond_stage_model From 1cfd8aec4ae5a6ca1afd67b44cb4ef6dd14d8c34 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 16:05:42 +0300 Subject: [PATCH 108/461] make it possible to work with opts.show_progress_every_n_steps = -1 with medvram --- modules/shared.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 4fcc6edd..54a6ba23 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -214,12 +214,13 @@ class State: """sets self.current_image from self.current_latent if enough sampling steps have been made after the last call to this""" def set_current_image(self): + if not parallel_processing_allowed: + return + if self.sampling_step - self.current_image_sampling_step >= opts.show_progress_every_n_steps and opts.show_progress_every_n_steps > 0: self.do_set_current_image() def do_set_current_image(self): - if not parallel_processing_allowed: - return if self.current_latent is None: return @@ -231,6 +232,7 @@ class State: self.current_image_sampling_step = self.sampling_step + state = State() artist_db = modules.artists.ArtistsDatabase(os.path.join(script_path, 'artists.csv')) From 
79c682ad4f2d982b26fa1a15044582d1005134f9 Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 4 Jan 2023 08:20:42 -0500 Subject: [PATCH 109/461] fix jpeg --- modules/extras.py | 2 -- modules/images.py | 2 ++ requirements_versions.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index d665440a..7407bfe3 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -19,8 +19,6 @@ from modules.shared import opts import modules.gfpgan_model from modules.ui import plaintext_to_html import modules.codeformer_model -import piexif -import piexif.helper import gradio as gr import safetensors.torch diff --git a/modules/images.py b/modules/images.py index c3a5fc8b..a73be3fa 100644 --- a/modules/images.py +++ b/modules/images.py @@ -22,6 +22,8 @@ from modules.shared import opts, cmd_opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) +Image.init() # initialize once all known file format handlers + def image_grid(imgs, batch_size=1, rows=None): if rows is None: diff --git a/requirements_versions.txt b/requirements_versions.txt index 975102d9..7ae118cb 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -5,7 +5,7 @@ basicsr==1.4.2 gfpgan==1.3.8 gradio==3.15.0 numpy==1.23.3 -Pillow==9.2.0 +Pillow==9.3.0 realesrgan==0.3.0 torch omegaconf==2.2.3 From 4d66bf2c0d27702cc83b9cc57ebb1f359d18d938 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 17:24:46 +0300 Subject: [PATCH 110/461] add infotext to "-before-highres-fix" images --- modules/processing.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index fd7c7015..c03e77e7 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -136,6 +136,7 @@ class StableDiffusionProcessing(): self.all_negative_prompts = None self.all_seeds = None self.all_subseeds = None + self.iteration = 0 def txt2img_image_conditioning(self, x, width=None, height=None): if self.sampler.conditioning_key not in {'hybrid', 'concat'}: @@ -544,6 +545,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: state.job_count = p.n_iter for n in range(p.n_iter): + p.iteration = n + if state.skipped: state.skipped = False @@ -707,7 +710,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not isinstance(image, Image.Image): image = sd_samplers.sample_to_image(image, index, approximation=0) - images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, suffix="-before-highres-fix") + info = create_infotext(self, self.all_prompts, self.all_seeds, self.all_subseeds, [], iteration=self.iteration, position_in_batch=index) + images.save_image(image, self.outpath_samples, "", seeds[index], prompts[index], opts.samples_format, info=info, suffix="-before-highres-fix") if latent_scale_mode is not None: for i in range(samples.shape[0]): From 184e670126f5fc50ba56fa0fedcf0cf60e45ed7e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 17:45:01 +0300 Subject: [PATCH 111/461] fix the merge --- modules/textual_inversion/textual_inversion.py | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 5421a758..8731ea5d 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -251,6 +251,7 
@@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat if save_model_every or create_image_every: assert log_directory, "Log directory is empty" + def create_dummy_mask(x, width=None, height=None): if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}: @@ -380,17 +381,12 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ break with devices.autocast(): - # c = stack_conds(batch.cond).to(devices.device) - # mask = torch.tensor(batch.emb_index).to(devices.device, non_blocking=pin_memory) - # print(mask) - # c[:, 1:1+embedding.vec.shape[0]] = embedding.vec.to(devices.device, non_blocking=pin_memory) - - - if img_c is None: - img_c = create_dummy_mask(c, training_width, training_height) - x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) c = shared.sd_model.cond_stage_model(batch.cond_text) + + if img_c is None: + img_c = create_dummy_mask(c, training_width, training_height) + cond = {"c_concat": [img_c], "c_crossattn": [c]} loss = shared.sd_model(x, cond)[0] / gradient_step del x From 590c5ae016ae494f4873ca20079b30684ea3060c Mon Sep 17 00:00:00 2001 From: Vladimir Mandic Date: Wed, 4 Jan 2023 09:48:54 -0500 Subject: [PATCH 112/461] update pillow --- modules/images.py | 2 -- requirements_versions.txt | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/modules/images.py b/modules/images.py index a73be3fa..c3a5fc8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -22,8 +22,6 @@ from modules.shared import opts, cmd_opts LANCZOS = (Image.Resampling.LANCZOS if hasattr(Image, 'Resampling') else Image.LANCZOS) -Image.init() # initialize once all known file format handlers - def image_grid(imgs, batch_size=1, rows=None): if rows is None: diff --git a/requirements_versions.txt b/requirements_versions.txt index 7ae118cb..d2899292 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -5,7 +5,7 @@ basicsr==1.4.2 gfpgan==1.3.8 gradio==3.15.0 numpy==1.23.3 -Pillow==9.3.0 +Pillow==9.4.0 realesrgan==0.3.0 torch omegaconf==2.2.3 From 525cea924562afd676f55470095268a0f6fca59e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 17:58:07 +0300 Subject: [PATCH 113/461] use shared function from processing for creating dummy mask when training inpainting model --- modules/processing.py | 39 ++++++++++--------- .../textual_inversion/textual_inversion.py | 33 +++++----------- 2 files changed, 29 insertions(+), 43 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index c03e77e7..c7264aff 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -76,6 +76,24 @@ def apply_overlay(image, paste_loc, index, overlays): return image +def txt2img_image_conditioning(sd_model, x, width, height): + if sd_model.model.conditioning_key not in {'hybrid', 'concat'}: + # Dummy zero conditioning if we're not using inpainting model. + # Still takes up a bit of memory, but no encoder call. + # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. + return x.new_zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) + + # The "masked-image" in this case will just be all zeros since the entire image is masked. + image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) + image_conditioning = sd_model.get_first_stage_encoding(sd_model.encode_first_stage(image_conditioning)) + + # Add the fake full 1s mask to the first dimension. 
+ image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) + image_conditioning = image_conditioning.to(x.dtype) + + return image_conditioning + + class StableDiffusionProcessing(): """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing @@ -139,26 +157,9 @@ class StableDiffusionProcessing(): self.iteration = 0 def txt2img_image_conditioning(self, x, width=None, height=None): - if self.sampler.conditioning_key not in {'hybrid', 'concat'}: - # Dummy zero conditioning if we're not using inpainting model. - # Still takes up a bit of memory, but no encoder call. - # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. - return x.new_zeros(x.shape[0], 5, 1, 1) + self.is_using_inpainting_conditioning = self.sd_model.model.conditioning_key in {'hybrid', 'concat'} - self.is_using_inpainting_conditioning = True - - height = height or self.height - width = width or self.width - - # The "masked-image" in this case will just be all zeros since the entire image is masked. - image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = self.sd_model.get_first_stage_encoding(self.sd_model.encode_first_stage(image_conditioning)) - - # Add the fake full 1s mask to the first dimension. - image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) - - return image_conditioning + return txt2img_image_conditioning(self.sd_model, x, width or self.width, height or self.height) def depth2img_image_conditioning(self, source_image): # Use the AddMiDaS helper to Format our source image to suit the MiDaS model diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 8731ea5d..2250e41b 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -252,26 +252,6 @@ def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, dat assert log_directory, "Log directory is empty" -def create_dummy_mask(x, width=None, height=None): - if shared.sd_model.model.conditioning_key in {'hybrid', 'concat'}: - - # The "masked-image" in this case will just be all zeros since the entire image is masked. - image_conditioning = torch.zeros(x.shape[0], 3, height, width, device=x.device) - image_conditioning = shared.sd_model.get_first_stage_encoding(shared.sd_model.encode_first_stage(image_conditioning)) - - # Add the fake full 1s mask to the first dimension. - image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0) - image_conditioning = image_conditioning.to(x.dtype) - - else: - # Dummy zero conditioning if we're not using inpainting model. - # Still takes up a bit of memory, but no encoder call. - # Pretty sure we can just make this a 1x1 image since its not going to be used besides its batch size. 
- image_conditioning = torch.zeros(x.shape[0], 5, 1, 1, dtype=x.dtype, device=x.device) - - return image_conditioning - - def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): save_embedding_every = save_embedding_every or 0 create_image_every = create_image_every or 0 @@ -346,7 +326,6 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ else: print("No saved optimizer exists in checkpoint") - scaler = torch.cuda.amp.GradScaler() batch_size = ds.batch_size @@ -362,7 +341,9 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ forced_filename = "" embedding_yet_to_be_embedded = False + is_training_inpainting_model = shared.sd_model.model.conditioning_key in {'hybrid', 'concat'} img_c = None + pbar = tqdm.tqdm(total=steps - initial_step) try: for i in range((steps-initial_step) * gradient_step): @@ -384,10 +365,14 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ x = batch.latent_sample.to(devices.device, non_blocking=pin_memory) c = shared.sd_model.cond_stage_model(batch.cond_text) - if img_c is None: - img_c = create_dummy_mask(c, training_width, training_height) + if is_training_inpainting_model: + if img_c is None: + img_c = processing.txt2img_image_conditioning(shared.sd_model, c, training_width, training_height) + + cond = {"c_concat": [img_c], "c_crossattn": [c]} + else: + cond = c - cond = {"c_concat": [img_c], "c_crossattn": [c]} loss = shared.sd_model(x, cond)[0] / gradient_step del x From a8eb9e3bf814f72293e474c11e9ff0098859a942 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 18:20:38 +0300 Subject: [PATCH 114/461] Revert "Merge pull request #3791 from shirayu/fix/filename" This reverts commit eed58279e7cb0e873ebd88a29609f9bab0f1f3af, reversing changes made to 4ae960b01c6711c66985479f14809dc7fa549fc2. --- modules/images.py | 16 ++++------------ 1 file changed, 4 insertions(+), 12 deletions(-) diff --git a/modules/images.py b/modules/images.py index 2967fa9a..c3a5fc8b 100644 --- a/modules/images.py +++ b/modules/images.py @@ -447,14 +447,6 @@ def get_next_sequence_number(path, basename): return result + 1 -def truncate_fullpath(full_path, encoding='utf-8'): - dir_name, full_name = os.path.split(full_path) - file_name, file_ext = os.path.splitext(full_name) - max_length = os.statvfs(dir_name).f_namemax - file_name_truncated = file_name.encode(encoding)[:max_length - len(file_ext)].decode(encoding, 'ignore') - return os.path.join(dir_name , file_name_truncated + file_ext) - - def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None): """Save an image. 
@@ -495,7 +487,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i if save_to_dirs: dirname = namegen.apply(opts.directories_filename_pattern or "[prompt_words]").lstrip(' ').rstrip('\\ /') - path = truncate_fullpath(os.path.join(path, dirname)) + path = os.path.join(path, dirname) os.makedirs(path, exist_ok=True) @@ -519,13 +511,13 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i fullfn = None for i in range(500): fn = f"{basecount + i:05}" if basename == '' else f"{basename}-{basecount + i:04}" - fullfn = truncate_fullpath(os.path.join(path, f"{fn}{file_decoration}.{extension}")) + fullfn = os.path.join(path, f"{fn}{file_decoration}.{extension}") if not os.path.exists(fullfn): break else: - fullfn = truncate_fullpath(os.path.join(path, f"{file_decoration}.{extension}")) + fullfn = os.path.join(path, f"{file_decoration}.{extension}") else: - fullfn = truncate_fullpath(os.path.join(path, f"{forced_filename}.{extension}")) + fullfn = os.path.join(path, f"{forced_filename}.{extension}") pnginfo = existing_info or {} if info is not None: From 3dae545a03f5102ba5d9c3f27bb6241824c5a916 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 18:42:51 +0300 Subject: [PATCH 115/461] rename weirdly named variables from #3176 --- modules/ui.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index e4859020..184af7ad 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -162,16 +162,14 @@ def save_files(js_data, images, do_make_zip, index): return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}") - - -def calc_time_left(progress, threshold, label, force_display, showTime): +def calc_time_left(progress, threshold, label, force_display, show_eta): if progress == 0: return "" else: time_since_start = time.time() - shared.state.time_start eta = (time_since_start/progress) eta_relative = eta-time_since_start - if (eta_relative > threshold and showTime) or force_display: + if (eta_relative > threshold and show_eta) or force_display: if eta_relative > 3600: return label + time.strftime('%H:%M:%S', time.gmtime(eta_relative)) elif eta_relative > 60: @@ -194,9 +192,9 @@ def check_progress_call(id_part): progress += 1 / shared.state.job_count * shared.state.sampling_step / shared.state.sampling_steps # Show progress percentage and time left at the same moment, and base it also on steps done - showPBText = progress >= 0.01 or shared.state.sampling_step >= 10 + show_eta = progress >= 0.01 or shared.state.sampling_step >= 10 - time_left = calc_time_left( progress, 1, " ETA: ", shared.state.time_left_force_display, showPBText ) + time_left = calc_time_left(progress, 1, " ETA: ", shared.state.time_left_force_display, show_eta) if time_left != "": shared.state.time_left_force_display = True @@ -204,7 +202,7 @@ def check_progress_call(id_part): progressbar = "" if opts.show_progressbar: - progressbar = f"""
{" " * 2 + str(int(progress*100))+"%" + time_left if showPBText else ""}
""" + progressbar = f"""
{" " * 2 + str(int(progress*100))+"%" + time_left if show_eta else ""}
""" image = gr_show(False) preview_visibility = gr_show(False) From 097a90b88bb92878cf435c513b4757b5b82ae299 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 19:19:11 +0300 Subject: [PATCH 116/461] add XY plot parameters to grid image and do not add them to individual images --- modules/processing.py | 2 +- scripts/xy_grid.py | 38 ++++++++++++++++++++++++-------------- 2 files changed, 25 insertions(+), 15 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index c7264aff..47712159 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -422,7 +422,7 @@ def fix_seed(p): p.subseed = get_fixed_seed(p.subseed) -def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0): +def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 59907f0b..78ff12c5 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -10,7 +10,7 @@ import numpy as np import modules.scripts as scripts import gradio as gr -from modules import images, paths, sd_samplers +from modules import images, paths, sd_samplers, processing from modules.hypernetworks import hypernetwork from modules.processing import process_images, Processed, StableDiffusionProcessingTxt2Img from modules.shared import opts, cmd_opts, state @@ -285,6 +285,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*") re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*") + class Script(scripts.Script): def title(self): return "X/Y plot" @@ -381,7 +382,7 @@ class Script(scripts.Script): ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): - if axis_opt.label in ['Seed','Var. seed']: + if axis_opt.label in ['Seed', 'Var. seed']: return [int(random.randrange(4294967294)) if val is None or val == '' or val == -1 else val for val in axis_list] else: return axis_list @@ -403,24 +404,33 @@ class Script(scripts.Script): print(f"X/Y plot will create {len(xs) * len(ys) * p.n_iter} images on a {len(xs)}x{len(ys)} grid. (Total steps to process: {total_steps * p.n_iter})") shared.total_tqdm.updateTotal(total_steps * p.n_iter) + grid_infotext = [None] + def cell(x, y): pc = copy(p) x_opt.apply(pc, x, xs) y_opt.apply(pc, y, ys) - return process_images(pc) + res = process_images(pc) - if not x_opt.label == 'Nothing': - p.extra_generation_params["XY Plot X Type"] = x_opt.label - p.extra_generation_params["XY Plot X Values"] = '{' + x_values + '}' - if x_opt.label in ["Seed","Var. seed"] and not no_fixed_seeds: - p.extra_generation_params["XY Plot Fixed X Values"] = '{' + ", ".join([str(x) for x in xs])+ '}' + if grid_infotext[0] is None: + pc.extra_generation_params = copy(pc.extra_generation_params) - if not y_opt.label == 'Nothing': - p.extra_generation_params["XY Plot Y Type"] = y_opt.label - p.extra_generation_params["XY Plot Y Values"] = '{' + y_values + '}' - if y_opt.label in ["Seed","Var. 
seed"] and not no_fixed_seeds: - p.extra_generation_params["XY Plot Fixed Y Values"] = '{' + ", ".join([str(y) for y in ys])+ '}' + if x_opt.label != 'Nothing': + pc.extra_generation_params["X Type"] = x_opt.label + pc.extra_generation_params["X Values"] = x_values + if x_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: + pc.extra_generation_params["Fixed X Values"] = ", ".join([str(x) for x in xs]) + + if y_opt.label != 'Nothing': + pc.extra_generation_params["Y Type"] = y_opt.label + pc.extra_generation_params["Y Values"] = y_values + if y_opt.label in ["Seed", "Var. seed"] and not no_fixed_seeds: + pc.extra_generation_params["Fixed Y Values"] = ", ".join([str(y) for y in ys]) + + grid_infotext[0] = processing.create_infotext(pc, pc.all_prompts, pc.all_seeds, pc.all_subseeds) + + return res with SharedSettingsStackHelper(): processed = draw_xy_grid( @@ -435,6 +445,6 @@ class Script(scripts.Script): ) if opts.grid_save: - images.save_image(processed.images[0], p.outpath_grids, "xy_grid", extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) + images.save_image(processed.images[0], p.outpath_grids, "xy_grid", info=grid_infotext[0], extension=opts.grid_format, prompt=p.prompt, seed=processed.seed, grid=True, p=p) return processed From 24d4a0841d3cc0e5908b098f65a9caa3fa889af8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 20:10:40 +0300 Subject: [PATCH 117/461] train tab visual updates allow setting train tab values from ui-config.json --- modules/ui.py | 35 +++++++++++++++++++++-------------- style.css | 2 +- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 72e7b7d2..44f4f3a4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1281,42 +1281,48 @@ def create_ui(): with gr.Tab(label="Train"): gr.HTML(value="
Train an embedding or Hypernetwork; you must specify a directory with a set of 1:1 ratio images [wiki]
") - with gr.Row(): + with FormRow(): train_embedding_name = gr.Dropdown(label='Embedding', elem_id="train_embedding", choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) create_refresh_button(train_embedding_name, sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings, lambda: {"choices": sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())}, "refresh_train_embedding_name") - with gr.Row(): + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', elem_id="train_hypernetwork", choices=[x for x in shared.hypernetworks.keys()]) create_refresh_button(train_hypernetwork_name, shared.reload_hypernetworks, lambda: {"choices": sorted([x for x in shared.hypernetworks.keys()])}, "refresh_train_hypernetwork_name") - with gr.Row(): + + with FormRow(): embedding_learn_rate = gr.Textbox(label='Embedding Learning rate', placeholder="Embedding Learning rate", value="0.005", elem_id="train_embedding_learn_rate") hypernetwork_learn_rate = gr.Textbox(label='Hypernetwork Learning rate', placeholder="Hypernetwork Learning rate", value="0.00001", elem_id="train_hypernetwork_learn_rate") - with gr.Row(): + with FormRow(): clip_grad_mode = gr.Dropdown(value="disabled", label="Gradient Clipping", choices=["disabled", "value", "norm"]) clip_grad_value = gr.Textbox(placeholder="Gradient clip value", value="0.1", show_label=False) - batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") - gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + with FormRow(): + batch_size = gr.Number(label='Batch size', value=1, precision=0, elem_id="train_batch_size") + gradient_step = gr.Number(label='Gradient accumulation steps', value=1, precision=0, elem_id="train_gradient_step") + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images", elem_id="train_dataset_directory") log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion", elem_id="train_log_directory") template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"), elem_id="train_template_file") training_width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512, elem_id="train_training_width") training_height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512, elem_id="train_training_height") steps = gr.Number(label='Max steps', value=100000, precision=0, elem_id="train_steps") - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + + with FormRow(): + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_create_image_every") + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0, elem_id="train_save_embedding_every") + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True, elem_id="train_save_image_with_stored_embedding") preview_from_txt2img = gr.Checkbox(label='Read parameters 
(prompt, etc...) from txt2img tab when making previews', value=False, elem_id="train_preview_from_txt2img") - with gr.Row(): - shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") - tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") - with gr.Row(): - latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") + + shuffle_tags = gr.Checkbox(label="Shuffle tags by ',' when creating prompts.", value=False, elem_id="train_shuffle_tags") + tag_drop_out = gr.Slider(minimum=0, maximum=1, step=0.1, label="Drop out tags when creating prompts.", value=0, elem_id="train_tag_drop_out") + + latent_sampling_method = gr.Radio(label='Choose latent sampling method', value="once", choices=['once', 'deterministic', 'random'], elem_id="train_latent_sampling_method") with gr.Row(): + train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") interrupt_training = gr.Button(value="Interrupt", elem_id="train_interrupt_training") train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary', elem_id="train_train_hypernetwork") - train_embedding = gr.Button(value="Train Embedding", variant='primary', elem_id="train_train_embedding") params = script_callbacks.UiTrainTabParams(txt2img_preview_params) @@ -1803,6 +1809,7 @@ def create_ui(): visit(img2img_interface, loadsave, "img2img") visit(extras_interface, loadsave, "extras") visit(modelmerger_interface, loadsave, "modelmerger") + visit(train_interface, loadsave, "train") if not error_loading and (not os.path.exists(ui_config_file) or settings_count != len(ui_settings)): with open(ui_config_file, "w", encoding="utf8") as file: diff --git a/style.css b/style.css index 2116ec3c..09ee540b 100644 --- a/style.css +++ b/style.css @@ -611,7 +611,7 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h padding-top: 0.9em; } -#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form{ +#img2img_settings div.gr-form .gr-form, #txt2img_settings div.gr-form .gr-form, #train_tabs div.gr-form .gr-form{ border: none; padding-bottom: 0.5em; } From 81490780949fffed77493b4bd741e96ec737fe27 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 22:04:40 +0300 Subject: [PATCH 118/461] added the option to specify target resolution with possibility of truncating for hires fix; also sampling steps --- javascript/hints.js | 11 +++-- modules/generation_parameters_copypaste.py | 9 ++-- modules/processing.py | 51 +++++++++++++++++++--- modules/txt2img.py | 5 ++- modules/ui.py | 24 +++++++--- 5 files changed, 81 insertions(+), 19 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 63e17e05..dda66e09 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -81,9 +81,6 @@ titles = { "vram": "Torch active: Peak amount of VRAM used by Torch during generation, excluding cached data.\nTorch reserved: Peak amount of VRAM allocated by Torch, including all active and cached data.\nSys VRAM: Peak amount of VRAM allocation across all applications / total GPU VRAM (peak utilization%).", - "Highres. 
fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition", - "Scale latent": "Uscale the image in latent space. Alternative is to produce the full image from latent representation, upscale that, and then move it back to latent space.", - "Eta noise seed delta": "If this values is non-zero, it will be added to seed and used to initialize RNG for noises when using samplers with Eta. You can use this to produce even more variation of images, or you can use this to match images of other software if you know what you are doing.", "Do not add watermark to images": "If this option is enabled, watermark will not be added to created images. Warning: if you do not add watermark, you may be behaving in an unethical manner.", @@ -100,7 +97,13 @@ titles = { "Clip skip": "Early stopping parameter for CLIP model; 1 is stop at last layer as usual, 2 is stop at penultimate layer, etc.", "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resoluton and lower quality.", - "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality." + "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality.", + + "Hires. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition", + "Hires steps": "Number of sampling steps for upscaled picture. If 0, uses same as for original.", + "Upscale by": "Adjusts the size of the image by multiplying the original width and height by the selected value. Ignored if either Resize width to or Resize height to are non-zero.", + "Resize width to": "Resizes image to this width. If 0, width is inferred from either of two nearby sliders.", + "Resize height to": "Resizes image to this height. If 0, height is inferred from either of two nearby sliders." 
} diff --git a/modules/generation_parameters_copypaste.py b/modules/generation_parameters_copypaste.py index 4baf4d9a..12a9de3d 100644 --- a/modules/generation_parameters_copypaste.py +++ b/modules/generation_parameters_copypaste.py @@ -212,11 +212,10 @@ def restore_old_hires_fix_params(res): firstpass_width = math.ceil(scale * width / 64) * 64 firstpass_height = math.ceil(scale * height / 64) * 64 - hr_scale = width / firstpass_width if firstpass_width > 0 else height / firstpass_height - res['Size-1'] = firstpass_width res['Size-2'] = firstpass_height - res['Hires upscale'] = hr_scale + res['Hires resize-1'] = width + res['Hires resize-2'] = height def parse_generation_parameters(x: str): @@ -276,6 +275,10 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model hypernet_hash = res.get("Hypernet hash", None) res["Hypernet"] = find_hypernetwork_key(hypernet_name, hypernet_hash) + if "Hires resize-1" not in res: + res["Hires resize-1"] = 0 + res["Hires resize-2"] = 0 + restore_old_hires_fix_params(res) return res diff --git a/modules/processing.py b/modules/processing.py index 47712159..9cad05f2 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -662,12 +662,17 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): sampler = None - def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, **kwargs): + def __init__(self, enable_hr: bool = False, denoising_strength: float = 0.75, firstphase_width: int = 0, firstphase_height: int = 0, hr_scale: float = 2.0, hr_upscaler: str = None, hr_second_pass_steps: int = 0, hr_resize_x: int = 0, hr_resize_y: int = 0, **kwargs): super().__init__(**kwargs) self.enable_hr = enable_hr self.denoising_strength = denoising_strength self.hr_scale = hr_scale self.hr_upscaler = hr_upscaler + self.hr_second_pass_steps = hr_second_pass_steps + self.hr_resize_x = hr_resize_x + self.hr_resize_y = hr_resize_y + self.hr_upscale_to_x = hr_resize_x + self.hr_upscale_to_y = hr_resize_y if firstphase_width != 0 or firstphase_height != 0: print("firstphase_width/firstphase_height no longer supported; use hr_scale", file=sys.stderr) @@ -675,6 +680,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.width = firstphase_width self.height = firstphase_height + self.truncate_x = 0 + self.truncate_y = 0 + def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: if state.job_count == -1: @@ -682,7 +690,38 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): else: state.job_count = state.job_count * 2 - self.extra_generation_params["Hires upscale"] = self.hr_scale + if self.hr_resize_x == 0 and self.hr_resize_y == 0: + self.extra_generation_params["Hires upscale"] = self.hr_scale + self.hr_upscale_to_x = int(self.width * self.hr_scale) + self.hr_upscale_to_y = int(self.height * self.hr_scale) + else: + self.extra_generation_params["Hires resize"] = f"{self.hr_resize_x}x{self.hr_resize_y}" + + if self.hr_resize_y == 0: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + elif self.hr_resize_x == 0: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + else: + target_w = self.hr_resize_x + target_h = self.hr_resize_y + src_ratio = self.width / self.height + 
dst_ratio = self.hr_resize_x / self.hr_resize_y + + if src_ratio < dst_ratio: + self.hr_upscale_to_x = self.hr_resize_x + self.hr_upscale_to_y = self.hr_resize_x * self.height // self.width + else: + self.hr_upscale_to_x = self.hr_resize_y * self.width // self.height + self.hr_upscale_to_y = self.hr_resize_y + + self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f + self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + + if self.hr_second_pass_steps: + self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps + if self.hr_upscaler is not None: self.extra_generation_params["Hires upscaler"] = self.hr_upscaler @@ -699,8 +738,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): if not self.enable_hr: return samples - target_width = int(self.width * self.hr_scale) - target_height = int(self.height * self.hr_scale) + target_width = self.hr_upscale_to_x + target_height = self.hr_upscale_to_y def save_intermediate(image, index): """saves image before applying hires fix, if enabled in options; takes as an argument either an image or batch with latent space images""" @@ -755,13 +794,15 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model) + samples = samples[:, :, self.truncate_y//2:samples.shape[2]-(self.truncate_y+1)//2, self.truncate_x//2:samples.shape[3]-(self.truncate_x+1)//2] + noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, p=self) # GC now before running the next img2img to prevent running out of memory x = None devices.torch_gc() - samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps, image_conditioning=image_conditioning) + samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.hr_second_pass_steps or self.steps, image_conditioning=image_conditioning) return samples diff --git a/modules/txt2img.py b/modules/txt2img.py index e189a899..38b5f591 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -8,7 +8,7 @@ import modules.processing as processing from modules.ui import plaintext_to_html -def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, *args): +def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, steps: int, sampler_index: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, enable_hr: bool, denoising_strength: float, hr_scale: float, hr_upscaler: str, hr_second_pass_steps: int, hr_resize_x: int, hr_resize_y: int, *args): p = StableDiffusionProcessingTxt2Img( sd_model=shared.sd_model, outpath_samples=opts.outdir_samples or opts.outdir_txt2img_samples, @@ -35,6 +35,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: denoising_strength=denoising_strength if enable_hr else None, hr_scale=hr_scale, hr_upscaler=hr_upscaler, + 
hr_second_pass_steps=hr_second_pass_steps, + hr_resize_x=hr_resize_x, + hr_resize_y=hr_resize_y, ) p.scripts = modules.scripts.scripts_txt2img diff --git a/modules/ui.py b/modules/ui.py index 44f4f3a4..04091e67 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -637,10 +637,10 @@ def create_sampler_and_steps_selection(choices, tabname): with FormRow(elem_id=f"sampler_selection_{tabname}"): sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") sampler_index.save_to_config = True - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) else: with FormGroup(elem_id=f"sampler_selection_{tabname}"): - steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling Steps", value=20) + steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) sampler_index = gr.Radio(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") return steps, sampler_index @@ -709,10 +709,16 @@ def create_ui(): enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") elif category == "hires_fix": - with FormRow(visible=False, elem_id="txt2img_hires_fix") as hr_options: - hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) - hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") - denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: + with FormRow(elem_id="txt2img_hires_fix_row1"): + hr_upscaler = gr.Dropdown(label="Upscaler", elem_id="txt2img_hr_upscaler", choices=[*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]], value=shared.latent_upscale_default_mode) + hr_second_pass_steps = gr.Slider(minimum=0, maximum=150, step=1, label='Hires steps', value=0, elem_id="txt2img_hires_steps") + denoising_strength = gr.Slider(minimum=0.0, maximum=1.0, step=0.01, label='Denoising strength', value=0.7, elem_id="txt2img_denoising_strength") + + with FormRow(elem_id="txt2img_hires_fix_row2"): + hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") + hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") + hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") elif category == "batch": if not opts.dimensions_and_batch_together: @@ -753,6 +759,9 @@ def create_ui(): denoising_strength, hr_scale, hr_upscaler, + hr_second_pass_steps, + hr_resize_x, + hr_resize_y, ] + custom_inputs, outputs=[ @@ -804,6 +813,9 @@ def create_ui(): (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), (hr_scale, "Hires upscale"), (hr_upscaler, "Hires upscaler"), + (hr_second_pass_steps, "Hires steps"), + (hr_resize_x, "Hires resize-1"), + (hr_resize_y, "Hires resize-2"), 
*modules.scripts.scripts_txt2img.infotext_fields ] parameters_copypaste.add_paste_fields("txt2img", None, txt2img_paste_fields) From 1288a3bb7d21064e5bd0af7158a3840886027c51 Mon Sep 17 00:00:00 2001 From: Suffocate <70031311+lolsuffocate@users.noreply.github.com> Date: Wed, 4 Jan 2023 20:36:30 +0000 Subject: [PATCH 119/461] Use the read_info_from_image function directly --- modules/api/api.py | 16 ++++++++++++---- modules/api/models.py | 5 +++-- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 48a70a44..2103709b 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -11,10 +11,10 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack +from modules import sd_samplers, deepbooru, sd_hijack, images from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images -from modules.extras import run_extras, run_pnginfo +from modules.extras import run_extras from modules.textual_inversion.textual_inversion import create_embedding, train_embedding from modules.textual_inversion.preprocess import preprocess from modules.hypernetworks.hypernetwork import create_hypernetwork, train_hypernetwork @@ -233,9 +233,17 @@ class Api: if(not req.image.strip()): return PNGInfoResponse(info="") - result = run_pnginfo(decode_base64_to_image(req.image.strip())) + image = decode_base64_to_image(req.image.strip()) + if image is None: + return PNGInfoResponse(info="") - return PNGInfoResponse(info=result[1]) + geninfo, items = images.read_info_from_image(image) + if geninfo is None: + geninfo = "" + + items = {**{'parameters': geninfo}, **items} + + return PNGInfoResponse(info=geninfo, items=items) def progressapi(self, req: ProgressRequest = Depends()): # copy from check_progress_call of ui.py diff --git a/modules/api/models.py b/modules/api/models.py index 4a632c68..d8198a27 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -157,7 +157,8 @@ class PNGInfoRequest(BaseModel): image: str = Field(title="Image", description="The base64 encoded PNG image") class PNGInfoResponse(BaseModel): - info: str = Field(title="Image info", description="A string with all the info the image had") + info: str = Field(title="Image info", description="A string with the parameters used to generate the image") + items: dict = Field(title="Items", description="An object containing all the info the image had") class ProgressRequest(BaseModel): skip_current_image: bool = Field(default=False, title="Skip current image", description="Skip current image serialization") @@ -258,4 +259,4 @@ class EmbeddingItem(BaseModel): class EmbeddingsResponse(BaseModel): loaded: Dict[str, EmbeddingItem] = Field(title="Loaded", description="Embeddings loaded for the current model") - skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") \ No newline at end of file + skipped: Dict[str, EmbeddingItem] = Field(title="Skipped", description="Embeddings skipped for the current model (likely due to architecture incompatibility)") From bc43293c640aef65df3136de9e5bd8b7e79eb3e0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 4 Jan 2023 23:56:43 +0300 Subject: [PATCH 120/461] fix incorrect display/calculation for number of steps for hires fix in progress 
bars --- modules/processing.py | 9 ++++++--- modules/sd_samplers.py | 5 +++-- modules/shared.py | 4 +++- 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 9cad05f2..f28e7212 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -685,10 +685,13 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: - if state.job_count == -1: - state.job_count = self.n_iter * 2 - else: + if not state.processing_has_refined_job_count: + if state.job_count == -1: + state.job_count = self.n_iter + + shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count) state.job_count = state.job_count * 2 + state.processing_has_refined_job_count = True if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index e904d860..3851a77f 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -97,8 +97,9 @@ sampler_extra_params = { def setup_img2img_steps(p, steps=None): if opts.img2img_fix_steps or steps is not None: - steps = int((steps or p.steps) / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0 - t_enc = p.steps - 1 + requested_steps = (steps or p.steps) + steps = int(requested_steps / min(p.denoising_strength, 0.999)) if p.denoising_strength > 0 else 0 + t_enc = requested_steps - 1 else: steps = p.steps t_enc = int(min(p.denoising_strength, 0.999) * steps) diff --git a/modules/shared.py b/modules/shared.py index 54a6ba23..04c545ee 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -153,6 +153,7 @@ class State: job = "" job_no = 0 job_count = 0 + processing_has_refined_job_count = False job_timestamp = '0' sampling_step = 0 sampling_steps = 0 @@ -194,6 +195,7 @@ class State: def begin(self): self.sampling_step = 0 self.job_count = -1 + self.processing_has_refined_job_count = False self.job_no = 0 self.job_timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S") self.current_latent = None @@ -608,7 +610,7 @@ class TotalTQDM: return if self._tqdm is None: self.reset() - self._tqdm.total=new_total + self._tqdm.total = new_total def clear(self): if self._tqdm is not None: From 5851bc839b6f639cda59e84eb1ee8c706986633d Mon Sep 17 00:00:00 2001 From: me <25877290+Kryptortio@users.noreply.github.com> Date: Wed, 4 Jan 2023 22:03:32 +0100 Subject: [PATCH 121/461] Add element ids for script components and a few more in ui.py --- modules/ui.py | 16 ++++++++-------- scripts/custom_code.py | 4 +++- scripts/img2imgalt.py | 22 ++++++++++++---------- scripts/loopback.py | 6 ++++-- scripts/outpainting_mk_2.py | 12 +++++++----- scripts/poor_mans_outpainting.py | 10 ++++++---- scripts/prompt_matrix.py | 6 ++++-- scripts/prompts_from_file.py | 10 ++++++---- scripts/sd_upscale.py | 8 +++++--- scripts/xy_grid.py | 15 ++++++++------- 10 files changed, 63 insertions(+), 46 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 04091e67..bb64fe20 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -560,7 +560,7 @@ Requested path was: {f} generation_info = None with gr.Column(): with gr.Row(elem_id=f"image_buttons_{tabname}"): - open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder') + open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else 
f'open_folder_{tabname}') if tabname != "extras": save = gr.Button('Save', elem_id=f'save_{tabname}') @@ -576,13 +576,13 @@ Requested path was: {f} if tabname != "extras": with gr.Row(): - download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) + download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}') with gr.Group(): - html_info = gr.HTML() - html_log = gr.HTML() + html_info = gr.HTML(elem_id=f'html_info_{tabname}') + html_log = gr.HTML(elem_id=f'html_log_{tabname}') - generation_info = gr.Textbox(visible=False) + generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}') if tabname == 'txt2img' or tabname == 'img2img': generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button") generation_info_button.click( @@ -624,9 +624,9 @@ Requested path was: {f} ) else: - html_info_x = gr.HTML() - html_info = gr.HTML() - html_log = gr.HTML() + html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}') + html_info = gr.HTML(elem_id=f'html_info_{tabname}') + html_log = gr.HTML(elem_id=f'html_log_{tabname}') parameters_copypaste.bind_buttons(buttons, result_gallery, "txt2img" if tabname == "txt2img" else None) return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log diff --git a/scripts/custom_code.py b/scripts/custom_code.py index 22e7b77a..841fed97 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -14,7 +14,9 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - code = gr.Textbox(label="Python code", lines=1) + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_custom_code_' + + code = gr.Textbox(label="Python code", lines=1, elem_id=elem_prefix + "code") return [code] diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 1229f61b..cddd46e7 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -126,24 +126,26 @@ class Script(scripts.Script): return is_img2img def ui(self, is_img2img): + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_i2i_alternative_test_' + info = gr.Markdown(''' * `CFG Scale` should be 2 or lower. 
''') - override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True) + override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=elem_prefix + "override_sampler") - override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True) - original_prompt = gr.Textbox(label="Original prompt", lines=1) - original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1) + override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=elem_prefix + "override_prompt") + original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=elem_prefix + "original_prompt") + original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=elem_prefix + "original_negative_prompt") - override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True) - st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50) + override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=elem_prefix + "override_steps") + st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=elem_prefix + "st") - override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True) + override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=elem_prefix + "override_strength") - cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0) - randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0) - sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False) + cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=elem_prefix + "cfg") + randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=elem_prefix + "randomness") + sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=elem_prefix + "sigma_adjustment") return [ info, diff --git a/scripts/loopback.py b/scripts/loopback.py index d8c68af8..5c1265a0 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -17,8 +17,10 @@ class Script(scripts.Script): return is_img2img def ui(self, is_img2img): - loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4) - denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1) + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_loopback_' + + loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=elem_prefix + "loops") + denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=elem_prefix + "denoising_strength_change_factor") return [loops, denoising_strength_change_factor] diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index cf71cb92..760cce64 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -129,13 +129,15 @@ class Script(scripts.Script): if not is_img2img: return None + elem_prefix = ('i2i' if is_img2img else 't2i') + 
'_script_outpainting_mk_2_' + info = gr.HTML("
Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
") - pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8) - direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) - noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0) - color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05) + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels") + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=elem_prefix + "mask_blur") + direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction") + noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=elem_prefix + "noise_q") + color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=elem_prefix + "color_variation") return [info, pixels, mask_blur, direction, noise_q, color_variation] diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index ea45beb0..6bcdcc02 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -21,10 +21,12 @@ class Script(scripts.Script): if not is_img2img: return None - pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128) - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4) - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index") - direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down']) + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_poor_mans_outpainting_' + + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels") + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=elem_prefix + "mask_blur") + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=elem_prefix + "inpainting_fill") + direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction") return [pixels, mask_blur, inpainting_fill, direction] diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index 4c79eaef..59172315 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -45,8 +45,10 @@ class Script(scripts.Script): return "Prompt matrix" def ui(self, is_img2img): - put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False) - different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False) + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_matrix_' + + put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=elem_prefix + "put_at_start") + different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=elem_prefix + 
"different_seeds") return [put_at_start, different_seeds] diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index e8386ed2..fc8ddd8a 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -112,11 +112,13 @@ class Script(scripts.Script): return "Prompts from file or textbox" def ui(self, is_img2img): - checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False) - checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False) + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_from_file_' + + checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=elem_prefix + "checkbox_iterate") + checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=elem_prefix + "checkbox_iterate_batch") - prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1) - file = gr.File(label="Upload prompt inputs", type='bytes') + prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=elem_prefix + "prompt_txt") + file = gr.File(label="Upload prompt inputs", type='bytes', elem_id=elem_prefix + "file") file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt]) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 9739545c..9f483a67 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -17,10 +17,12 @@ class Script(scripts.Script): return is_img2img def ui(self, is_img2img): + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_sd_upscale_' + info = gr.HTML("
Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") - overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64) - scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0) - upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index") + overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=elem_prefix + "overlap") + scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=elem_prefix + "scale_factor") + upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=elem_prefix + "upscaler_index") return [info, overlap, upscaler_index, scale_factor] diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 78ff12c5..90226ccd 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -292,18 +292,19 @@ class Script(scripts.Script): def ui(self, is_img2img): current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img] + elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_xy_grid_' with gr.Row(): - x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id="x_type") - x_values = gr.Textbox(label="X values", lines=1) + x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=elem_prefix + "x_type") + x_values = gr.Textbox(label="X values", lines=1, elem_id=elem_prefix + "x_values") with gr.Row(): - y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id="y_type") - y_values = gr.Textbox(label="Y values", lines=1) + y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id=elem_prefix + "y_type") + y_values = gr.Textbox(label="Y values", lines=1, elem_id=elem_prefix + "y_values") - draw_legend = gr.Checkbox(label='Draw legend', value=True) - include_lone_images = gr.Checkbox(label='Include Separate Images', value=False) - no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False) + draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=elem_prefix + "draw_legend") + include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=elem_prefix + "include_lone_images") + no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=elem_prefix + "no_fixed_seeds") return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds] From b663ee2cff6831354e1b5326800c8d1bf300cafe Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 00:36:10 +0300 Subject: [PATCH 122/461] fix fullscreen view showing wrong image on firefox --- javascript/imageviewer.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 67916536..97f56c07 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -148,7 +148,7 @@ function showGalleryImage() { if(e && e.parentElement.tagName == 'DIV'){ e.style.cursor='pointer' e.style.userSelect='none' - e.addEventListener('click', function (evt) { + e.addEventListener('mousedown', function (evt) { 
if(!opts.js_modal_lightbox) return; modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed) showModal(evt) From 99b67cff0b48c4a1ad6e14d9cc591b11db6e293c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 01:25:52 +0300 Subject: [PATCH 123/461] make hires fix not do anything if the user chooses the second pass resolution to be the same as first pass resolution --- modules/processing.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f28e7212..7e853287 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -683,16 +683,9 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_x = 0 self.truncate_y = 0 + def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: - if not state.processing_has_refined_job_count: - if state.job_count == -1: - state.job_count = self.n_iter - - shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count) - state.job_count = state.job_count * 2 - state.processing_has_refined_job_count = True - if self.hr_resize_x == 0 and self.hr_resize_y == 0: self.extra_generation_params["Hires upscale"] = self.hr_scale self.hr_upscale_to_x = int(self.width * self.hr_scale) @@ -722,6 +715,22 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_x = (self.hr_upscale_to_x - target_w) // opt_f self.truncate_y = (self.hr_upscale_to_y - target_h) // opt_f + # special case: the user has chosen to do nothing + if self.hr_upscale_to_x == self.width and self.hr_upscale_to_y == self.height: + self.enable_hr = False + self.denoising_strength = None + self.extra_generation_params.pop("Hires upscale", None) + self.extra_generation_params.pop("Hires resize", None) + return + + if not state.processing_has_refined_job_count: + if state.job_count == -1: + state.job_count = self.n_iter + + shared.total_tqdm.updateTotal((self.steps + (self.hr_second_pass_steps or self.steps)) * state.job_count) + state.job_count = state.job_count * 2 + state.processing_has_refined_job_count = True + if self.hr_second_pass_steps: self.extra_generation_params["Hires steps"] = self.hr_second_pass_steps From 066390eb5683945a6e094a817584ada6b1f7118e Mon Sep 17 00:00:00 2001 From: Wes Roberts Date: Wed, 4 Jan 2023 17:58:16 -0500 Subject: [PATCH 124/461] Fixes webui.sh to exec LAUNCH_SCRIPT --- webui.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/webui.sh b/webui.sh index 04ecbf76..c4d6521d 100755 --- a/webui.sh +++ b/webui.sh @@ -160,10 +160,10 @@ then printf "\n%s\n" "${delimiter}" printf "Accelerating launch.py..." printf "\n%s\n" "${delimiter}" - accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@" + exec accelerate launch --num_cpu_threads_per_process=6 "${LAUNCH_SCRIPT}" "$@" else printf "\n%s\n" "${delimiter}" printf "Launching launch.py..." 
printf "\n%s\n" "${delimiter}" - "${python_cmd}" "${LAUNCH_SCRIPT}" "$@" + exec "${python_cmd}" "${LAUNCH_SCRIPT}" "$@" fi From 5f4fa942b8ec3ed3b15a352903489d6f9e6eb46e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 02:38:52 +0300 Subject: [PATCH 125/461] do not show full window image preview when right mouse button is used --- javascript/imageviewer.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 97f56c07..b7bc2fe1 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -149,7 +149,7 @@ function showGalleryImage() { e.style.cursor='pointer' e.style.userSelect='none' e.addEventListener('mousedown', function (evt) { - if(!opts.js_modal_lightbox) return; + if(!opts.js_modal_lightbox || evt.button != 0) return; modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed) showModal(evt) }, true); From 2e30997450835ed8f80ab5e8f02f7d4c7f26dd3f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 10:21:17 +0300 Subject: [PATCH 126/461] move sd_model assignment to the place where we change the sd_model --- modules/processing.py | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index a12bd9e8..61e97077 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -466,12 +466,16 @@ def process_images(p: StableDiffusionProcessing) -> Processed: try: for k, v in p.override_settings.items(): setattr(opts, k, v) - if k == 'sd_hypernetwork': shared.reload_hypernetworks() # make onchange call for changing hypernet - if k == 'sd_model_checkpoint': sd_models.reload_model_weights() # make onchange call for changing SD model - if k == 'sd_vae': sd_vae.reload_vae_weights() # make onchange call for changing VAE + if k == 'sd_hypernetwork': + shared.reload_hypernetworks() # make onchange call for changing hypernet + + if k == 'sd_model_checkpoint': + sd_models.reload_model_weights() # make onchange call for changing SD model + p.sd_model = shared.sd_model + + if k == 'sd_vae': + sd_vae.reload_vae_weights() # make onchange call for changing VAE - # Assign sd_model here to ensure that it reflects the model after any changes - p.sd_model = shared.sd_model res = process_images_inner(p) finally: From c3109fa18a5a105eea5e343875b540939884f304 Mon Sep 17 00:00:00 2001 From: me <25877290+Kryptortio@users.noreply.github.com> Date: Thu, 5 Jan 2023 08:27:09 +0100 Subject: [PATCH 127/461] Adjusted prefix from i2i/t2i to txt2img and img2img and removed those prefixes from img exclusive scripts --- scripts/custom_code.py | 2 +- scripts/img2imgalt.py | 2 +- scripts/loopback.py | 2 +- scripts/outpainting_mk_2.py | 2 +- scripts/poor_mans_outpainting.py | 2 +- scripts/prompt_matrix.py | 2 +- scripts/prompts_from_file.py | 2 +- scripts/sd_upscale.py | 2 +- scripts/xy_grid.py | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/scripts/custom_code.py b/scripts/custom_code.py index 841fed97..b3bbee03 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -14,7 +14,7 @@ class Script(scripts.Script): return cmd_opts.allow_code def ui(self, is_img2img): - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_custom_code_' + elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_custom_code_' code = gr.Textbox(label="Python code", lines=1, elem_id=elem_prefix + "code") diff --git a/scripts/img2imgalt.py 
b/scripts/img2imgalt.py index cddd46e7..c062dd24 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -126,7 +126,7 @@ class Script(scripts.Script): return is_img2img def ui(self, is_img2img): - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_i2i_alternative_test_' + elem_prefix = 'script_i2i_alternative_test_' info = gr.Markdown(''' * `CFG Scale` should be 2 or lower. diff --git a/scripts/loopback.py b/scripts/loopback.py index 5c1265a0..93eda1eb 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -17,7 +17,7 @@ class Script(scripts.Script): return is_img2img def ui(self, is_img2img): - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_loopback_' + elem_prefix = 'script_loopback_' loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=elem_prefix + "loops") denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=elem_prefix + "denoising_strength_change_factor") diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 760cce64..c37bc238 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -129,7 +129,7 @@ class Script(scripts.Script): if not is_img2img: return None - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_outpainting_mk_2_' + elem_prefix = 'script_outpainting_mk_2_' info = gr.HTML("
Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
") diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index 6bcdcc02..784ee422 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -21,7 +21,7 @@ class Script(scripts.Script): if not is_img2img: return None - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_poor_mans_outpainting_' + elem_prefix = 'script_poor_mans_outpainting_' pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels") mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=elem_prefix + "mask_blur") diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index 59172315..f610c334 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -45,7 +45,7 @@ class Script(scripts.Script): return "Prompt matrix" def ui(self, is_img2img): - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_matrix_' + elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_matrix_' put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=elem_prefix + "put_at_start") different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=elem_prefix + "different_seeds") diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index fc8ddd8a..c6a0b709 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -112,7 +112,7 @@ class Script(scripts.Script): return "Prompts from file or textbox" def ui(self, is_img2img): - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_prompt_from_file_' + elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_from_file_' checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=elem_prefix + "checkbox_iterate") checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=elem_prefix + "checkbox_iterate_batch") diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 9f483a67..2aeeb106 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -17,7 +17,7 @@ class Script(scripts.Script): return is_img2img def ui(self, is_img2img): - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_sd_upscale_' + elem_prefix = 'script_sd_upscale_' info = gr.HTML("
Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=elem_prefix + "overlap") diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 90226ccd..8c9cfb9b 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -292,7 +292,7 @@ class Script(scripts.Script): def ui(self, is_img2img): current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img] - elem_prefix = ('i2i' if is_img2img else 't2i') + '_script_xy_grid_' + elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_xy_grid_' with gr.Row(): x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=elem_prefix + "x_type") From 42fcc79bd31e5e5485f1cf115ad505cc623d0ac9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 10:43:21 +0300 Subject: [PATCH 128/461] add Discard penultimate sigma to infotext --- modules/sd_samplers.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 31b255a3..01221b89 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -463,8 +463,12 @@ class KDiffusionSampler: return extra_params_kwargs def get_sigmas(self, p, steps): - disc = opts.always_discard_next_to_last_sigma or (self.config is not None and self.config.options.get('discard_next_to_last_sigma', False)) - steps += 1 if disc else 0 + discard_next_to_last_sigma = self.config is not None and self.config.options.get('discard_next_to_last_sigma', False) + if opts.always_discard_next_to_last_sigma and not discard_next_to_last_sigma: + discard_next_to_last_sigma = True + p.extra_generation_params["Discard penultimate sigma"] = True + + steps += 1 if discard_next_to_last_sigma else 0 if p.sampler_noise_scheduler_override: sigmas = p.sampler_noise_scheduler_override(steps) @@ -475,7 +479,7 @@ class KDiffusionSampler: else: sigmas = self.model_wrap.get_sigmas(steps) - if disc: + if discard_next_to_last_sigma: sigmas = torch.cat([sigmas[:-2], sigmas[-1:]]) return sigmas From f185baeb28f348e4ec97cd7070ed219b5f74a48e Mon Sep 17 00:00:00 2001 From: me <25877290+Kryptortio@users.noreply.github.com> Date: Thu, 5 Jan 2023 09:29:07 +0100 Subject: [PATCH 129/461] Refactor elem_prefix as function elem_id --- scripts/custom_code.py | 9 ++++++--- scripts/img2imgalt.py | 30 +++++++++++++++++------------- scripts/loopback.py | 15 ++++++++++----- scripts/outpainting_mk_2.py | 18 +++++++++++------- scripts/poor_mans_outpainting.py | 17 ++++++++++------- scripts/prompt_matrix.py | 14 +++++++++----- scripts/prompts_from_file.py | 18 +++++++++++------- scripts/sd_upscale.py | 16 ++++++++++------ scripts/xy_grid.py | 20 ++++++++++++-------- 9 files changed, 96 insertions(+), 61 deletions(-) diff --git a/scripts/custom_code.py b/scripts/custom_code.py index b3bbee03..9ce1f650 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -3,20 +3,23 @@ import gradio as gr from modules.processing import Processed from modules.shared import opts, cmd_opts, state +import re class Script(scripts.Script): def title(self): return "Custom code" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id def show(self, is_img2img): return cmd_opts.allow_code def ui(self, is_img2img): - 
elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_custom_code_' - - code = gr.Textbox(label="Python code", lines=1, elem_id=elem_prefix + "code") + code = gr.Textbox(label="Python code", lines=1, elem_id=self.elem_id("code")) return [code] diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index c062dd24..7555e874 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -16,6 +16,7 @@ import k_diffusion as K from PIL import Image from torch import autocast from einops import rearrange, repeat +import re def find_noise_for_image(p, cond, uncond, cfg_scale, steps): @@ -122,30 +123,33 @@ class Script(scripts.Script): def title(self): return "img2img alternative test" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): - elem_prefix = 'script_i2i_alternative_test_' - + def ui(self, is_img2img): info = gr.Markdown(''' * `CFG Scale` should be 2 or lower. ''') - override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=elem_prefix + "override_sampler") + override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=self.elem_id("override_sampler")) - override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=elem_prefix + "override_prompt") - original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=elem_prefix + "original_prompt") - original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=elem_prefix + "original_negative_prompt") + override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=self.elem_id("override_prompt")) + original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=self.elem_id("original_prompt")) + original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=self.elem_id("original_negative_prompt")) - override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=elem_prefix + "override_steps") - st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=elem_prefix + "st") + override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=self.elem_id("override_steps")) + st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=self.elem_id("st")) - override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=elem_prefix + "override_strength") + override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=self.elem_id("override_strength")) - cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=elem_prefix + "cfg") - randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=elem_prefix + "randomness") - sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=elem_prefix + "sigma_adjustment") + cfg = gr.Slider(label="Decode CFG scale", 
minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=self.elem_id("cfg")) + randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=self.elem_id("randomness")) + sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment")) return [ info, diff --git a/scripts/loopback.py b/scripts/loopback.py index 93eda1eb..4df7b73f 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -8,19 +8,24 @@ from modules import processing, shared, sd_samplers, images from modules.processing import Processed from modules.sd_samplers import samplers from modules.shared import opts, cmd_opts, state +import re + class Script(scripts.Script): def title(self): return "Loopback" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): - elem_prefix = 'script_loopback_' - - loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=elem_prefix + "loops") - denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=elem_prefix + "denoising_strength_change_factor") + def ui(self, is_img2img): + loops = gr.Slider(minimum=1, maximum=32, step=1, label='Loops', value=4, elem_id=self.elem_id("loops")) + denoising_strength_change_factor = gr.Slider(minimum=0.9, maximum=1.1, step=0.01, label='Denoising strength change factor', value=1, elem_id=self.elem_id("denoising_strength_change_factor")) return [loops, denoising_strength_change_factor] diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index c37bc238..b4a0dc73 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -10,6 +10,7 @@ from PIL import Image, ImageDraw from modules import images, processing, devices from modules.processing import Processed, process_images from modules.shared import opts, cmd_opts, state +import re # this function is taken from https://github.com/parlance-zz/g-diffuser-bot @@ -122,6 +123,11 @@ class Script(scripts.Script): def title(self): return "Outpainting mk2" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + def show(self, is_img2img): return is_img2img @@ -129,15 +135,13 @@ class Script(scripts.Script): if not is_img2img: return None - elem_prefix = 'script_outpainting_mk_2_' - info = gr.HTML("
Recommended settings: Sampling Steps: 80-100, Sampler: Euler a, Denoising strength: 0.8
") - pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels") - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=elem_prefix + "mask_blur") - direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction") - noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=elem_prefix + "noise_q") - color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=elem_prefix + "color_variation") + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels")) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=8, elem_id=self.elem_id("mask_blur")) + direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=self.elem_id("direction")) + noise_q = gr.Slider(label="Fall-off exponent (lower=higher detail)", minimum=0.0, maximum=4.0, step=0.01, value=1.0, elem_id=self.elem_id("noise_q")) + color_variation = gr.Slider(label="Color variation", minimum=0.0, maximum=1.0, step=0.01, value=0.05, elem_id=self.elem_id("color_variation")) return [info, pixels, mask_blur, direction, noise_q, color_variation] diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index 784ee422..1c7dc467 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -7,26 +7,29 @@ from PIL import Image, ImageDraw from modules import images, processing, devices from modules.processing import Processed, process_images from modules.shared import opts, cmd_opts, state - +import re class Script(scripts.Script): def title(self): return "Poor man's outpainting" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + def show(self, is_img2img): return is_img2img def ui(self, is_img2img): if not is_img2img: return None - - elem_prefix = 'script_poor_mans_outpainting_' - pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=elem_prefix + "pixels") - mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=elem_prefix + "mask_blur") - inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=elem_prefix + "inpainting_fill") - direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], elem_id=elem_prefix + "direction") + pixels = gr.Slider(label="Pixels to expand", minimum=8, maximum=256, step=8, value=128, elem_id=self.elem_id("pixels")) + mask_blur = gr.Slider(label='Mask blur', minimum=0, maximum=64, step=1, value=4, elem_id=self.elem_id("mask_blur")) + inpainting_fill = gr.Radio(label='Masked content', choices=['fill', 'original', 'latent noise', 'latent nothing'], value='fill', type="index", elem_id=self.elem_id("inpainting_fill")) + direction = gr.CheckboxGroup(label="Outpainting direction", choices=['left', 'right', 'up', 'down'], value=['left', 'right', 'up', 'down'], 
elem_id=self.elem_id("direction")) return [pixels, mask_blur, inpainting_fill, direction] diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index f610c334..278d2e68 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -10,6 +10,7 @@ from modules import images from modules.processing import process_images, Processed from modules.shared import opts, cmd_opts, state import modules.sd_samplers +import re def draw_xy_grid(xs, ys, x_label, y_label, cell): @@ -44,11 +45,14 @@ class Script(scripts.Script): def title(self): return "Prompt matrix" - def ui(self, is_img2img): - elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_matrix_' - - put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=elem_prefix + "put_at_start") - different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=elem_prefix + "different_seeds") + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + + def ui(self, is_img2img): + put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start")) + different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds")) return [put_at_start, different_seeds] diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index c6a0b709..5c84c3e9 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -13,6 +13,7 @@ from modules import sd_samplers from modules.processing import Processed, process_images from PIL import Image from modules.shared import opts, cmd_opts, state +import re def process_string_tag(tag): @@ -111,14 +112,17 @@ class Script(scripts.Script): def title(self): return "Prompts from file or textbox" - def ui(self, is_img2img): - elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_prompt_from_file_' - - checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=elem_prefix + "checkbox_iterate") - checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=elem_prefix + "checkbox_iterate_batch") + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id - prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=elem_prefix + "prompt_txt") - file = gr.File(label="Upload prompt inputs", type='bytes', elem_id=elem_prefix + "file") + def ui(self, is_img2img): + checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate")) + checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch")) + + prompt_txt = gr.Textbox(label="List of prompt inputs", lines=1, elem_id=self.elem_id("prompt_txt")) + file = gr.File(label="Upload prompt inputs", type='bytes', elem_id=self.elem_id("file")) file.change(fn=load_prompt_file, inputs=[file], outputs=[file, prompt_txt, prompt_txt]) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 2aeeb106..247e755b 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py 
@@ -7,22 +7,26 @@ from PIL import Image from modules import processing, shared, sd_samplers, images, devices from modules.processing import Processed from modules.shared import opts, cmd_opts, state +import re class Script(scripts.Script): def title(self): return "SD upscale" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + def show(self, is_img2img): return is_img2img - def ui(self, is_img2img): - elem_prefix = 'script_sd_upscale_' - + def ui(self, is_img2img): info = gr.HTML("
Will upscale the image by the selected scale factor; use width and height sliders to set tile size
") - overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=elem_prefix + "overlap") - scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=elem_prefix + "scale_factor") - upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=elem_prefix + "upscaler_index") + overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, elem_id=self.elem_id("overlap")) + scale_factor = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label='Scale Factor', value=2.0, elem_id=self.elem_id("scale_factor")) + upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", elem_id=self.elem_id("upscaler_index")) return [info, overlap, upscaler_index, scale_factor] diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 8c9cfb9b..b277a439 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -290,21 +290,25 @@ class Script(scripts.Script): def title(self): return "X/Y plot" + def elem_id(self, item_id): + gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id + gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) + return gen_elem_id + def ui(self, is_img2img): current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img] - elem_prefix = ('img2img' if is_img2img else 'txt2txt') + '_script_xy_grid_' with gr.Row(): - x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=elem_prefix + "x_type") - x_values = gr.Textbox(label="X values", lines=1, elem_id=elem_prefix + "x_values") + x_type = gr.Dropdown(label="X type", choices=[x.label for x in current_axis_options], value=current_axis_options[1].label, type="index", elem_id=self.elem_id("x_type")) + x_values = gr.Textbox(label="X values", lines=1, elem_id=self.elem_id("x_values")) with gr.Row(): - y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id=elem_prefix + "y_type") - y_values = gr.Textbox(label="Y values", lines=1, elem_id=elem_prefix + "y_values") + y_type = gr.Dropdown(label="Y type", choices=[x.label for x in current_axis_options], value=current_axis_options[0].label, type="index", elem_id=self.elem_id("y_type")) + y_values = gr.Textbox(label="Y values", lines=1, elem_id=self.elem_id("y_values")) - draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=elem_prefix + "draw_legend") - include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=elem_prefix + "include_lone_images") - no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=elem_prefix + "no_fixed_seeds") + draw_legend = gr.Checkbox(label='Draw legend', value=True, elem_id=self.elem_id("draw_legend")) + include_lone_images = gr.Checkbox(label='Include Separate Images', value=False, elem_id=self.elem_id("include_lone_images")) + no_fixed_seeds = gr.Checkbox(label='Keep -1 for seeds', value=False, elem_id=self.elem_id("no_fixed_seeds")) return [x_type, x_values, y_type, y_values, draw_legend, include_lone_images, no_fixed_seeds] From 997461d3dd86f51c06ea0c2eff17ce8b8b48c0af Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: 
Thu, 5 Jan 2023 11:57:01 +0300 Subject: [PATCH 130/461] add footer with versions --- html/footer.html | 4 ++++ launch.py | 20 ++++++++++++++++---- modules/ui.py | 31 ++++++++++++++++++++++++++++++- style.css | 5 +++++ 4 files changed, 55 insertions(+), 5 deletions(-) diff --git a/html/footer.html b/html/footer.html index a8f2adf7..bad87ff6 100644 --- a/html/footer.html +++ b/html/footer.html @@ -7,3 +7,7 @@  •  Reload UI +
+<div class="versions">
+{versions}
+</div>
diff --git a/launch.py b/launch.py index af0d418b..49b91b1f 100644 --- a/launch.py +++ b/launch.py @@ -13,6 +13,21 @@ dir_extensions = "extensions" python = sys.executable git = os.environ.get('GIT', "git") index_url = os.environ.get('INDEX_URL', "") +stored_commit_hash = None + + +def commit_hash(): + global stored_commit_hash + + if stored_commit_hash is not None: + return stored_commit_hash + + try: + stored_commit_hash = run(f"{git} rev-parse HEAD").strip() + except Exception: + stored_commit_hash = "" + + return stored_commit_hash def extract_arg(args, name): @@ -194,10 +209,7 @@ def prepare_environment(): xformers = '--xformers' in sys.argv ngrok = '--ngrok' in sys.argv - try: - commit = run(f"{git} rev-parse HEAD").strip() - except Exception: - commit = "" + commit = commit_hash() print(f"Python {sys.version}") print(f"Commit hash: {commit}") diff --git a/modules/ui.py b/modules/ui.py index bb64fe20..81d96c5b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1696,7 +1696,9 @@ def create_ui(): if os.path.exists("html/footer.html"): with open("html/footer.html", encoding="utf8") as file: - gr.HTML(file.read(), elem_id="footer") + footer = file.read() + footer = footer.format(versions=versions_html()) + gr.HTML(footer, elem_id="footer") text_settings = gr.Textbox(elem_id="settings_json", value=lambda: opts.dumpjson(), visible=False) settings_submit.click( @@ -1857,3 +1859,30 @@ def reload_javascript(): if not hasattr(shared, 'GradioTemplateResponseOriginal'): shared.GradioTemplateResponseOriginal = gradio.routes.templates.TemplateResponse + + +def versions_html(): + import torch + import launch + + python_version = ".".join([str(x) for x in sys.version_info[0:3]]) + commit = launch.commit_hash() + short_commit = commit[0:8] + + if shared.xformers_available: + import xformers + xformers_version = xformers.__version__ + else: + xformers_version = "N/A" + + return f""" +python: {python_version} + •  +torch: {torch.__version__} + •  +xformers: {xformers_version} + •  +gradio: {gr.__version__} + •  +commit: {short_commit} +""" diff --git a/style.css b/style.css index 09ee540b..ee74d79e 100644 --- a/style.css +++ b/style.css @@ -628,6 +628,11 @@ footer { display: inline-block; } +#footer .versions{ + font-size: 85%; + opacity: 0.85; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. 
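The launch.py part of [PATCH 130/461] above turns the one-off git call into a memoised commit_hash(), so both the startup banner and the new footer (via versions_html) can ask for the hash without shelling out twice. A standalone sketch of the same single-value cache, using subprocess from the standard library in place of the repository's own run() helper:

    import subprocess

    stored_commit_hash = None

    def commit_hash():
        global stored_commit_hash
        if stored_commit_hash is not None:
            return stored_commit_hash        # second and later calls are free
        try:
            stored_commit_hash = subprocess.check_output(
                ["git", "rev-parse", "HEAD"], encoding="utf8").strip()
        except Exception:
            stored_commit_hash = ""          # not a git checkout
        return stored_commit_hash

    print(commit_hash()[0:8])  # the footer shows only the first 8 characters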
If you change anything above, you need to make sure it is RTL compliant by just running From f8d0cf6a6ec4911559cfecb9a9d1d46b547b38e8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 12:08:11 +0300 Subject: [PATCH 131/461] rework #6329 to remove duplicate code and add prevent tab names for showing in ids for scripts that only exist on one tab --- modules/scripts.py | 10 ++++++++++ scripts/custom_code.py | 6 ------ scripts/img2imgalt.py | 6 ------ scripts/loopback.py | 6 ------ scripts/outpainting_mk_2.py | 6 ------ scripts/poor_mans_outpainting.py | 6 ------ scripts/prompt_matrix.py | 6 ------ scripts/prompts_from_file.py | 6 ------ scripts/sd_upscale.py | 6 ------ scripts/xy_grid.py | 5 ----- 10 files changed, 10 insertions(+), 53 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 722f8685..0c44f191 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -1,4 +1,5 @@ import os +import re import sys import traceback from collections import namedtuple @@ -128,6 +129,15 @@ class Script: """unused""" return "" + def elem_id(self, item_id): + """helper function to generate id for a HTML element, constructs final id out of script name, tab and user-supplied item_id""" + + need_tabname = self.show(True) == self.show(False) + tabname = ('img2img' if self.is_img2img else 'txt2txt') + "_" if need_tabname else "" + title = re.sub(r'[^a-z_0-9]', '', re.sub(r'\s', '_', self.title().lower())) + + return f'script_{tabname}{title}_{item_id}' + current_basedir = paths.script_path diff --git a/scripts/custom_code.py b/scripts/custom_code.py index 9ce1f650..d29113e6 100644 --- a/scripts/custom_code.py +++ b/scripts/custom_code.py @@ -3,18 +3,12 @@ import gradio as gr from modules.processing import Processed from modules.shared import opts, cmd_opts, state -import re class Script(scripts.Script): def title(self): return "Custom code" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def show(self, is_img2img): return cmd_opts.allow_code diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 7555e874..cbdfc6b3 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -16,7 +16,6 @@ import k_diffusion as K from PIL import Image from torch import autocast from einops import rearrange, repeat -import re def find_noise_for_image(p, cond, uncond, cfg_scale, steps): @@ -123,11 +122,6 @@ class Script(scripts.Script): def title(self): return "img2img alternative test" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def show(self, is_img2img): return is_img2img diff --git a/scripts/loopback.py b/scripts/loopback.py index 4df7b73f..1dab9476 100644 --- a/scripts/loopback.py +++ b/scripts/loopback.py @@ -8,18 +8,12 @@ from modules import processing, shared, sd_samplers, images from modules.processing import Processed from modules.sd_samplers import samplers from modules.shared import opts, cmd_opts, state -import re class Script(scripts.Script): def title(self): return "Loopback" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = 
re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def show(self, is_img2img): return is_img2img diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index b4a0dc73..0906da6a 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -10,7 +10,6 @@ from PIL import Image, ImageDraw from modules import images, processing, devices from modules.processing import Processed, process_images from modules.shared import opts, cmd_opts, state -import re # this function is taken from https://github.com/parlance-zz/g-diffuser-bot @@ -123,11 +122,6 @@ class Script(scripts.Script): def title(self): return "Outpainting mk2" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def show(self, is_img2img): return is_img2img diff --git a/scripts/poor_mans_outpainting.py b/scripts/poor_mans_outpainting.py index 1c7dc467..d8feda00 100644 --- a/scripts/poor_mans_outpainting.py +++ b/scripts/poor_mans_outpainting.py @@ -7,18 +7,12 @@ from PIL import Image, ImageDraw from modules import images, processing, devices from modules.processing import Processed, process_images from modules.shared import opts, cmd_opts, state -import re class Script(scripts.Script): def title(self): return "Poor man's outpainting" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def show(self, is_img2img): return is_img2img diff --git a/scripts/prompt_matrix.py b/scripts/prompt_matrix.py index 278d2e68..dd95e588 100644 --- a/scripts/prompt_matrix.py +++ b/scripts/prompt_matrix.py @@ -10,7 +10,6 @@ from modules import images from modules.processing import process_images, Processed from modules.shared import opts, cmd_opts, state import modules.sd_samplers -import re def draw_xy_grid(xs, ys, x_label, y_label, cell): @@ -45,11 +44,6 @@ class Script(scripts.Script): def title(self): return "Prompt matrix" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def ui(self, is_img2img): put_at_start = gr.Checkbox(label='Put variable parts at start of prompt', value=False, elem_id=self.elem_id("put_at_start")) different_seeds = gr.Checkbox(label='Use different seed for each picture', value=False, elem_id=self.elem_id("different_seeds")) diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 5c84c3e9..2751f98a 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -13,7 +13,6 @@ from modules import sd_samplers from modules.processing import Processed, process_images from PIL import Image from modules.shared import opts, cmd_opts, state -import re def process_string_tag(tag): @@ -112,11 +111,6 @@ class Script(scripts.Script): def title(self): return "Prompts from file or textbox" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def ui(self, is_img2img): checkbox_iterate = gr.Checkbox(label="Iterate seed 
every line", value=False, elem_id=self.elem_id("checkbox_iterate")) checkbox_iterate_batch = gr.Checkbox(label="Use same random seed for all lines", value=False, elem_id=self.elem_id("checkbox_iterate_batch")) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 247e755b..9b8ffd85 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -7,18 +7,12 @@ from PIL import Image from modules import processing, shared, sd_samplers, images, devices from modules.processing import Processed from modules.shared import opts, cmd_opts, state -import re class Script(scripts.Script): def title(self): return "SD upscale" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def show(self, is_img2img): return is_img2img diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index b277a439..f04d9b7e 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -290,11 +290,6 @@ class Script(scripts.Script): def title(self): return "X/Y plot" - def elem_id(self, item_id): - gen_elem_id = ('img2img' if self.is_img2img else 'txt2txt') + '_script_' + re.sub(r'\s', '_', self.title().lower()) + '_' + item_id - gen_elem_id = re.sub(r'[^a-z_0-9]', '', gen_elem_id) - return gen_elem_id - def ui(self, is_img2img): current_axis_options = [x for x in axis_options if type(x) == AxisOption or type(x) == AxisOptionImg2Img and is_img2img] From eea8fc40e16664ddc8a9aec77206da704a35dde0 Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 5 Jan 2023 07:24:22 -0800 Subject: [PATCH 132/461] Add option to save ti settings to file. --- modules/shared.py | 1 + .../textual_inversion/textual_inversion.py | 30 +++++++++++++++++-- 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index e0f44c6d..933cd738 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -362,6 +362,7 @@ options_templates.update(options_section(('training', "Training"), { "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. 
Training of embedding or HN can be resumed with the matching optim file."), + "save_train_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file when training starts."), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 71e07bcc..2bed2ecb 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -1,6 +1,7 @@ import os import sys import traceback +import inspect import torch import tqdm @@ -229,6 +230,28 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) +def save_settings_to_file(initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): + checkpoint = sd_models.select_checkpoint() + model_name = checkpoint.model_name + model_hash = '[{}]'.format(checkpoint.hash) + + # Get a list of the argument names. + arg_names = inspect.getfullargspec(save_settings_to_file).args + + # Create a list of the argument names to include in the settings string. + names = arg_names[:16] # Include all arguments up until the preview-related ones. + if preview_from_txt2img: + names.extend(arg_names[16:]) # Include all remaining arguments if `preview_from_txt2img` is True. + + # Build the settings string. + settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" + for name in names: + value = locals()[name] + settings_str += f"{name}: {value}\n" + + with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout: + fout.write(settings_str + "\n\n") + def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"): assert model_name, f"{name} not selected" assert learn_rate, "Learning rate is empty or 0" @@ -292,13 +315,13 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ if initial_step >= steps: shared.state.textinfo = "Model has already been trained beyond specified max steps" return embedding, filename + scheduler = LearnRateScheduler(learn_rate, steps, initial_step) - clip_grad = torch.nn.utils.clip_grad_value_ if clip_grad_mode == "value" else \ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ None if clip_grad: - clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) + clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) # dataset loading may take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." 
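The save_settings_to_file added in [PATCH 132/461] above leans on introspection: inspect.getfullargspec gives the function's own parameter names, and locals() supplies their current values, so every training setting passed in gets logged without being listed a second time. A reduced, self-contained sketch of that pattern (the parameters here are invented for illustration; the real function also records the checkpoint and trims the preview_* arguments):

    import datetime
    import inspect

    def save_settings_to_file(log_path, learn_rate, batch_size, steps):
        arg_names = inspect.getfullargspec(save_settings_to_file).args
        settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n"
        for name in arg_names:
            if name == 'log_path':            # no point logging where the log itself goes
                continue
            settings_str += f"{name}: {locals()[name]}\n"
        with open(log_path, "a+", encoding="utf8") as fout:
            fout.write(settings_str + "\n\n")

    save_settings_to_file("settings.txt", 5e-3, 1, 100000)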
old_parallel_processing_allowed = shared.parallel_processing_allowed @@ -306,7 +329,8 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ pin_memory = shared.opts.pin_memory ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) - + if shared.opts.save_train_settings_to_txt: + save_settings_to_file(initial_step , len(ds) , embedding_name, len(embedding.vec) , learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) latent_sampling_method = ds.latent_sampling_method dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory) From 19a81ac2871ec900fc8b7955bbc2554b6c5ac6b1 Mon Sep 17 00:00:00 2001 From: cat Date: Thu, 5 Jan 2023 20:17:39 +0500 Subject: [PATCH 133/461] hires-fix: add "nearest-exact" latent upscale mode. --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index e0f44c6d..b7a3ce5c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -576,6 +576,7 @@ latent_upscale_modes = { "Latent (bicubic)": {"mode": "bicubic", "antialias": False}, "Latent (bicubic antialiased)": {"mode": "bicubic", "antialias": True}, "Latent (nearest)": {"mode": "nearest", "antialias": False}, + "Latent (nearest-exact)": {"mode": "nearest-exact", "antialias": False}, } sd_upscalers = [] From b85c2b5cf4a6809bc871718cf4680d49c3e95e94 Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 5 Jan 2023 08:14:38 -0800 Subject: [PATCH 134/461] Clean up ti, add same behavior to hypernetwork. --- modules/hypernetworks/hypernetwork.py | 31 ++++++++++++++++++- modules/shared.py | 2 +- .../textual_inversion/textual_inversion.py | 14 ++++++--- 3 files changed, 40 insertions(+), 7 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 6a9b1398..d5985263 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -401,7 +401,33 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, hypernet.save(fn) shared.reload_hypernetworks() +# Note: textual_inversion.py has a nearly identical function of the same name. 
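The one-line change in [PATCH 133/461] above registers "Latent (nearest-exact)" next to the existing latent upscale modes; these entries configure the mode/antialias arguments used when the latent is resized with torch.nn.functional.interpolate for the hires pass. "nearest-exact" (available in PyTorch 1.11 and newer) avoids the half-pixel offset of plain "nearest" sampling. A quick shape-level check of the two modes, with a random tensor standing in for a latent batch:

    import torch
    import torch.nn.functional as F

    latent = torch.randn(1, 4, 64, 64)       # [batch, channels, height, width] stand-in
    for mode in ("nearest", "nearest-exact"):
        upscaled = F.interpolate(latent, size=(96, 96), mode=mode)
        print(mode, tuple(upscaled.shape))    # both give (1, 4, 96, 96); the pixels differ slightly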
+def save_settings_to_file(initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): + checkpoint = sd_models.select_checkpoint() + model_name = checkpoint.model_name + model_hash = '[{}]'.format(checkpoint.hash) + # Starting index of preview-related arguments. + border_index = 19 + # Get a list of the argument names, excluding default argument. + sig = inspect.signature(save_settings_to_file) + arg_names = [p.name for p in sig.parameters.values() if p.default == p.empty] + + # Create a list of the argument names to include in the settings string. + names = arg_names[:border_index] # Include all arguments up until the preview-related ones. + + # Include preview-related arguments if applicable. + if preview_from_txt2img: + names.extend(arg_names[border_index:]) + + # Build the settings string. + settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" + for name in names: + value = locals()[name] + settings_str += f"{name}: {value}\n" + + with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout: + fout.write(settings_str + "\n\n") def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. Importing it at the top causes a circular import problem. 
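A side note on the border_index bookkeeping in the hypernetwork save_settings_to_file above: the first border_index parameter names are always written to the log, and the preview_* tail is appended only when preview_from_txt2img is enabled. A toy version with a much shorter argument list, just to make the slicing visible (names and values are invented):

    import inspect

    def train_settings(name, steps, learn_rate, preview_from_txt2img, preview_prompt):
        border_index = 4                            # parameters after this are preview-related
        arg_names = inspect.getfullargspec(train_settings).args
        names = arg_names[:border_index]            # core settings, always logged
        if preview_from_txt2img:
            names.extend(arg_names[border_index:])  # add the preview settings on demand
        logged = {}
        for n in names:
            logged[n] = locals()[n]
        return logged

    print(train_settings("hypernet-a", 1000, 5e-5, False, "a photo"))
    print(train_settings("hypernet-a", 1000, 5e-5, True, "a photo"))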
@@ -457,7 +483,10 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, pin_memory = shared.opts.pin_memory ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) - + + if shared.opts.save_training_settings_to_txt: + save_settings_to_file(initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + latent_sampling_method = ds.latent_sampling_method dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory) diff --git a/modules/shared.py b/modules/shared.py index 933cd738..10231a75 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -362,7 +362,7 @@ options_templates.update(options_section(('training', "Training"), { "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. Training of embedding or HN can be resumed with the matching optim file."), - "save_train_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file when training starts."), + "save_training_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file whenever training starts."), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 2bed2ecb..68648550 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -230,18 +230,20 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) +# Note: hypernetwork.py has a nearly identical function of the same name. 
def save_settings_to_file(initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): checkpoint = sd_models.select_checkpoint() model_name = checkpoint.model_name model_hash = '[{}]'.format(checkpoint.hash) - + # Starting index of preview-related arguments. + border_index = 16 # Get a list of the argument names. arg_names = inspect.getfullargspec(save_settings_to_file).args # Create a list of the argument names to include in the settings string. - names = arg_names[:16] # Include all arguments up until the preview-related ones. + names = arg_names[:border_index] # Include all arguments up until the preview-related ones. if preview_from_txt2img: - names.extend(arg_names[16:]) # Include all remaining arguments if `preview_from_txt2img` is True. + names.extend(arg_names[border_index:]) # Include all remaining arguments if `preview_from_txt2img` is True. # Build the settings string. settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" @@ -329,8 +331,10 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ pin_memory = shared.opts.pin_memory ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) - if shared.opts.save_train_settings_to_txt: - save_settings_to_file(initial_step , len(ds) , embedding_name, len(embedding.vec) , learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + + if shared.opts.save_training_settings_to_txt: + save_settings_to_file(initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + latent_sampling_method = ds.latent_sampling_method dl = modules.textual_inversion.dataset.PersonalizedDataLoader(ds, latent_sampling_method=latent_sampling_method, batch_size=ds.batch_size, pin_memory=pin_memory) From b6bab2f052b32c0ffebe6aecc1819ccf20cf8c5d Mon Sep 17 00:00:00 2001 From: timntorres Date: Thu, 5 Jan 2023 09:14:56 -0800 Subject: [PATCH 135/461] Include model in log file. Exclude directory. 
--- modules/hypernetworks/hypernetwork.py | 28 +++++++------------ .../textual_inversion/textual_inversion.py | 22 ++++++--------- 2 files changed, 19 insertions(+), 31 deletions(-) diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index d5985263..3237c37a 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -402,30 +402,22 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, shared.reload_hypernetworks() # Note: textual_inversion.py has a nearly identical function of the same name. -def save_settings_to_file(initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - checkpoint = sd_models.select_checkpoint() - model_name = checkpoint.model_name - model_hash = '[{}]'.format(checkpoint.hash) +def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # Starting index of preview-related arguments. - border_index = 19 - - # Get a list of the argument names, excluding default argument. - sig = inspect.signature(save_settings_to_file) - arg_names = [p.name for p in sig.parameters.values() if p.default == p.empty] - + border_index = 21 + # Get a list of the argument names. + arg_names = inspect.getfullargspec(save_settings_to_file).args # Create a list of the argument names to include in the settings string. names = arg_names[:border_index] # Include all arguments up until the preview-related ones. - - # Include preview-related arguments if applicable. if preview_from_txt2img: - names.extend(arg_names[border_index:]) - + names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable. # Build the settings string. settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" for name in names: - value = locals()[name] - settings_str += f"{name}: {value}\n" - + if name != 'log_directory': # It's useless and redundant to save log_directory. + value = locals()[name] + settings_str += f"{name}: {value}\n" + # Create or append to the file. 
with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout: fout.write(settings_str + "\n\n") @@ -485,7 +477,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) if shared.opts.save_training_settings_to_txt: - save_settings_to_file(initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) latent_sampling_method = ds.latent_sampling_method diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 68648550..ce7e4f5d 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -231,26 +231,22 @@ def write_loss(log_directory, filename, step, epoch_len, values): }) # Note: hypernetwork.py has a nearly identical function of the same name. -def save_settings_to_file(initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - checkpoint = sd_models.select_checkpoint() - model_name = checkpoint.model_name - model_hash = '[{}]'.format(checkpoint.hash) +def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # Starting index of preview-related arguments. - border_index = 16 + border_index = 18 # Get a list of the argument names. 
- arg_names = inspect.getfullargspec(save_settings_to_file).args - + arg_names = inspect.getfullargspec(save_settings_to_file).args # Create a list of the argument names to include in the settings string. names = arg_names[:border_index] # Include all arguments up until the preview-related ones. if preview_from_txt2img: - names.extend(arg_names[border_index:]) # Include all remaining arguments if `preview_from_txt2img` is True. - + names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable. # Build the settings string. settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" for name in names: - value = locals()[name] - settings_str += f"{name}: {value}\n" - + if name != 'log_directory': # It's useless and redundant to save log_directory. + value = locals()[name] + settings_str += f"{name}: {value}\n" + # Create or append to the file. with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout: fout.write(settings_str + "\n\n") @@ -333,7 +329,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) if shared.opts.save_training_settings_to_txt: - save_settings_to_file(initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) latent_sampling_method = ds.latent_sampling_method From fda04e620d529031e2134520e74756d0efa30464 Mon Sep 17 00:00:00 2001 From: Kuma <36082288+KumiIT@users.noreply.github.com> Date: Thu, 5 Jan 2023 18:44:19 +0100 Subject: [PATCH 136/461] typo in TI --- modules/textual_inversion/textual_inversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 71e07bcc..24b43045 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -298,7 +298,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ torch.nn.utils.clip_grad_norm_ if clip_grad_mode == "norm" else \ None if clip_grad: - clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, ititial_step, verbose=False) + clip_grad_sched = LearnRateScheduler(clip_grad_value, steps, initial_step, verbose=False) # dataset loading may 
take a while, so input validations and early returns should be done before this shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." old_parallel_processing_allowed = shared.parallel_processing_allowed From 847f869c67c7108e3e792fc193331d0e6acca29c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 5 Jan 2023 21:00:52 +0300 Subject: [PATCH 137/461] experimental optimization --- modules/processing.py | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 61e97077..a408d622 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -544,6 +544,29 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] + cached_uc = [None, None] + cached_c = [None, None] + + def get_conds_with_caching(function, required_prompts, steps, cache): + """ + Returns the result of calling function(shared.sd_model, required_prompts, steps) + using a cache to store the result if the same arguments have been used before. + + cache is an array containing two elements. The first element is a tuple + representing the previously used arguments, or None if no arguments + have been used before. The second element is where the previously + computed result is stored. + """ + + if cache[0] is not None and (required_prompts, steps) == cache[0]: + return cache[1] + + with devices.autocast(): + cache[1] = function(shared.sd_model, required_prompts, steps) + + cache[0] = (required_prompts, steps) + return cache[1] + with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): p.init(p.all_prompts, p.all_seeds, p.all_subseeds) @@ -571,9 +594,8 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.scripts is not None: p.scripts.process_batch(p, batch_number=n, prompts=prompts, seeds=seeds, subseeds=subseeds) - with devices.autocast(): - uc = prompt_parser.get_learned_conditioning(shared.sd_model, negative_prompts, p.steps) - c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps) + uc = get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, p.steps, cached_uc) + c = get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, p.steps, cached_c) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: From 81133d4168ae0bae9bf8bf1a1d4983319a589112 Mon Sep 17 00:00:00 2001 From: Faber Date: Fri, 6 Jan 2023 03:38:37 +0700 Subject: [PATCH 138/461] allow loading embeddings from subdirectories --- .../textual_inversion/textual_inversion.py | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 24b43045..0a059044 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -149,19 +149,20 @@ class EmbeddingDatabase: else: self.skipped_embeddings[name] = embedding - for fn in os.listdir(self.embeddings_dir): - try: - fullfn = os.path.join(self.embeddings_dir, fn) + for root, dirs, fns in os.walk(self.embeddings_dir): + for fn in fns: + try: + fullfn = os.path.join(root, fn) - if os.stat(fullfn).st_size == 0: + if os.stat(fullfn).st_size == 0: + continue + + process_file(fullfn, fn) + except Exception: + print(f"Error loading embedding {fn}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) continue - process_file(fullfn, 
fn) - except Exception: - print(f"Error loading embedding {fn}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - continue - print(f"Textual inversion embeddings loaded({len(self.word_embeddings)}): {', '.join(self.word_embeddings.keys())}") if len(self.skipped_embeddings) > 0: print(f"Textual inversion embeddings skipped({len(self.skipped_embeddings)}): {', '.join(self.skipped_embeddings.keys())}") From b5253f0dab529707f1fe2e11211a10ce2f264617 Mon Sep 17 00:00:00 2001 From: noodleanon <122053346+noodleanon@users.noreply.github.com> Date: Thu, 5 Jan 2023 21:21:48 +0000 Subject: [PATCH 139/461] allow img2img api to run scripts --- modules/api/api.py | 27 ++++++++++++++++++++++++--- modules/api/models.py | 2 +- modules/processing.py | 4 ++-- 3 files changed, 27 insertions(+), 6 deletions(-) diff --git a/modules/api/api.py b/modules/api/api.py index 2103709b..aa62a42e 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -11,7 +11,7 @@ from fastapi.security import HTTPBasic, HTTPBasicCredentials from secrets import compare_digest import modules.shared as shared -from modules import sd_samplers, deepbooru, sd_hijack, images +from modules import sd_samplers, deepbooru, sd_hijack, images, scripts, ui from modules.api.models import * from modules.processing import StableDiffusionProcessingTxt2Img, StableDiffusionProcessingImg2Img, process_images from modules.extras import run_extras @@ -28,8 +28,13 @@ def upscaler_to_index(name: str): try: return [x.name.lower() for x in shared.sd_upscalers].index(name.lower()) except: - raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be on of these: {' , '.join([x.name for x in sd_upscalers])}") + raise HTTPException(status_code=400, detail=f"Invalid upscaler, needs to be one of these: {' , '.join([x.name for x in sd_upscalers])}") +def script_name_to_index(name, scripts): + try: + return [script.title().lower() for script in scripts].index(name.lower()) + except: + raise HTTPException(status_code=422, detail=f"Script '{name}' not found") def validate_sampler_name(name): config = sd_samplers.all_samplers_map.get(name, None) @@ -170,6 +175,14 @@ class Api: if init_images is None: raise HTTPException(status_code=404, detail="Init image not found") + if img2imgreq.script_name is not None: + if scripts.scripts_img2img.scripts == []: + scripts.scripts_img2img.initialize_scripts(True) + ui.create_ui() + + script_idx = script_name_to_index(img2imgreq.script_name, scripts.scripts_img2img.selectable_scripts) + script = scripts.scripts_img2img.selectable_scripts[script_idx] + mask = img2imgreq.mask if mask: mask = decode_base64_to_image(mask) @@ -186,13 +199,21 @@ class Api: args = vars(populate) args.pop('include_init_images', None) # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine. 
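A couple of patches back, process_images_inner gained a two-slot conditioning cache. Its behaviour is easiest to see with a stand-in compute function; the sketch below mirrors the helper's logic with hypothetical names (fake_conditioning is invented, and the devices.autocast() wrapper is omitted):

calls = []

def fake_conditioning(model, prompts, steps):    # stand-in for the prompt_parser functions
    calls.append((tuple(prompts), steps))
    return f"cond-{len(calls)}"

def get_conds_with_caching(function, required_prompts, steps, cache):
    # cache[0] holds the (prompts, steps) of the previous call, cache[1] its result
    if cache[0] is not None and (required_prompts, steps) == cache[0]:
        return cache[1]
    cache[1] = function(None, required_prompts, steps)
    cache[0] = (required_prompts, steps)
    return cache[1]

cached_uc = [None, None]
get_conds_with_caching(fake_conditioning, ["a photo of a cat"], 20, cached_uc)
get_conds_with_caching(fake_conditioning, ["a photo of a cat"], 20, cached_uc)   # same batch: cache hit
get_conds_with_caching(fake_conditioning, ["a photo of a dog"], 20, cached_uc)   # prompt changed: recompute
assert len(calls) == 2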
+ args.pop('script_name', None) with self.queue_lock: p = StableDiffusionProcessingImg2Img(sd_model=shared.sd_model, **args) p.init_images = [decode_base64_to_image(x) for x in init_images] shared.state.begin() - processed = process_images(p) + if 'script' in locals(): + p.outpath_grids = opts.outdir_img2img_grids + p.outpath_samples = opts.outdir_img2img_samples + p.script_args = [script_idx + 1] + [None] * (script.args_from - 1) + p.script_args + processed = scripts.scripts_img2img.run(p, *p.script_args) + else: + processed = process_images(p) + shared.state.end() b64images = list(map(encode_pil_to_base64, processed.images)) diff --git a/modules/api/models.py b/modules/api/models.py index d8198a27..862477e7 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -106,7 +106,7 @@ StableDiffusionTxt2ImgProcessingAPI = PydanticModelGenerator( StableDiffusionImg2ImgProcessingAPI = PydanticModelGenerator( "StableDiffusionProcessingImg2Img", StableDiffusionProcessingImg2Img, - [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}] + [{"key": "sampler_index", "type": str, "default": "Euler"}, {"key": "init_images", "type": list, "default": None}, {"key": "denoising_strength", "type": float, "default": 0.75}, {"key": "mask", "type": str, "default": None}, {"key": "include_init_images", "type": bool, "default": False, "exclude" : True}, {"key": "script_name", "type": str, "default": None}, {"key": "script_args", "type": list, "default": []}] ).generate_model() class TextToImageResponse(BaseModel): diff --git a/modules/processing.py b/modules/processing.py index a408d622..d5ac7eb1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -98,7 +98,7 @@ class StableDiffusionProcessing(): """ The first set of paramaters: sd_models -> do_not_reload_embeddings represent the minimum required to create a StableDiffusionProcessing """ - def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None): + def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt: str = "", styles: List[str] = None, seed: int = -1, subseed: int = -1, subseed_strength: float = 0, seed_resize_from_h: int = -1, seed_resize_from_w: int = -1, seed_enable_extras: bool = True, sampler_name: str = None, batch_size: int = 1, n_iter: int = 1, steps: int = 50, cfg_scale: float = 7.0, width: int = 512, height: int = 512, restore_faces: bool = False, tiling: 
bool = False, do_not_save_samples: bool = False, do_not_save_grid: bool = False, extra_generation_params: Dict[Any, Any] = None, overlay_images: Any = None, negative_prompt: str = None, eta: float = None, do_not_reload_embeddings: bool = False, denoising_strength: float = 0, ddim_discretize: str = None, s_churn: float = 0.0, s_tmax: float = None, s_tmin: float = 0.0, s_noise: float = 1.0, override_settings: Dict[str, Any] = None, override_settings_restore_afterwards: bool = True, sampler_index: int = None, script_args: list = None): if sampler_index is not None: print("sampler_index argument for StableDiffusionProcessing does not do anything; use sampler_name", file=sys.stderr) @@ -149,7 +149,7 @@ class StableDiffusionProcessing(): self.seed_resize_from_w = 0 self.scripts = None - self.script_args = None + self.script_args = script_args self.all_prompts = None self.all_negative_prompts = None self.all_seeds = None From eadd1bf06adbd7263875640a6446d3b0184d1561 Mon Sep 17 00:00:00 2001 From: noodleanon <122053346+noodleanon@users.noreply.github.com> Date: Thu, 5 Jan 2023 21:22:04 +0000 Subject: [PATCH 140/461] allow sdupscale to accept upscaler name --- scripts/sd_upscale.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 9b8ffd85..332d76d9 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -25,6 +25,8 @@ class Script(scripts.Script): return [info, overlap, upscaler_index, scale_factor] def run(self, p, _, overlap, upscaler_index, scale_factor): + if isinstance(upscaler_index, str): + upscaler_index = [x.name.lower() for x in shared.sd_upscalers].index(upscaler_index.lower()) processing.fix_seed(p) upscaler = shared.sd_upscalers[upscaler_index] From 8111b5569d07c7ac3b695e28171aede728b4ae56 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 3 Jan 2023 20:43:05 -0500 Subject: [PATCH 141/461] Add support for PyTorch nightly and local builds --- modules/devices.py | 28 +++++++++++++++++++++++----- webui.py | 7 ++++++- 2 files changed, 29 insertions(+), 6 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 800510b7..caeb0276 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -133,8 +133,26 @@ def numpy_fix(self, *args, **kwargs): return orig_tensor_numpy(self, *args, **kwargs) -# PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working -if has_mps() and version.parse(torch.__version__) < version.parse("1.13"): - torch.Tensor.to = tensor_to_fix - torch.nn.functional.layer_norm = layer_norm_fix - torch.Tensor.numpy = numpy_fix +# MPS workaround for https://github.com/pytorch/pytorch/issues/89784 +orig_cumsum = torch.cumsum +orig_Tensor_cumsum = torch.Tensor.cumsum +def cumsum_fix(input, cumsum_func, *args, **kwargs): + if input.device.type == 'mps': + output_dtype = kwargs.get('dtype', input.dtype) + if any(output_dtype == broken_dtype for broken_dtype in [torch.bool, torch.int8, torch.int16, torch.int64]): + return cumsum_func(input.cpu(), *args, **kwargs).to(input.device) + return cumsum_func(input, *args, **kwargs) + + +if has_mps(): + if version.parse(torch.__version__) < version.parse("1.13"): + # PyTorch 1.13 doesn't need these fixes but unfortunately is slower and has regressions that prevent training from working + torch.Tensor.to = tensor_to_fix + torch.nn.functional.layer_norm = layer_norm_fix + torch.Tensor.numpy = numpy_fix + elif version.parse(torch.__version__) > version.parse("1.13.1"): + if not 
torch.Tensor([1,2]).to(torch.device("mps")).equal(torch.Tensor([1,1]).to(torch.device("mps")).cumsum(0, dtype=torch.int16)): + torch.cumsum = lambda input, *args, **kwargs: ( cumsum_fix(input, orig_cumsum, *args, **kwargs) ) + torch.Tensor.cumsum = lambda self, *args, **kwargs: ( cumsum_fix(self, orig_Tensor_cumsum, *args, **kwargs) ) + orig_narrow = torch.narrow + torch.narrow = lambda *args, **kwargs: ( orig_narrow(*args, **kwargs).clone() ) diff --git a/webui.py b/webui.py index 13375e71..ddfaea95 100644 --- a/webui.py +++ b/webui.py @@ -4,7 +4,7 @@ import threading import time import importlib import signal -import threading +import re from fastapi import FastAPI from fastapi.middleware.cors import CORSMiddleware from fastapi.middleware.gzip import GZipMiddleware @@ -13,6 +13,11 @@ from modules import import_hook, errors from modules.call_queue import wrap_queued_call, queue_lock, wrap_gradio_gpu_call from modules.paths import script_path +import torch +# Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors +if ".dev" in torch.__version__ or "+git" in torch.__version__: + torch.__version__ = re.search(r'[\d.]+', torch.__version__).group(0) + from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir import modules.codeformer_model as codeformer import modules.extras From d61a5aa4f623f6630670241aca8fc5c2a6381769 Mon Sep 17 00:00:00 2001 From: acncagua Date: Fri, 6 Jan 2023 10:58:22 +0900 Subject: [PATCH 142/461] Add files via upload --- modules/ui.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/ui.py b/modules/ui.py index 81d96c5b..030f0685 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -550,6 +550,8 @@ Requested path was: {f} os.startfile(path) elif platform.system() == "Darwin": sp.Popen(["open", path]) + elif "microsoft-standard-WSL2" in platform.uname().release: + sp.Popen(["wsl-open", path]) else: sp.Popen(["xdg-open", path]) From d782a95967c9eea753df3333cd1954b6ec73eba0 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 27 Dec 2022 08:50:55 -0500 Subject: [PATCH 143/461] Add Birch-san's sub-quadratic attention implementation --- README.md | 1 + modules/sd_hijack.py | 15 +-- modules/sd_hijack_optimizations.py | 124 ++++++++++++++---- modules/shared.py | 4 + modules/sub_quadratic_attention.py | 201 +++++++++++++++++++++++++++++ requirements.txt | 2 +- 6 files changed, 312 insertions(+), 35 deletions(-) create mode 100644 modules/sub_quadratic_attention.py diff --git a/README.md b/README.md index 556000fb..1913caf3 100644 --- a/README.md +++ b/README.md @@ -139,6 +139,7 @@ The documentation was moved from this README over to the project's [wiki](https: - Ideas for optimizations - https://github.com/basujindal/stable-diffusion - Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. - Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion) +- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san), Amin Rezaei (https://github.com/AminRezaei0x443) - Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas). 
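The webui.py guard above keeps only the leading digits-and-dots portion of torch.__version__ so that version parsing elsewhere does not choke on nightly or local-build suffixes. The version strings below are hypothetical examples of what the regex keeps:

import re

for version in ["1.13.1+git6c9b55e", "1.13.1+cu117"]:
    print(re.search(r'[\d.]+', version).group(0))   # prints "1.13.1" in both cases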
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd - Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 690a9ec2..019a6f3f 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -7,8 +7,6 @@ from modules.hypernetworks import hypernetwork from modules.shared import cmd_opts from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet -from modules.sd_hijack_optimizations import invokeAI_mps_available - import ldm.modules.attention import ldm.modules.diffusionmodules.model import ldm.modules.diffusionmodules.openaimodel @@ -40,17 +38,16 @@ def apply_optimizations(): print("Applying xformers cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward + elif cmd_opts.opt_sub_quad_attention: + print("Applying sub-quadratic cross attention optimization.") + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.sub_quad_attention_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.sub_quad_attnblock_forward elif cmd_opts.opt_split_attention_v1: print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()): - if not invokeAI_mps_available and shared.device.type == 'mps': - print("The InvokeAI cross attention optimization for MPS requires the psutil package which is not installed.") - print("Applying v1 cross attention optimization.") - ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 - else: - print("Applying cross attention optimization (InvokeAI).") - ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI + print("Applying cross attention optimization (InvokeAI).") + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): print("Applying cross attention optimization (Doggettx).") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 02c87f40..f5c153e8 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,7 +1,7 @@ import math import sys import traceback -import importlib +import psutil import torch from torch import einsum @@ -12,6 +12,8 @@ from einops import rearrange from modules import shared from modules.hypernetworks import hypernetwork +from .sub_quadratic_attention import efficient_dot_product_attention + if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers: try: @@ -22,6 +24,19 @@ if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers: print(traceback.format_exc(), file=sys.stderr) +def get_available_vram(): + if shared.device.type == 'cuda': + stats = torch.cuda.memory_stats(shared.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = 
torch.cuda.mem_get_info(torch.cuda.current_device()) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + return mem_free_total + else: + return psutil.virtual_memory().available + + # see https://github.com/basujindal/stable-diffusion/pull/117 for discussion def split_cross_attention_forward_v1(self, x, context=None, mask=None): h = self.heads @@ -76,12 +91,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None): r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - stats = torch.cuda.memory_stats(q.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch + mem_free_total = get_available_vram() gb = 1024 ** 3 tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() @@ -118,19 +128,8 @@ def split_cross_attention_forward(self, x, context=None, mask=None): return self.to_out(r2) -def check_for_psutil(): - try: - spec = importlib.util.find_spec('psutil') - return spec is not None - except ModuleNotFoundError: - return False - -invokeAI_mps_available = check_for_psutil() - # -- Taken from https://github.com/invoke-ai/InvokeAI and modified -- -if invokeAI_mps_available: - import psutil - mem_total_gb = psutil.virtual_memory().total // (1 << 30) +mem_total_gb = psutil.virtual_memory().total // (1 << 30) def einsum_op_compvis(q, k, v): s = einsum('b i d, b j d -> b i j', q, k) @@ -215,6 +214,70 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): # -- End of code from https://github.com/invoke-ai/InvokeAI -- + +# Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1 +def sub_quad_attention_forward(self, x, context=None, mask=None): + assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor." 
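For a sense of why the sub-quadratic path introduced above bothers with chunking, a rough count of what a full attention score matrix occupies, using purely illustrative half-precision sizes (the code that follows estimates the same quantity as qk_matmul_size_bytes):

batch_x_heads, q_tokens, k_tokens, bytes_per_token = 16, 4096, 4096, 2   # hypothetical fp16 case
qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens
print(qk_matmul_size_bytes / 2**20)   # 512.0 MiB for a single scores tensor, before any softmax buffers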
+ + h = self.heads + + q = self.to_q(x) + context = default(context, x) + + context_k, context_v = hypernetwork.apply_hypernetwork(shared.loaded_hypernetwork, context) + k = self.to_k(context_k) + v = self.to_v(context_v) + del context, context_k, context_v, x + + q = q.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) + + x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) + + x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2) + + out_proj, dropout = self.to_out + x = out_proj(x) + x = dropout(x) + + return x + +def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold_bytes=None, use_checkpoint=True): + bytes_per_token = torch.finfo(q.dtype).bits//8 + batch_x_heads, q_tokens, _ = q.shape + _, k_tokens, _ = k.shape + qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens + + available_vram = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7) + + if chunk_threshold_bytes is None: + chunk_threshold_bytes = available_vram + elif chunk_threshold_bytes == 0: + chunk_threshold_bytes = None + + if kv_chunk_size_min is None: + kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2])) + elif kv_chunk_size_min == 0: + kv_chunk_size_min = None + + if chunk_threshold_bytes is not None and qk_matmul_size_bytes <= chunk_threshold_bytes: + # the big matmul fits into our memory limit; do everything in 1 chunk, + # i.e. 
send it down the unchunked fast-path + query_chunk_size = q_tokens + kv_chunk_size = k_tokens + + return efficient_dot_product_attention( + q, + k, + v, + query_chunk_size=q_chunk_size, + kv_chunk_size=kv_chunk_size, + kv_chunk_size_min = kv_chunk_size_min, + use_checkpoint=use_checkpoint, + ) + + def xformers_attention_forward(self, x, context=None, mask=None): h = self.heads q_in = self.to_q(x) @@ -252,12 +315,7 @@ def cross_attention_attnblock_forward(self, x): h_ = torch.zeros_like(k, device=q.device) - stats = torch.cuda.memory_stats(q.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch + mem_free_total = get_available_vram() tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size() mem_required = tensor_size * 2.5 @@ -312,3 +370,19 @@ def xformers_attnblock_forward(self, x): return x + out except NotImplementedError: return cross_attention_attnblock_forward(self, x) + +def sub_quad_attnblock_forward(self, x): + h_ = x + h_ = self.norm(h_) + q = self.q(h_) + k = self.k(h_) + v = self.v(h_) + b, c, h, w = q.shape + q, k, v = map(lambda t: rearrange(t, 'b c h w -> b (h w) c'), (q, k, v)) + q = q.contiguous() + k = k.contiguous() + v = v.contiguous() + out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) + out = rearrange(out, 'b (h w) c -> b c h w', h=h) + out = self.proj_out(out) + return x + out diff --git a/modules/shared.py b/modules/shared.py index d4ddeea0..487a7792 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -56,6 +56,10 @@ parser.add_argument("--xformers", action='store_true', help="enable xformers for parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") parser.add_argument("--deepdanbooru", action='store_true', help="does not do anything") parser.add_argument("--opt-split-attention", action='store_true', help="force-enables Doggettx's cross-attention layer optimization. By default, it's on for torch cuda.") +parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization") +parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024) +parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None) +parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the size threshold in bytes for the sub-quadratic cross-attention layer optimization to use chunking", default=None) parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. 
By default, it's on when cuda is unavailable.") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py new file mode 100644 index 00000000..b11dc1c7 --- /dev/null +++ b/modules/sub_quadratic_attention.py @@ -0,0 +1,201 @@ +# original source: +# https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py +# license: +# unspecified +# credit: +# Amin Rezaei (original author) +# Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks) +# implementation of: +# Self-attention Does Not Need O(n2) Memory": +# https://arxiv.org/abs/2112.05682v2 + +from functools import partial +import torch +from torch import Tensor +from torch.utils.checkpoint import checkpoint +import math +from typing import Optional, NamedTuple, Protocol, List + +def dynamic_slice( + x: Tensor, + starts: List[int], + sizes: List[int], +) -> Tensor: + slicing = [slice(start, start + size) for start, size in zip(starts, sizes)] + return x[slicing] + +class AttnChunk(NamedTuple): + exp_values: Tensor + exp_weights_sum: Tensor + max_score: Tensor + +class SummarizeChunk(Protocol): + @staticmethod + def __call__( + query: Tensor, + key: Tensor, + value: Tensor, + ) -> AttnChunk: ... + +class ComputeQueryChunkAttn(Protocol): + @staticmethod + def __call__( + query: Tensor, + key: Tensor, + value: Tensor, + ) -> Tensor: ... 
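The AttnChunk triple above is the bookkeeping behind the cited paper's memory trick: each key/value chunk contributes exp-weighted values, the sum of those exp weights, and the chunk's maximum score, and chunks are merged by rescaling against the global maximum. A small numeric check of that identity, written against plain torch ops rather than this module's helpers:

import torch

q, k, v = torch.randn(1, 4, 8), torch.randn(1, 6, 8), torch.randn(1, 6, 8)
scale = q.shape[-1] ** -0.5
full = torch.softmax(q @ k.transpose(1, 2) * scale, dim=-1) @ v    # reference result

def summarize(q, k_chunk, v_chunk):
    s = q @ k_chunk.transpose(1, 2) * scale
    m = s.amax(dim=-1, keepdim=True)                  # per-chunk max score
    e = torch.exp(s - m)
    return e @ v_chunk, e.sum(dim=-1), m.squeeze(-1)  # exp_values, exp_weights_sum, max_score

chunks = [summarize(q, k[:, :3], v[:, :3]), summarize(q, k[:, 3:], v[:, 3:])]
values, weights, maxes = map(torch.stack, zip(*chunks))
global_max = maxes.amax(dim=0, keepdim=True)
diffs = torch.exp(maxes - global_max)                 # rescale every chunk to the global max
out = (values * diffs.unsqueeze(-1)).sum(dim=0) / (weights * diffs).unsqueeze(-1).sum(dim=0)
assert torch.allclose(out, full, atol=1e-5)           # chunked result matches full attention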
+ +def _summarize_chunk( + query: Tensor, + key: Tensor, + value: Tensor, + scale: float, +) -> AttnChunk: + attn_weights = torch.baddbmm( + torch.empty(1, 1, 1, device=query.device, dtype=query.dtype), + query, + key.transpose(1,2), + alpha=scale, + beta=0, + ) + max_score, _ = torch.max(attn_weights, -1, keepdim=True) + max_score = max_score.detach() + exp_weights = torch.exp(attn_weights - max_score) + exp_values = torch.bmm(exp_weights, value) + max_score = max_score.squeeze(-1) + return AttnChunk(exp_values, exp_weights.sum(dim=-1), max_score) + +def _query_chunk_attention( + query: Tensor, + key: Tensor, + value: Tensor, + summarize_chunk: SummarizeChunk, + kv_chunk_size: int, +) -> Tensor: + batch_x_heads, k_tokens, k_channels_per_head = key.shape + _, _, v_channels_per_head = value.shape + + def chunk_scanner(chunk_idx: int) -> AttnChunk: + key_chunk = dynamic_slice( + key, + (0, chunk_idx, 0), + (batch_x_heads, kv_chunk_size, k_channels_per_head) + ) + value_chunk = dynamic_slice( + value, + (0, chunk_idx, 0), + (batch_x_heads, kv_chunk_size, v_channels_per_head) + ) + return summarize_chunk(query, key_chunk, value_chunk) + + chunks: List[AttnChunk] = [ + chunk_scanner(chunk) for chunk in torch.arange(0, k_tokens, kv_chunk_size) + ] + acc_chunk = AttnChunk(*map(torch.stack, zip(*chunks))) + chunk_values, chunk_weights, chunk_max = acc_chunk + + global_max, _ = torch.max(chunk_max, 0, keepdim=True) + max_diffs = torch.exp(chunk_max - global_max) + chunk_values *= torch.unsqueeze(max_diffs, -1) + chunk_weights *= max_diffs + + all_values = chunk_values.sum(dim=0) + all_weights = torch.unsqueeze(chunk_weights, -1).sum(dim=0) + return all_values / all_weights + +# TODO: refactor CrossAttention#get_attention_scores to share code with this +def _get_attention_scores_no_kv_chunking( + query: Tensor, + key: Tensor, + value: Tensor, + scale: float, +) -> Tensor: + attn_scores = torch.baddbmm( + torch.empty(1, 1, 1, device=query.device, dtype=query.dtype), + query, + key.transpose(1,2), + alpha=scale, + beta=0, + ) + attn_probs = attn_scores.softmax(dim=-1) + del attn_scores + hidden_states_slice = torch.bmm(attn_probs, value) + return hidden_states_slice + +class ScannedChunk(NamedTuple): + chunk_idx: int + attn_chunk: AttnChunk + +def efficient_dot_product_attention( + query: Tensor, + key: Tensor, + value: Tensor, + query_chunk_size=1024, + kv_chunk_size: Optional[int] = None, + kv_chunk_size_min: Optional[int] = None, + use_checkpoint=True, +): + """Computes efficient dot-product attention given query, key, and value. + This is efficient version of attention presented in + https://arxiv.org/abs/2112.05682v2 which comes with O(sqrt(n)) memory requirements. + Args: + query: queries for calculating attention with shape of + `[batch * num_heads, tokens, channels_per_head]`. + key: keys for calculating attention with shape of + `[batch * num_heads, tokens, channels_per_head]`. + value: values to be used in attention with shape of + `[batch * num_heads, tokens, channels_per_head]`. + query_chunk_size: int: query chunks size + kv_chunk_size: Optional[int]: key/value chunks size. if None: defaults to sqrt(key_tokens) + kv_chunk_size_min: Optional[int]: key/value minimum chunk size. only considered when kv_chunk_size is None. changes `sqrt(key_tokens)` into `max(sqrt(key_tokens), kv_chunk_size_min)`, to ensure our chunk sizes don't get too small (smaller chunks = more chunks = less concurrent work done). 
+ use_checkpoint: bool: whether to use checkpointing (recommended True for training, False for inference) + Returns: + Output of shape `[batch * num_heads, query_tokens, channels_per_head]`. + """ + batch_x_heads, q_tokens, q_channels_per_head = query.shape + _, k_tokens, _ = key.shape + scale = q_channels_per_head ** -0.5 + + kv_chunk_size = min(kv_chunk_size or int(math.sqrt(k_tokens)), k_tokens) + if kv_chunk_size_min is not None: + kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min) + + def get_query_chunk(chunk_idx: int) -> Tensor: + return dynamic_slice( + query, + (0, chunk_idx, 0), + (batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head) + ) + + summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale) + summarize_chunk: SummarizeChunk = partial(checkpoint, summarize_chunk) if use_checkpoint else summarize_chunk + compute_query_chunk_attn: ComputeQueryChunkAttn = partial( + _get_attention_scores_no_kv_chunking, + scale=scale + ) if k_tokens <= kv_chunk_size else ( + # fast-path for when there's just 1 key-value chunk per query chunk (this is just sliced attention btw) + partial( + _query_chunk_attention, + kv_chunk_size=kv_chunk_size, + summarize_chunk=summarize_chunk, + ) + ) + + if q_tokens <= query_chunk_size: + # fast-path for when there's just 1 query chunk + return compute_query_chunk_attn( + query=query, + key=key, + value=value, + ) + + # TODO: maybe we should use torch.empty_like(query) to allocate storage in-advance, + # and pass slices to be mutated, instead of torch.cat()ing the returned slices + res = torch.cat([ + compute_query_chunk_attn( + query=get_query_chunk(i * query_chunk_size), + key=key, + value=value, + ) for i in range(math.ceil(q_tokens / query_chunk_size)) + ], dim=1) + return res diff --git a/requirements.txt b/requirements.txt index 5bed694e..0dbea322 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,4 +30,4 @@ inflection GitPython torchsde safetensors -psutil; sys_platform == 'darwin' +psutil From b119815333026164f2bd7d1ca71f3e4f7a9afd0d Mon Sep 17 00:00:00 2001 From: brkirch Date: Thu, 5 Jan 2023 04:37:17 -0500 Subject: [PATCH 144/461] Use narrow instead of dynamic_slice --- modules/sub_quadratic_attention.py | 34 +++++++++++++++++------------- 1 file changed, 19 insertions(+), 15 deletions(-) diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index b11dc1c7..95924d24 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -5,6 +5,7 @@ # credit: # Amin Rezaei (original author) # Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks) +# brkirch (modified to use torch.narrow instead of dynamic_slice implementation) # implementation of: # Self-attention Does Not Need O(n2) Memory": # https://arxiv.org/abs/2112.05682v2 @@ -16,13 +17,13 @@ from torch.utils.checkpoint import checkpoint import math from typing import Optional, NamedTuple, Protocol, List -def dynamic_slice( - x: Tensor, - starts: List[int], - sizes: List[int], +def narrow_trunc( + input: Tensor, + dim: int, + start: int, + length: int ) -> Tensor: - slicing = [slice(start, start + size) for start, size in zip(starts, sizes)] - return x[slicing] + return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start) class AttnChunk(NamedTuple): exp_values: Tensor @@ -76,15 +77,17 @@ def _query_chunk_attention( _, _, v_channels_per_head = value.shape def chunk_scanner(chunk_idx: int) -> 
AttnChunk: - key_chunk = dynamic_slice( + key_chunk = narrow_trunc( key, - (0, chunk_idx, 0), - (batch_x_heads, kv_chunk_size, k_channels_per_head) + 1, + chunk_idx, + kv_chunk_size ) - value_chunk = dynamic_slice( + value_chunk = narrow_trunc( value, - (0, chunk_idx, 0), - (batch_x_heads, kv_chunk_size, v_channels_per_head) + 1, + chunk_idx, + kv_chunk_size ) return summarize_chunk(query, key_chunk, value_chunk) @@ -161,10 +164,11 @@ def efficient_dot_product_attention( kv_chunk_size = max(kv_chunk_size, kv_chunk_size_min) def get_query_chunk(chunk_idx: int) -> Tensor: - return dynamic_slice( + return narrow_trunc( query, - (0, chunk_idx, 0), - (batch_x_heads, min(query_chunk_size, q_tokens), q_channels_per_head) + 1, + chunk_idx, + min(query_chunk_size, q_tokens) ) summarize_chunk: SummarizeChunk = partial(_summarize_chunk, scale=scale) From 683287d87f6401083a8d63eedc00ca7410214ca1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 6 Jan 2023 08:52:06 +0300 Subject: [PATCH 145/461] rework saving training params to file #6372 --- modules/hypernetworks/hypernetwork.py | 28 +++++-------------- modules/shared.py | 2 +- modules/textual_inversion/logging.py | 24 ++++++++++++++++ .../textual_inversion/textual_inversion.py | 23 ++------------- 4 files changed, 35 insertions(+), 42 deletions(-) create mode 100644 modules/textual_inversion/logging.py diff --git a/modules/hypernetworks/hypernetwork.py b/modules/hypernetworks/hypernetwork.py index 3237c37a..b0cfbe71 100644 --- a/modules/hypernetworks/hypernetwork.py +++ b/modules/hypernetworks/hypernetwork.py @@ -13,7 +13,7 @@ import tqdm from einops import rearrange, repeat from ldm.util import default from modules import devices, processing, sd_models, shared, sd_samplers -from modules.textual_inversion import textual_inversion +from modules.textual_inversion import textual_inversion, logging from modules.textual_inversion.learn_schedule import LearnRateScheduler from torch import einsum from torch.nn.init import normal_, xavier_normal_, xavier_uniform_, kaiming_normal_, kaiming_uniform_, zeros_ @@ -401,25 +401,7 @@ def create_hypernetwork(name, enable_sizes, overwrite_old, layer_structure=None, hypernet.save(fn) shared.reload_hypernetworks() -# Note: textual_inversion.py has a nearly identical function of the same name. -def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, hypernetwork_name, layer_structure, activation_func, weight_init, add_layer_norm, use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - # Starting index of preview-related arguments. - border_index = 21 - # Get a list of the argument names. - arg_names = inspect.getfullargspec(save_settings_to_file).args - # Create a list of the argument names to include in the settings string. - names = arg_names[:border_index] # Include all arguments up until the preview-related ones. - if preview_from_txt2img: - names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable. - # Build the settings string. - settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" - for name in names: - if name != 'log_directory': # It's useless and redundant to save log_directory. 
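The point of swapping dynamic_slice for narrow_trunc in the patch above is that the final key/value chunk can run past the end of the tensor, and narrow_trunc clamps the requested length instead of raising. A self-contained illustration that restates the helper rather than importing it:

import torch

def narrow_trunc(input, dim, start, length):
    # clamp `length` so the slice never reads past the end of `dim`
    return torch.narrow(input, dim, start, length if input.shape[dim] >= start + length else input.shape[dim] - start)

x = torch.arange(10)
print(narrow_trunc(x, 0, 8, 4))    # tensor([8, 9]): only two elements remain, so the length is clamped to 2
# torch.narrow(x, 0, 8, 4) would raise, because 8 + 4 exceeds x.shape[0]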
- value = locals()[name] - settings_str += f"{name}: {value}\n" - # Create or append to the file. - with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout: - fout.write(settings_str + "\n\n") + def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, data_root, log_directory, training_width, training_height, steps, clip_grad_mode, clip_grad_value, shuffle_tags, tag_drop_out, latent_sampling_method, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): # images allows training previews to have infotext. Importing it at the top causes a circular import problem. @@ -477,7 +459,11 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step, ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=hypernetwork_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, include_cond=True, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) if shared.opts.save_training_settings_to_txt: - save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), hypernetwork_name, hypernetwork.layer_structure, hypernetwork.activation_func, hypernetwork.weight_init, hypernetwork.add_layer_norm, hypernetwork.use_dropout, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_hypernetwork_every, template_file, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + saved_params = dict( + model_name=checkpoint.model_name, model_hash=checkpoint.hash, num_of_dataset_images=len(ds), + **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]} + ) + logging.save_settings_to_file(log_directory, {**saved_params, **locals()}) latent_sampling_method = ds.latent_sampling_method diff --git a/modules/shared.py b/modules/shared.py index f0e10b35..57e489d0 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -362,7 +362,7 @@ options_templates.update(options_section(('training', "Training"), { "unload_models_when_training": OptionInfo(False, "Move VAE and CLIP to RAM when training if possible. Saves VRAM."), "pin_memory": OptionInfo(False, "Turn on pin_memory for DataLoader. Makes training slightly faster but can increase memory usage."), "save_optimizer_state": OptionInfo(False, "Saves Optimizer state as separate *.optim file. 
Training of embedding or HN can be resumed with the matching optim file."), - "save_training_settings_to_txt": OptionInfo(False, "Save textual inversion and hypernet settings to a text file whenever training starts."), + "save_training_settings_to_txt": OptionInfo(True, "Save textual inversion and hypernet settings to a text file whenever training starts."), "dataset_filename_word_regex": OptionInfo("", "Filename word regex"), "dataset_filename_join_string": OptionInfo(" ", "Filename join string"), "training_image_repeats_per_epoch": OptionInfo(1, "Number of repeats for a single input image per epoch; used only for displaying epoch number", gr.Number, {"precision": 0}), diff --git a/modules/textual_inversion/logging.py b/modules/textual_inversion/logging.py new file mode 100644 index 00000000..8b1981d5 --- /dev/null +++ b/modules/textual_inversion/logging.py @@ -0,0 +1,24 @@ +import datetime +import json +import os + +saved_params_shared = {"model_name", "model_hash", "initial_step", "num_of_dataset_images", "learn_rate", "batch_size", "data_root", "log_directory", "training_width", "training_height", "steps", "create_image_every", "template_file"} +saved_params_ti = {"embedding_name", "num_vectors_per_token", "save_embedding_every", "save_image_with_stored_embedding"} +saved_params_hypernet = {"hypernetwork_name", "layer_structure", "activation_func", "weight_init", "add_layer_norm", "use_dropout", "save_hypernetwork_every"} +saved_params_all = saved_params_shared | saved_params_ti | saved_params_hypernet +saved_params_previews = {"preview_prompt", "preview_negative_prompt", "preview_steps", "preview_sampler_index", "preview_cfg_scale", "preview_seed", "preview_width", "preview_height"} + + +def save_settings_to_file(log_directory, all_params): + now = datetime.datetime.now() + params = {"datetime": now.strftime("%Y-%m-%d %H:%M:%S")} + + keys = saved_params_all + if all_params.get('preview_from_txt2img'): + keys = keys | saved_params_previews + + params.update({k: v for k, v in all_params.items() if k in keys}) + + filename = f'settings-{now.strftime("%Y-%m-%d-%H-%M-%S")}.json' + with open(os.path.join(log_directory, filename), "w") as file: + json.dump(params, file, indent=4) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index e9cf432f..f9f5e8cd 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -18,6 +18,8 @@ from modules.textual_inversion.learn_schedule import LearnRateScheduler from modules.textual_inversion.image_embedding import (embedding_to_b64, embedding_from_b64, insert_image_data_embed, extract_image_data_embed, caption_image_overlay) +from modules.textual_inversion.logging import save_settings_to_file + class Embedding: def __init__(self, vec, name, step=None): @@ -231,25 +233,6 @@ def write_loss(log_directory, filename, step, epoch_len, values): **values, }) -# Note: hypernetwork.py has a nearly identical function of the same name. -def save_settings_to_file(model_name, model_hash, initial_step, num_of_dataset_images, embedding_name, vectors_per_token, learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height): - # Starting index of preview-related arguments. 
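The new logging.save_settings_to_file above replaces positional bookkeeping with a whitelist: the training functions merge a few computed values with locals(), and only keys named in the saved_params_* sets make it into the JSON. A toy version of that filtering, with a shortened whitelist and invented locals such as scratch_buffer standing in for the real ones:

import json

saved_params_shared = {"model_name", "num_of_dataset_images", "learn_rate", "batch_size", "steps"}
saved_params_ti = {"embedding_name", "save_embedding_every"}
keys = saved_params_shared | saved_params_ti              # like saved_params_all in the patch

def save_settings_to_file(log_directory, all_params):
    params = {k: v for k, v in all_params.items() if k in keys}
    return json.dumps(params, indent=4)                   # the real helper writes this to a dated file

def train(model_name, embedding_name, learn_rate, batch_size, steps, scratch_buffer=None):
    dataset = [f"img-{i}" for i in range(3)]              # stand-in for the loaded dataset
    return save_settings_to_file("/tmp/logs", {**dict(num_of_dataset_images=len(dataset)), **locals()})

print(train("sd-v1-5", "my-style", 5e-3, 1, 10000))       # JSON with only the whitelisted keys; scratch_buffer and dataset are dropped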
- border_index = 18 - # Get a list of the argument names. - arg_names = inspect.getfullargspec(save_settings_to_file).args - # Create a list of the argument names to include in the settings string. - names = arg_names[:border_index] # Include all arguments up until the preview-related ones. - if preview_from_txt2img: - names.extend(arg_names[border_index:]) # Include preview-related arguments if applicable. - # Build the settings string. - settings_str = "datetime : " + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + "\n" - for name in names: - if name != 'log_directory': # It's useless and redundant to save log_directory. - value = locals()[name] - settings_str += f"{name}: {value}\n" - # Create or append to the file. - with open(os.path.join(log_directory, 'settings.txt'), "a+") as fout: - fout.write(settings_str + "\n\n") def validate_train_inputs(model_name, learn_rate, batch_size, gradient_step, data_root, template_file, steps, save_model_every, create_image_every, log_directory, name="embedding"): assert model_name, f"{name} not selected" @@ -330,7 +313,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method) if shared.opts.save_training_settings_to_txt: - save_settings_to_file(checkpoint.model_name, '[{}]'.format(checkpoint.hash), initial_step, len(ds), embedding_name, len(embedding.vec), learn_rate, batch_size, data_root, log_directory, training_width, training_height, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding, preview_from_txt2img, preview_prompt, preview_negative_prompt, preview_steps, preview_sampler_index, preview_cfg_scale, preview_seed, preview_width, preview_height) + save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.hash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()}) latent_sampling_method = ds.latent_sampling_method From b95a4c0ce5ab9c414e0494193bfff665f45e9e65 Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 6 Jan 2023 01:01:51 -0500 Subject: [PATCH 146/461] Change sub-quad chunk threshold to use percentage --- modules/sd_hijack_optimizations.py | 18 +++++++++--------- modules/shared.py | 2 +- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index f5c153e8..b416e9ac 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -233,7 +233,7 @@ def sub_quad_attention_forward(self, x, context=None, mask=None): k = k.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) v = v.unflatten(-1, (h, -1)).transpose(1,2).flatten(end_dim=1) - x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) + x = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, 
chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) x = x.unflatten(0, (-1, h)).transpose(1,2).flatten(start_dim=2) @@ -243,20 +243,20 @@ def sub_quad_attention_forward(self, x, context=None, mask=None): return x -def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold_bytes=None, use_checkpoint=True): +def sub_quad_attention(q, k, v, q_chunk_size=1024, kv_chunk_size=None, kv_chunk_size_min=None, chunk_threshold=None, use_checkpoint=True): bytes_per_token = torch.finfo(q.dtype).bits//8 batch_x_heads, q_tokens, _ = q.shape _, k_tokens, _ = k.shape qk_matmul_size_bytes = batch_x_heads * bytes_per_token * q_tokens * k_tokens - available_vram = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7) - - if chunk_threshold_bytes is None: - chunk_threshold_bytes = available_vram - elif chunk_threshold_bytes == 0: + if chunk_threshold is None: + chunk_threshold_bytes = int(get_available_vram() * 0.9) if q.device.type == 'mps' else int(get_available_vram() * 0.7) + elif chunk_threshold == 0: chunk_threshold_bytes = None + else: + chunk_threshold_bytes = int(0.01 * chunk_threshold * get_available_vram()) - if kv_chunk_size_min is None: + if kv_chunk_size_min is None and chunk_threshold_bytes is not None: kv_chunk_size_min = chunk_threshold_bytes // (batch_x_heads * bytes_per_token * (k.shape[2] + v.shape[2])) elif kv_chunk_size_min == 0: kv_chunk_size_min = None @@ -382,7 +382,7 @@ def sub_quad_attnblock_forward(self, x): q = q.contiguous() k = k.contiguous() v = v.contiguous() - out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold_bytes=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) + out = sub_quad_attention(q, k, v, q_chunk_size=shared.cmd_opts.sub_quad_q_chunk_size, kv_chunk_size=shared.cmd_opts.sub_quad_kv_chunk_size, chunk_threshold=shared.cmd_opts.sub_quad_chunk_threshold, use_checkpoint=self.training) out = rearrange(out, 'b (h w) c -> b c h w', h=h) out = self.proj_out(out) return x + out diff --git a/modules/shared.py b/modules/shared.py index cb1dc312..d7a81db1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -59,7 +59,7 @@ parser.add_argument("--opt-split-attention", action='store_true', help="force-en parser.add_argument("--opt-sub-quad-attention", action='store_true', help="enable memory efficient sub-quadratic cross-attention layer optimization") parser.add_argument("--sub-quad-q-chunk-size", type=int, help="query chunk size for the sub-quadratic cross-attention layer optimization to use", default=1024) parser.add_argument("--sub-quad-kv-chunk-size", type=int, help="kv chunk size for the sub-quadratic cross-attention layer optimization to use", default=None) -parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the size threshold in bytes for the sub-quadratic cross-attention layer optimization to use chunking", default=None) +parser.add_argument("--sub-quad-chunk-threshold", type=int, help="the percentage of VRAM threshold for the sub-quadratic cross-attention layer optimization to use chunking", default=None) parser.add_argument("--opt-split-attention-invokeai", action='store_true', help="force-enables InvokeAI's cross-attention layer optimization. 
By default, it's on when cuda is unavailable.") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") From 5deb2a19ccea57a50252e8fcb07b4d17c6599def Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 6 Jan 2023 01:33:15 -0500 Subject: [PATCH 147/461] Allow Doggettx's cross attention opt without CUDA --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index ef25dadb..bd101e5b 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -50,7 +50,7 @@ def apply_optimizations(): print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 optimization_method = 'V1' - elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not torch.cuda.is_available()): + elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention_invokeai or not cmd_opts.opt_split_attention and not torch.cuda.is_available()): print("Applying cross attention optimization (InvokeAI).") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_invokeAI optimization_method = 'InvokeAI' From c9bded39ee05bd0507ccd27d2b674d86d6c0c8e8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 6 Jan 2023 12:32:44 +0300 Subject: [PATCH 148/461] sort extensions by date and add an option to sort by other columns --- modules/ui_extensions.py | 44 +++++++++++++++++++++++++++++----------- style.css | 11 +++++++++- 2 files changed, 42 insertions(+), 13 deletions(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index eec9586f..742e745e 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -162,15 +162,15 @@ def install_extension_from_url(dirname, url): shutil.rmtree(tmpdir, True) -def install_extension_from_index(url, hide_tags): +def install_extension_from_index(url, hide_tags, sort_column): ext_table, message = install_extension_from_url(None, url) - code, _ = refresh_available_extensions_from_data(hide_tags) + code, _ = refresh_available_extensions_from_data(hide_tags, sort_column) return code, ext_table, message -def refresh_available_extensions(url, hide_tags): +def refresh_available_extensions(url, hide_tags, sort_column): global available_extensions import urllib.request @@ -179,18 +179,28 @@ def refresh_available_extensions(url, hide_tags): available_extensions = json.loads(text) - code, tags = refresh_available_extensions_from_data(hide_tags) + code, tags = refresh_available_extensions_from_data(hide_tags, sort_column) return url, code, gr.CheckboxGroup.update(choices=tags), '' -def refresh_available_extensions_for_tags(hide_tags): - code, _ = refresh_available_extensions_from_data(hide_tags) +def refresh_available_extensions_for_tags(hide_tags, sort_column): + code, _ = refresh_available_extensions_from_data(hide_tags, sort_column) return code, '' -def refresh_available_extensions_from_data(hide_tags): +sort_ordering = [ + # (reverse, order_by_function) + (True, lambda x: x.get('added', 'z')), + (False, lambda x: x.get('added', 'z')), + (False, lambda x: x.get('name', 'z')), + (True, lambda x: x.get('name', 'z')), + (False, lambda x: 'z'), +] + + +def 
refresh_available_extensions_from_data(hide_tags, sort_column): extlist = available_extensions["extensions"] installed_extension_urls = {normalize_git_url(extension.remote): extension.name for extension in extensions.extensions} @@ -210,8 +220,11 @@ def refresh_available_extensions_from_data(hide_tags): """ - for ext in extlist: + sort_reverse, sort_function = sort_ordering[sort_column if 0 <= sort_column < len(sort_ordering) else 0] + + for ext in sorted(extlist, key=sort_function, reverse=sort_reverse): name = ext.get("name", "noname") + added = ext.get('added', 'unknown') url = ext.get("url", None) description = ext.get("description", "") extension_tags = ext.get("tags", []) @@ -233,7 +246,7 @@ def refresh_available_extensions_from_data(hide_tags): code += f""" {html.escape(name)}
{tags_text} - {html.escape(description)} + {html.escape(description)}

Added: {html.escape(added)}

{install_code} @@ -291,25 +304,32 @@ def create_ui(): with gr.Row(): hide_tags = gr.CheckboxGroup(value=["ads", "localization", "installed"], label="Hide extensions with tags", choices=["script", "ads", "localization", "installed"]) + sort_column = gr.Radio(value="newest first", label="Order", choices=["newest first", "oldest first", "a-z", "z-a", "internal order", ], type="index") install_result = gr.HTML() available_extensions_table = gr.HTML() refresh_available_extensions_button.click( fn=modules.ui.wrap_gradio_call(refresh_available_extensions, extra_outputs=[gr.update(), gr.update(), gr.update()]), - inputs=[available_extensions_index, hide_tags], + inputs=[available_extensions_index, hide_tags, sort_column], outputs=[available_extensions_index, available_extensions_table, hide_tags, install_result], ) install_extension_button.click( fn=modules.ui.wrap_gradio_call(install_extension_from_index, extra_outputs=[gr.update(), gr.update()]), - inputs=[extension_to_install, hide_tags], + inputs=[extension_to_install, hide_tags, sort_column], outputs=[available_extensions_table, extensions_table, install_result], ) hide_tags.change( fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), - inputs=[hide_tags], + inputs=[hide_tags, sort_column], + outputs=[available_extensions_table, install_result] + ) + + sort_column.change( + fn=modules.ui.wrap_gradio_call(refresh_available_extensions_for_tags, extra_outputs=[gr.update()]), + inputs=[hide_tags, sort_column], outputs=[available_extensions_table, install_result] ) diff --git a/style.css b/style.css index ee74d79e..f1b23b53 100644 --- a/style.css +++ b/style.css @@ -555,7 +555,7 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h /* Extensions */ -#tab_extensions table{ +#tab_extensions table``{ border-collapse: collapse; } @@ -581,6 +581,15 @@ img2maskimg, #img2maskimg > .h-60, #img2maskimg > .h-60 > div, #img2maskimg > .h font-size: 95%; } +#available_extensions .info{ + margin: 0; +} + +#available_extensions .date_added{ + opacity: 0.85; + font-size: 90%; +} + #image_buttons_txt2img button, #image_buttons_img2img button, #image_buttons_extras button{ min-width: auto; padding-left: 0.5em; From 65ed4421e609dda3112f236c13e4db14caa71364 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 6 Jan 2023 13:55:50 +0300 Subject: [PATCH 149/461] add callback for when the script is unloaded --- modules/script_callbacks.py | 18 +++++++++++++++++- webui.py | 2 ++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index de69fd9f..608c5300 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -71,6 +71,7 @@ callback_map = dict( callbacks_before_component=[], callbacks_after_component=[], callbacks_image_grid=[], + callbacks_script_unloaded=[], ) @@ -171,6 +172,14 @@ def image_grid_callback(params: ImageGridLoopParams): report_exception(c, 'image_grid') +def script_unloaded_callback(): + for c in reversed(callback_map['callbacks_script_unloaded']): + try: + c.callback() + except Exception: + report_exception(c, 'script_unloaded') + + def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if len(stack) > 0 else 'unknown file' @@ -202,7 +211,7 @@ def on_app_started(callback): def on_model_loaded(callback): """register a function to be called when the stable diffusion model is created; the model is - 
passed as an argument""" + passed as an argument; this function is also called when the script is reloaded. """ add_callback(callback_map['callbacks_model_loaded'], callback) @@ -279,3 +288,10 @@ def on_image_grid(callback): - params: ImageGridLoopParams - parameters to be used for grid creation. Can be modified. """ add_callback(callback_map['callbacks_image_grid'], callback) + + +def on_script_unloaded(callback): + """register a function to be called before the script is unloaded. Any hooks/hijacks/monkeying about that + the script did should be reverted here""" + + add_callback(callback_map['callbacks_script_unloaded'], callback) diff --git a/webui.py b/webui.py index ff6eb6eb..733a06b5 100644 --- a/webui.py +++ b/webui.py @@ -187,12 +187,14 @@ def webui(): sd_samplers.set_samplers() + modules.script_callbacks.script_unloaded_callback() extensions.list_extensions() localization.list_localizations(cmd_opts.localizations_dir) modelloader.forbid_loaded_nonbuiltin_upscalers() modules.scripts.reload_scripts() + modules.script_callbacks.model_loaded_callback(shared.sd_model) modelloader.load_upscalers() for module in [module for name, module in sys.modules.items() if name.startswith("modules.ui")]: From 848605fb654a55ee6947335d7df6e13366606fad Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 6 Jan 2023 06:58:49 -0500 Subject: [PATCH 150/461] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c1944d33..fea6cb35 100644 --- a/README.md +++ b/README.md @@ -141,7 +141,7 @@ Licenses for borrowed code can be found in `Settings -> Licenses` screen, and al - Ideas for optimizations - https://github.com/basujindal/stable-diffusion - Cross Attention layer optimization - Doggettx - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. - Cross Attention layer optimization - InvokeAI, lstein - https://github.com/invoke-ai/InvokeAI (originally http://github.com/lstein/stable-diffusion) -- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san), Amin Rezaei (https://github.com/AminRezaei0x443) +- Sub-quadratic Cross Attention layer optimization - Alex Birch (https://github.com/Birch-san/diffusers/pull/1), Amin Rezaei (https://github.com/AminRezaei0x443/memory-efficient-attention) - Textual Inversion - Rinon Gal - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas). 
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd - Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot From 5e6566324bba20554bcc04f3dda798e560397f38 Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 6 Jan 2023 07:06:26 -0500 Subject: [PATCH 151/461] Always end version number with a digit --- webui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 733a06b5..8737e593 100644 --- a/webui.py +++ b/webui.py @@ -16,7 +16,7 @@ from modules.paths import script_path import torch # Truncate version number of nightly/local build of PyTorch to not cause exceptions with CodeFormer or Safetensors if ".dev" in torch.__version__ or "+git" in torch.__version__: - torch.__version__ = re.search(r'[\d.]+', torch.__version__).group(0) + torch.__version__ = re.search(r'[\d.]+[\d]', torch.__version__).group(0) from modules import shared, devices, sd_samplers, upscaler, extensions, localization, ui_tempdir import modules.codeformer_model as codeformer From 3246a2d6b898da6a98fe9df4dc67944635a41bd3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 6 Jan 2023 16:03:43 +0300 Subject: [PATCH 152/461] remove restriction for saving dropdowns to ui-config.json --- modules/scripts.py | 1 - modules/ui.py | 10 ++-------- 2 files changed, 2 insertions(+), 9 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 0c44f191..35164093 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -290,7 +290,6 @@ class ScriptRunner: script.group = group dropdown = gr.Dropdown(label="Script", elem_id="script_list", choices=["None"] + self.titles, value="None", type="index") - dropdown.save_to_config = True inputs[0] = dropdown for script in self.selectable_scripts: diff --git a/modules/ui.py b/modules/ui.py index 030f0685..b79d24ee 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -435,11 +435,9 @@ def create_toprow(is_img2img): with gr.Row(): with gr.Column(scale=1, elem_id="style_pos_col"): prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys()))) - prompt_style.save_to_config = True with gr.Column(scale=1, elem_id="style_neg_col"): prompt_style2 = gr.Dropdown(label="Style 2", elem_id=f"{id_part}_style2_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys()))) - prompt_style2.save_to_config = True return prompt, prompt_style, negative_prompt, prompt_style2, submit, button_interrogate, button_deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button @@ -638,7 +636,6 @@ def create_sampler_and_steps_selection(choices, tabname): if opts.samplers_in_dropdown: with FormRow(elem_id=f"sampler_selection_{tabname}"): sampler_index = gr.Dropdown(label='Sampling method', elem_id=f"{tabname}_sampling", choices=[x.name for x in choices], value=choices[0].name, type="index") - sampler_index.save_to_config = True steps = gr.Slider(minimum=1, maximum=150, step=1, elem_id=f"{tabname}_steps", label="Sampling steps", value=20) else: with FormGroup(elem_id=f"sampler_selection_{tabname}"): @@ -1794,7 +1791,7 @@ def create_ui(): if init_field is not None: init_field(saved_value) - if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number] and x.visible: + if type(x) in [gr.Slider, gr.Radio, gr.Checkbox, gr.Textbox, gr.Number, gr.Dropdown] and x.visible: apply_field(x, 'visible') if type(x) == 
gr.Slider: @@ -1815,11 +1812,8 @@ def create_ui(): if type(x) == gr.Number: apply_field(x, 'value') - # Since there are many dropdowns that shouldn't be saved, - # we only mark dropdowns that should be saved. - if type(x) == gr.Dropdown and getattr(x, 'save_to_config', False): + if type(x) == gr.Dropdown: apply_field(x, 'value', lambda val: val in x.choices, getattr(x, 'init_field', None)) - apply_field(x, 'visible') visit(txt2img_interface, loadsave, "txt2img") visit(img2img_interface, loadsave, "img2img") From 50194de93ffc9db763d9b08fcc9c3bde1aa86151 Mon Sep 17 00:00:00 2001 From: Kuma <36082288+KumiIT@users.noreply.github.com> Date: Fri, 6 Jan 2023 16:12:45 +0100 Subject: [PATCH 153/461] typo UI fixes #6391 --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 57e489d0..865c3c07 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -430,7 +430,7 @@ options_templates.update(options_section(('ui', "User interface"), { "samplers_in_dropdown": OptionInfo(True, "Use dropdown for sampler selection instead of radio group"), "dimensions_and_batch_together": OptionInfo(True, "Show Witdth/Height and Batch sliders in same row"), 'quicksettings': OptionInfo("sd_model_checkpoint", "Quicksettings list"), - 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/ing2img UI item order"), + 'ui_reorder': OptionInfo(", ".join(ui_reorder_categories), "txt2img/img2img UI item order"), 'localization': OptionInfo("None", "Localization (requires restart)", gr.Dropdown, lambda: {"choices": ["None"] + list(localization.localizations.keys())}, refresh=lambda: localization.list_localizations(cmd_opts.localizations_dir)), })) From 3992ecbe6e46a465062508c677964534e7397f72 Mon Sep 17 00:00:00 2001 From: Mitchell Boot <47387831+Mitchell1711@users.noreply.github.com> Date: Fri, 6 Jan 2023 18:02:46 +0100 Subject: [PATCH 154/461] Added UI elements Added a new row to hires fix that shows the new resolution after scaling --- modules/ui.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/modules/ui.py b/modules/ui.py index b79d24ee..20f7d2a2 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -255,6 +255,12 @@ def add_style(name: str, prompt: str, negative_prompt: str): return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)] +def calc_resolution_hires(x, y, scale): + #final res can only be a multiple of 8 + scaled_x = int(x * scale // 8) * 8 + scaled_y = int(y * scale // 8) * 8 + + return "

Upscaled Resolution: "+str(scaled_x)+"x"+str(scaled_y)+"

" def apply_styles(prompt, prompt_neg, style1_name, style2_name): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name]) @@ -718,6 +724,12 @@ def create_ui(): hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") + + with FormRow(elem_id="txt2img_hires_fix_row3"): + hr_final_resolution = gr.HTML(value=calc_resolution_hires(width.value, height.value, hr_scale.value), elem_id="txtimg_hr_finalres") + hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) elif category == "batch": if not opts.dimensions_and_batch_together: From 991368c8d54404d8e13d4c6e76a0f32644e65ad4 Mon Sep 17 00:00:00 2001 From: Mitchell Boot <47387831+Mitchell1711@users.noreply.github.com> Date: Fri, 6 Jan 2023 18:24:29 +0100 Subject: [PATCH 155/461] remove camelcase --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 20f7d2a2..6fc8b7d7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -260,7 +260,7 @@ def calc_resolution_hires(x, y, scale): scaled_x = int(x * scale // 8) * 8 scaled_y = int(y * scale // 8) * 8 - return "

Upscaled Resolution: "+str(scaled_x)+"x"+str(scaled_y)+"

" + return "

Upscaled resolution: "+str(scaled_x)+"x"+str(scaled_y)+"

" def apply_styles(prompt, prompt_neg, style1_name, style2_name): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name]) From c18add68ef7d2de3617cbbaff864b0c74cfdf6c0 Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 6 Jan 2023 16:42:47 -0500 Subject: [PATCH 156/461] Added license --- html/licenses.html | 29 ++++++++++++++++++++++++++++- modules/sd_hijack_optimizations.py | 1 + modules/sub_quadratic_attention.py | 2 +- 3 files changed, 30 insertions(+), 2 deletions(-) diff --git a/html/licenses.html b/html/licenses.html index 9eeaa072..570630eb 100644 --- a/html/licenses.html +++ b/html/licenses.html @@ -184,7 +184,7 @@ SOFTWARE.

SwinIR

-Code added by contirubtors, most likely copied from this repository. +Code added by contributors, most likely copied from this repository.
                                  Apache License
@@ -390,3 +390,30 @@ SOFTWARE.
    limitations under the License.
 
+

Memory Efficient Attention

+The sub-quadratic cross attention optimization uses modified code from the Memory Efficient Attention package that Alex Birch optimized for 3D tensors. This license is updated to reflect that. +
+MIT License
+
+Copyright (c) 2023 Alex Birch
+Copyright (c) 2023 Amin Rezaei
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+ diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index b416e9ac..cdc63ed7 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -216,6 +216,7 @@ def split_cross_attention_forward_invokeAI(self, x, context=None, mask=None): # Based on Birch-san's modified implementation of sub-quadratic attention from https://github.com/Birch-san/diffusers/pull/1 +# The sub_quad_attention_forward function is under the MIT License listed under Memory Efficient Attention in the Licenses section of the web UI interface def sub_quad_attention_forward(self, x, context=None, mask=None): assert mask is None, "attention-mask not currently implemented for SubQuadraticCrossAttnProcessor." diff --git a/modules/sub_quadratic_attention.py b/modules/sub_quadratic_attention.py index 95924d24..fea7aaac 100644 --- a/modules/sub_quadratic_attention.py +++ b/modules/sub_quadratic_attention.py @@ -1,7 +1,7 @@ # original source: # https://github.com/AminRezaei0x443/memory-efficient-attention/blob/1bc0d9e6ac5f82ea43a375135c4e1d3896ee1694/memory_efficient_attention/attention_torch.py # license: -# unspecified +# MIT License (see Memory Efficient Attention under the Licenses section in the web UI interface for the full license) # credit: # Amin Rezaei (original author) # Alex Birch (optimized algorithm for 3D tensors, at the expense of removing bias, masking and callbacks) From 82c1f10b144f733460feead0bdc37a861489dc57 Mon Sep 17 00:00:00 2001 From: Dean Hopkins Date: Fri, 6 Jan 2023 22:00:12 +0000 Subject: [PATCH 157/461] increase upscale api validation limit --- modules/api/models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/api/models.py b/modules/api/models.py index f77951fc..22b88c59 100644 --- a/modules/api/models.py +++ b/modules/api/models.py @@ -125,7 +125,7 @@ class ExtrasBaseRequest(BaseModel): gfpgan_visibility: float = Field(default=0, title="GFPGAN Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of GFPGAN, values should be between 0 and 1.") codeformer_visibility: float = Field(default=0, title="CodeFormer Visibility", ge=0, le=1, allow_inf_nan=False, description="Sets the visibility of CodeFormer, values should be between 0 and 1.") codeformer_weight: float = Field(default=0, title="CodeFormer Weight", ge=0, le=1, allow_inf_nan=False, description="Sets the weight of CodeFormer, values should be between 0 and 1.") - upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=4, description="By how much to upscale the image, only used when resize_mode=0.") + upscaling_resize: float = Field(default=2, title="Upscaling Factor", ge=1, le=8, description="By how much to upscale the image, only used when resize_mode=0.") upscaling_resize_w: int = Field(default=512, title="Target Width", ge=1, description="Target width for the upscaler to hit. Only used when resize_mode=1.") upscaling_resize_h: int = Field(default=512, title="Target Height", ge=1, description="Target height for the upscaler to hit. 
Only used when resize_mode=1.") upscaling_crop: bool = Field(default=True, title="Crop to fit", description="Should the upscaler crop the image to fit in the choosen size?") From 79e39fae6110c20a3ee6255e2841c877f65e8cbd Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 01:45:28 +0300 Subject: [PATCH 158/461] CLIP hijack rework --- modules/sd_hijack.py | 6 +- modules/sd_hijack_clip.py | 328 +++++++++--------- modules/sd_hijack_clip_old.py | 81 +++++ .../textual_inversion/textual_inversion.py | 1 - modules/ui.py | 2 +- 5 files changed, 246 insertions(+), 172 deletions(-) create mode 100644 modules/sd_hijack_clip_old.py diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index fa2cd4bb..71cc145a 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -150,10 +150,10 @@ class StableDiffusionModelHijack: def clear_comments(self): self.comments = [] - def tokenize(self, text): - _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text]) + def get_prompt_lengths(self, text): + _, token_count = self.clip.process_texts([text]) - return remade_batch_tokens[0], token_count, sd_hijack_clip.get_target_prompt_token_count(token_count) + return token_count, self.clip.get_target_prompt_token_count(token_count) class EmbeddingsWithFixes(torch.nn.Module): diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index ca92b142..ac3020d7 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -1,12 +1,28 @@ import math +from collections import namedtuple import torch from modules import prompt_parser, devices from modules.shared import opts -def get_target_prompt_token_count(token_count): - return math.ceil(max(token_count, 1) / 75) * 75 + +class PromptChunk: + """ + This object contains token ids, weight (multipliers:1.4) and textual inversion embedding info for a chunk of prompt. + If a prompt is short, it is represented by one PromptChunk, otherwise, multiple are necessary. + Each PromptChunk contains an exact amount of tokens - 77, which includes one for start and end token, + so just 75 tokens from prompt. + """ + + def __init__(self): + self.tokens = [] + self.multipliers = [] + self.fixes = [] + + +PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) +"""This is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt chunk""" class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): @@ -14,17 +30,49 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): super().__init__() self.wrapped = wrapped self.hijack = hijack + self.chunk_length = 75 + + def empty_chunk(self): + """creates an empty PromptChunk and returns it""" + + chunk = PromptChunk() + chunk.tokens = [self.id_start] + [self.id_end] * (self.chunk_length + 1) + chunk.multipliers = [1.0] * (self.chunk_length + 2) + return chunk + + def get_target_prompt_token_count(self, token_count): + """returns the maximum number of tokens a prompt of a known length can have before it requires one more PromptChunk to be represented""" + + return math.ceil(max(token_count, 1) / self.chunk_length) * self.chunk_length def tokenize(self, texts): + """Converts a batch of texts into a batch of token ids""" + raise NotImplementedError def encode_with_transformers(self, tokens): + """ + converts a batch of token ids (in python lists) into a single tensor with numeric respresentation of those tokens; + All python lists with tokens are assumed to have same length, usually 77. 
+ if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on + model - can be 768 and 1024 + """ + raise NotImplementedError def encode_embedding_init_text(self, init_text, nvpt): + """Converts text into a tensor with this text's tokens' embeddings. Note that those are embeddings before they are passed through + transformers. nvpt is used as a maximum length in tokens. If text produces less teokens than nvpt, only this many is returned.""" + raise NotImplementedError - def tokenize_line(self, line, used_custom_terms, hijack_comments): + def tokenize_line(self, line): + """ + this transforms a single prompt into a list of PromptChunk objects - as many as needed to + represent the prompt. + Returns the list and the total number of tokens in the prompt. + """ + if opts.enable_emphasis: parsed = prompt_parser.parse_prompt_attention(line) else: @@ -32,205 +80,152 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): tokenized = self.tokenize([text for text, _ in parsed]) - fixes = [] - remade_tokens = [] - multipliers = [] + chunks = [] + chunk = PromptChunk() + token_count = 0 last_comma = -1 - for tokens, (text, weight) in zip(tokenized, parsed): - i = 0 - while i < len(tokens): - token = tokens[i] + def next_chunk(): + """puts current chunk into the list of results and produces the next one - empty""" + nonlocal token_count + nonlocal last_comma + nonlocal chunk - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + token_count += len(chunk.tokens) + to_add = self.chunk_length - len(chunk.tokens) + if to_add > 0: + chunk.tokens += [self.id_end] * to_add + chunk.multipliers += [1.0] * to_add + + chunk.tokens = [self.id_start] + chunk.tokens + [self.id_end] + chunk.multipliers = [1.0] + chunk.multipliers + [1.0] + + last_comma = -1 + chunks.append(chunk) + chunk = PromptChunk() + + for tokens, (text, weight) in zip(tokenized, parsed): + position = 0 + while position < len(tokens): + token = tokens[position] if token == self.comma_token: - last_comma = len(remade_tokens) - elif opts.comma_padding_backtrack != 0 and max(len(remade_tokens), 1) % 75 == 0 and last_comma != -1 and len(remade_tokens) - last_comma <= opts.comma_padding_backtrack: - last_comma += 1 - reloc_tokens = remade_tokens[last_comma:] - reloc_mults = multipliers[last_comma:] + last_comma = len(chunk.tokens) - remade_tokens = remade_tokens[:last_comma] - length = len(remade_tokens) + # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack + # is a setting that specifies that is there is a comma nearby, the text after comma should be moved out of this chunk and into the next. 
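Aside: the comma backtrack described in the comment above can be illustrated with a small standalone helper. This is a sketch, not webui code - the function name is made up, the 20-token window mirrors what the backtrack setting is assumed to default to, and embeddings and per-token weights are ignored:

def split_with_backtrack(tokens, comma_token, chunk_length=75, backtrack=20):
    # when a chunk fills up and a comma sits within the last `backtrack` tokens,
    # everything after that comma is moved into the next chunk instead of
    # cutting the phrase in half
    chunks, chunk, last_comma = [], [], -1
    for token in tokens:
        if token == comma_token:
            last_comma = len(chunk)
        elif backtrack and len(chunk) == chunk_length and last_comma != -1 and len(chunk) - last_comma <= backtrack:
            tail = chunk[last_comma + 1:]
            chunks.append(chunk[:last_comma + 1])
            chunk, last_comma = tail, -1
        if len(chunk) == chunk_length:
            chunks.append(chunk)
            chunk, last_comma = [], -1
        chunk.append(token)
    if chunk:
        chunks.append(chunk)
    return chunks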
+ elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: + break_location = last_comma + 1 - rem = int(math.ceil(length / 75)) * 75 - length - remade_tokens += [self.id_end] * rem + reloc_tokens - multipliers = multipliers[:last_comma] + [1.0] * rem + reloc_mults + reloc_tokens = chunk.tokens[break_location:] + reloc_mults = chunk.multipliers[break_location:] + chunk.tokens = chunk.tokens[:break_location] + chunk.multipliers = chunk.multipliers[:break_location] + + next_chunk() + chunk.tokens = reloc_tokens + chunk.multipliers = reloc_mults + + if len(chunk.tokens) == self.chunk_length: + next_chunk() + + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, position) if embedding is None: - remade_tokens.append(token) - multipliers.append(weight) - i += 1 - else: - emb_len = int(embedding.vec.shape[0]) - iteration = len(remade_tokens) // 75 - if (len(remade_tokens) + emb_len) // 75 != iteration: - rem = (75 * (iteration + 1) - len(remade_tokens)) - remade_tokens += [self.id_end] * rem - multipliers += [1.0] * rem - iteration += 1 - fixes.append((iteration, (len(remade_tokens) % 75, embedding))) - remade_tokens += [0] * emb_len - multipliers += [weight] * emb_len - used_custom_terms.append((embedding.name, embedding.checksum())) - i += embedding_length_in_tokens + chunk.tokens.append(token) + chunk.multipliers.append(weight) + position += 1 + continue - token_count = len(remade_tokens) - prompt_target_length = get_target_prompt_token_count(token_count) - tokens_to_add = prompt_target_length - len(remade_tokens) + emb_len = int(embedding.vec.shape[0]) + if len(chunk.tokens) + emb_len > self.chunk_length: + next_chunk() - remade_tokens = remade_tokens + [self.id_end] * tokens_to_add - multipliers = multipliers + [1.0] * tokens_to_add + chunk.fixes.append(PromptChunkFix(len(chunk.tokens), embedding)) - return remade_tokens, fixes, multipliers, token_count + chunk.tokens += [0] * emb_len + chunk.multipliers += [weight] * emb_len + position += embedding_length_in_tokens + + if len(chunk.tokens) > 0: + next_chunk() + + return chunks, token_count + + def process_texts(self, texts): + """ + Accepts a list of texts and calls tokenize_line() on each, with cache. Returns the list of results and maximum + length, in tokens, of all texts. 
+ """ - def process_text(self, texts): - used_custom_terms = [] - remade_batch_tokens = [] - hijack_comments = [] - hijack_fixes = [] token_count = 0 cache = {} - batch_multipliers = [] + batch_chunks = [] for line in texts: if line in cache: - remade_tokens, fixes, multipliers = cache[line] + chunks = cache[line] else: - remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments) + chunks, current_token_count = self.tokenize_line(line) token_count = max(current_token_count, token_count) - cache[line] = (remade_tokens, fixes, multipliers) + cache[line] = chunks - remade_batch_tokens.append(remade_tokens) - hijack_fixes.append(fixes) - batch_multipliers.append(multipliers) + batch_chunks.append(chunks) - return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count + return batch_chunks, token_count - def process_text_old(self, texts): - id_start = self.id_start - id_end = self.id_end - maxlen = self.wrapped.max_length # you get to stay at 77 - used_custom_terms = [] - remade_batch_tokens = [] - hijack_comments = [] - hijack_fixes = [] - token_count = 0 + def forward(self, texts): + """ + Accepts an array of texts; Passes texts through transformers network to create a tensor with numerical representation of those texts. + Returns a tensor with shape of (B, T, C), where B is length of the array; T is length, in tokens, of texts (including padding) - T will + be a multiple of 77; and C is dimensionality of each token - for SD1 it's 768, and for SD2 it's 1024. + An example shape returned by this function can be: (2, 77, 768). + Webui usually sends just one text at a time through this function - the only time when texts is an array with more than one elemenet + is when you do prompt editing: "a picture of a [cat:dog:0.4] eating ice cream" + """ - cache = {} - batch_tokens = self.tokenize(texts) - batch_multipliers = [] - for tokens in batch_tokens: - tuple_tokens = tuple(tokens) + if opts.use_old_emphasis_implementation: + import modules.sd_hijack_clip_old + return modules.sd_hijack_clip_old.forward_old(self, texts) - if tuple_tokens in cache: - remade_tokens, fixes, multipliers = cache[tuple_tokens] - else: - fixes = [] - remade_tokens = [] - multipliers = [] - mult = 1.0 + batch_chunks, token_count = self.process_texts(texts) - i = 0 - while i < len(tokens): - token = tokens[i] + used_embeddings = {} + chunk_count = max([len(x) for x in batch_chunks]) - embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + zs = [] + for i in range(chunk_count): + batch_chunk = [chunks[i] if i < len(chunks) else self.empty_chunk() for chunks in batch_chunks] - mult_change = self.token_mults.get(token) if opts.enable_emphasis else None - if mult_change is not None: - mult *= mult_change - i += 1 - elif embedding is None: - remade_tokens.append(token) - multipliers.append(mult) - i += 1 - else: - emb_len = int(embedding.vec.shape[0]) - fixes.append((len(remade_tokens), embedding)) - remade_tokens += [0] * emb_len - multipliers += [mult] * emb_len - used_custom_terms.append((embedding.name, embedding.checksum())) - i += embedding_length_in_tokens + tokens = [x.tokens for x in batch_chunk] + multipliers = [x.multipliers for x in batch_chunk] + self.hijack.fixes = [x.fixes for x in batch_chunk] - if len(remade_tokens) > maxlen - 2: - vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} - ovf = remade_tokens[maxlen - 2:] - overflowing_words = 
[vocab.get(int(x), "") for x in ovf] - overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) - hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") + for fixes in self.hijack.fixes: + for position, embedding in fixes: + used_embeddings[embedding.name] = embedding - token_count = len(remade_tokens) - remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) - remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] - cache[tuple_tokens] = (remade_tokens, fixes, multipliers) + z = self.process_tokens(tokens, multipliers) + zs.append(z) - multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) - multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] + if len(used_embeddings) > 0: + embeddings_list = ", ".join([f'{name} [{embedding.checksum()}]' for name, embedding in used_embeddings.items()]) + self.hijack.comments.append(f"Used embeddings: {embeddings_list}") - remade_batch_tokens.append(remade_tokens) - hijack_fixes.append(fixes) - batch_multipliers.append(multipliers) - return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count - - def forward(self, text): - use_old = opts.use_old_emphasis_implementation - if use_old: - batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text_old(text) - else: - batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text) - - self.hijack.comments += hijack_comments - - if len(used_custom_terms) > 0: - self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) - - if use_old: - self.hijack.fixes = hijack_fixes - return self.process_tokens(remade_batch_tokens, batch_multipliers) - - z = None - i = 0 - while max(map(len, remade_batch_tokens)) != 0: - rem_tokens = [x[75:] for x in remade_batch_tokens] - rem_multipliers = [x[75:] for x in batch_multipliers] - - self.hijack.fixes = [] - for unfiltered in hijack_fixes: - fixes = [] - for fix in unfiltered: - if fix[0] == i: - fixes.append(fix[1]) - self.hijack.fixes.append(fixes) - - tokens = [] - multipliers = [] - for j in range(len(remade_batch_tokens)): - if len(remade_batch_tokens[j]) > 0: - tokens.append(remade_batch_tokens[j][:75]) - multipliers.append(batch_multipliers[j][:75]) - else: - tokens.append([self.id_end] * 75) - multipliers.append([1.0] * 75) - - z1 = self.process_tokens(tokens, multipliers) - z = z1 if z is None else torch.cat((z, z1), axis=-2) - - remade_batch_tokens = rem_tokens - batch_multipliers = rem_multipliers - i += 1 - - return z + return torch.hstack(zs) def process_tokens(self, remade_batch_tokens, batch_multipliers): - if not opts.use_old_emphasis_implementation: - remade_batch_tokens = [[self.id_start] + x[:75] + [self.id_end] for x in remade_batch_tokens] - batch_multipliers = [[1.0] + x[:75] + [1.0] for x in batch_multipliers] - + """ + sends one single prompt chunk to be encoded by transformers neural network. + remade_batch_tokens is a batch of tokens - a list, where every element is a list of tokens; usually + there are exactly 77 tokens in the list. batch_multipliers is the same but for multipliers instead of tokens. + Multipliers are used to give more or less weight to the outputs of transformers network. Each multiplier + corresponds to one token. 
+ """ tokens = torch.asarray(remade_batch_tokens).to(devices.device) + # this is for SD2: SD1 uses the same token for padding and end of text, while SD2 uses different ones. if self.id_end != self.id_pad: for batch_pos in range(len(remade_batch_tokens)): index = remade_batch_tokens[batch_pos].index(self.id_end) @@ -239,8 +234,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): z = self.encode_with_transformers(tokens) # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers_of_same_length = [x + [1.0] * (75 - len(x)) for x in batch_multipliers] - batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(devices.device) + batch_multipliers = torch.asarray(batch_multipliers).to(devices.device) original_mean = z.mean() z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) new_mean = z.mean() diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py new file mode 100644 index 00000000..6d9fbbe6 --- /dev/null +++ b/modules/sd_hijack_clip_old.py @@ -0,0 +1,81 @@ +from modules import sd_hijack_clip +from modules import shared + + +def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): + id_start = self.id_start + id_end = self.id_end + maxlen = self.wrapped.max_length # you get to stay at 77 + used_custom_terms = [] + remade_batch_tokens = [] + hijack_comments = [] + hijack_fixes = [] + token_count = 0 + + cache = {} + batch_tokens = self.tokenize(texts) + batch_multipliers = [] + for tokens in batch_tokens: + tuple_tokens = tuple(tokens) + + if tuple_tokens in cache: + remade_tokens, fixes, multipliers = cache[tuple_tokens] + else: + fixes = [] + remade_tokens = [] + multipliers = [] + mult = 1.0 + + i = 0 + while i < len(tokens): + token = tokens[i] + + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + + mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None + if mult_change is not None: + mult *= mult_change + i += 1 + elif embedding is None: + remade_tokens.append(token) + multipliers.append(mult) + i += 1 + else: + emb_len = int(embedding.vec.shape[0]) + fixes.append((len(remade_tokens), embedding)) + remade_tokens += [0] * emb_len + multipliers += [mult] * emb_len + used_custom_terms.append((embedding.name, embedding.checksum())) + i += embedding_length_in_tokens + + if len(remade_tokens) > maxlen - 2: + vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} + ovf = remade_tokens[maxlen - 2:] + overflowing_words = [vocab.get(int(x), "") for x in ovf] + overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) + hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") + + token_count = len(remade_tokens) + remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) + remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] + cache[tuple_tokens] = (remade_tokens, fixes, multipliers) + + multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) + multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] + + remade_batch_tokens.append(remade_tokens) + hijack_fixes.append(fixes) + batch_multipliers.append(multipliers) + return batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count + + +def forward_old(self: 
sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, texts): + batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = process_text_old(self, texts) + + self.hijack.comments += hijack_comments + + if len(used_custom_terms) > 0: + self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) + + self.hijack.fixes = hijack_fixes + return self.process_tokens(remade_batch_tokens, batch_multipliers) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index f9f5e8cd..45882ed6 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -79,7 +79,6 @@ class EmbeddingDatabase: self.word_embeddings[embedding.name] = embedding - # TODO changing between clip and open clip changes tokenization, which will cause embeddings to stop working ids = model.cond_stage_model.tokenize([embedding.name])[0] first_id = ids[0] diff --git a/modules/ui.py b/modules/ui.py index b79d24ee..5d2f5bad 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -368,7 +368,7 @@ def update_token_counter(text, steps): flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) prompts = [prompt_text for step, prompt_text in flat_prompts] - tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1]) + token_count, max_length = max([model_hijack.get_prompt_lengths(prompt) for prompt in prompts], key=lambda args: args[0]) style_class = ' class="red"' if (token_count > max_length) else "" return f"{token_count}/{max_length}" From f94cfc563bbedd923d5e95563a5e8d93c8516ac3 Mon Sep 17 00:00:00 2001 From: Mitchell Boot <47387831+Mitchell1711@users.noreply.github.com> Date: Sat, 7 Jan 2023 01:15:22 +0100 Subject: [PATCH 159/461] Changed HTML to textbox instead Using HTML caused an issue where the row would expand for a frame when changing the sliders because of the loading animation. This solution also doesn't use any additional HTML padding --- modules/ui.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 6fc8b7d7..6ea1b5d7 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -260,7 +260,7 @@ def calc_resolution_hires(x, y, scale): scaled_x = int(x * scale // 8) * 8 scaled_y = int(y * scale // 8) * 8 - return "

Upscaled resolution: "+str(scaled_x)+"x"+str(scaled_y)+"

" + return str(scaled_x)+"x"+str(scaled_y) def apply_styles(prompt, prompt_neg, style1_name, style2_name): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name]) @@ -726,7 +726,10 @@ def create_ui(): hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") with FormRow(elem_id="txt2img_hires_fix_row3"): - hr_final_resolution = gr.HTML(value=calc_resolution_hires(width.value, height.value, hr_scale.value), elem_id="txtimg_hr_finalres") + hr_final_resolution = gr.Textbox(value=calc_resolution_hires(width.value, height.value, hr_scale.value), + elem_id="txtimg_hr_finalres", + label="Upscaled resolution", + interactive=False) hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) From 08066676a47b560235d4c085dd3cfcb470b80997 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 07:22:07 +0300 Subject: [PATCH 160/461] make it not break on empty inputs; thank you tarded, we are --- modules/sd_hijack_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index ac3020d7..16aef76a 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -147,7 +147,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): chunk.multipliers += [weight] * emb_len position += embedding_length_in_tokens - if len(chunk.tokens) > 0: + if len(chunk.tokens) > 0 or len(chunks) == 0: next_chunk() return chunks, token_count From 1740c33547b62f692834c95914a2b295d51684c7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 07:48:44 +0300 Subject: [PATCH 161/461] more comments --- modules/sd_hijack_clip.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 16aef76a..5520c9b2 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -3,7 +3,7 @@ from collections import namedtuple import torch -from modules import prompt_parser, devices +from modules import prompt_parser, devices, sd_hijack from modules.shared import opts @@ -22,14 +22,24 @@ class PromptChunk: PromptChunkFix = namedtuple('PromptChunkFix', ['offset', 'embedding']) -"""This is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt chunk""" +"""An object of this type is a marker showing that textual inversion embedding's vectors have to placed at offset in the prompt +chunk. Thos objects are found in PromptChunk.fixes and, are placed into FrozenCLIPEmbedderWithCustomWordsBase.hijack.fixes, and finally +are applied by sd_hijack.EmbeddingsWithFixes's forward function.""" class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): + """A pytorch module that is a wrapper for FrozenCLIPEmbedder module. it enhances FrozenCLIPEmbedder, making it possible to + have unlimited prompt length and assign weights to tokens in prompt. 
+ """ + def __init__(self, wrapped, hijack): super().__init__() + self.wrapped = wrapped - self.hijack = hijack + """Original FrozenCLIPEmbedder module; can also be FrozenOpenCLIPEmbedder or xlmr.BertSeriesModelWithTransformation, + depending on model.""" + + self.hijack: sd_hijack.StableDiffusionModelHijack = hijack self.chunk_length = 75 def empty_chunk(self): @@ -55,7 +65,8 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): converts a batch of token ids (in python lists) into a single tensor with numeric respresentation of those tokens; All python lists with tokens are assumed to have same length, usually 77. if input is a list with B elements and each element has T tokens, expected output shape is (B, T, C), where C depends on - model - can be 768 and 1024 + model - can be 768 and 1024. + Among other things, this call will read self.hijack.fixes, apply it to its inputs, and clear it (setting it to None). """ raise NotImplementedError @@ -113,7 +124,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): last_comma = len(chunk.tokens) # this is when we are at the end of alloted 75 tokens for the current chunk, and the current token is not a comma. opts.comma_padding_backtrack - # is a setting that specifies that is there is a comma nearby, the text after comma should be moved out of this chunk and into the next. + # is a setting that specifies that if there is a comma nearby, the text after the comma should be moved out of this chunk and into the next. elif opts.comma_padding_backtrack != 0 and len(chunk.tokens) == self.chunk_length and last_comma != -1 and len(chunk.tokens) - last_comma <= opts.comma_padding_backtrack: break_location = last_comma + 1 From de9738044571877450d1038e18f1ecce93d24af3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 08:53:53 +0300 Subject: [PATCH 162/461] this breaks on default config because width, height, hr_scale are None at that point. 
--- modules/ui.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index f946382d..a18b9007 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -725,14 +725,8 @@ def create_ui(): hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - with FormRow(elem_id="txt2img_hires_fix_row3"): - hr_final_resolution = gr.Textbox(value=calc_resolution_hires(width.value, height.value, hr_scale.value), - elem_id="txtimg_hr_finalres", - label="Upscaled resolution", - interactive=False) - hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) - width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) - height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + with FormRow(elem_id="txt2img_hires_fix_row3"): + hr_final_resolution = gr.Textbox(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) elif category == "batch": if not opts.dimensions_and_batch_together: @@ -744,6 +738,10 @@ def create_ui(): with FormGroup(elem_id="txt2img_script_container"): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() + hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) From 1a5b86ad65fd738eadea1ad72f4abad3a4aabf17 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 09:56:37 +0300 Subject: [PATCH 163/461] rework hires fix preview for #6437: movie it to where it takes less place, make it actually account for all relevant sliders and calculate dimensions correctly --- modules/processing.py | 1 - modules/ui.py | 40 +++++++++++++++++++++++++++------------- modules/ui_components.py | 8 ++++++++ style.css | 17 +++++++++++++++++ 4 files changed, 52 insertions(+), 14 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index a408d622..82157bc9 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -711,7 +711,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.truncate_x = 0 self.truncate_y = 0 - def init(self, all_prompts, all_seeds, all_subseeds): if self.enable_hr: if self.hr_resize_x == 0 and self.hr_resize_y == 0: diff --git a/modules/ui.py b/modules/ui.py index a18b9007..6c765262 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -20,7 +20,7 @@ from PIL import Image, PngImagePlugin from modules.call_queue import wrap_gradio_gpu_call, wrap_queued_call, wrap_gradio_call from modules import sd_hijack, sd_models, localization, script_callbacks, ui_extensions, deepbooru -from modules.ui_components import FormRow, FormGroup, ToolButton +from modules.ui_components import FormRow, FormGroup, ToolButton, FormHTML from modules.paths import 
script_path from modules.shared import opts, cmd_opts, restricted_opts @@ -255,12 +255,20 @@ def add_style(name: str, prompt: str, negative_prompt: str): return [gr.Dropdown.update(visible=True, choices=list(shared.prompt_styles.styles)) for _ in range(4)] -def calc_resolution_hires(x, y, scale): - #final res can only be a multiple of 8 - scaled_x = int(x * scale // 8) * 8 - scaled_y = int(y * scale // 8) * 8 - - return str(scaled_x)+"x"+str(scaled_y) + +def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resize_y): + from modules import processing, devices + + if not enable: + return "" + + p = processing.StableDiffusionProcessingTxt2Img(width=width, height=height, enable_hr=True, hr_scale=hr_scale, hr_resize_x=hr_resize_x, hr_resize_y=hr_resize_y) + + with devices.autocast(): + p.init([""], [0], [0]) + + return f"resize to: {p.hr_upscale_to_x}x{p.hr_upscale_to_y}" + def apply_styles(prompt, prompt_neg, style1_name, style2_name): prompt = shared.prompt_styles.apply_styles_to_prompt(prompt, [style1_name, style2_name]) @@ -712,6 +720,7 @@ def create_ui(): restore_faces = gr.Checkbox(label='Restore faces', value=False, visible=len(shared.face_restorers) > 1, elem_id="txt2img_restore_faces") tiling = gr.Checkbox(label='Tiling', value=False, elem_id="txt2img_tiling") enable_hr = gr.Checkbox(label='Hires. fix', value=False, elem_id="txt2img_enable_hr") + hr_final_resolution = FormHTML(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) elif category == "hires_fix": with FormGroup(visible=False, elem_id="txt2img_hires_fix") as hr_options: @@ -724,9 +733,6 @@ def create_ui(): hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0, elem_id="txt2img_hr_scale") hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0, elem_id="txt2img_hr_resize_x") hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0, elem_id="txt2img_hr_resize_y") - - with FormRow(elem_id="txt2img_hires_fix_row3"): - hr_final_resolution = gr.Textbox(value="", elem_id="txtimg_hr_finalres", label="Upscaled resolution", interactive=False) elif category == "batch": if not opts.dimensions_and_batch_together: @@ -738,9 +744,16 @@ def create_ui(): with FormGroup(elem_id="txt2img_script_container"): custom_inputs = modules.scripts.scripts_txt2img.setup_ui() - hr_scale.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) - width.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) - height.change(fn=calc_resolution_hires, inputs=[width, height, hr_scale], outputs=hr_final_resolution, show_progress=False) + hr_resolution_preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y] + hr_resolution_preview_args = dict( + fn=calc_resolution_hires, + inputs=hr_resolution_preview_inputs, + outputs=[hr_final_resolution], + show_progress=False + ) + + for input in hr_resolution_preview_inputs: + input.change(**hr_resolution_preview_args) txt2img_gallery, generation_info, html_info, html_log = create_output_panel("txt2img", opts.outdir_txt2img_samples) parameters_copypaste.bind_buttons({"txt2img": txt2img_paste}, None, txt2img_prompt) @@ -803,6 +816,7 @@ def create_ui(): fn=lambda x: gr_show(x), inputs=[enable_hr], outputs=[hr_options], + show_progress = False, ) txt2img_paste_fields = [ diff --git a/modules/ui_components.py 
b/modules/ui_components.py index 91eb0e3d..cac001dc 100644 --- a/modules/ui_components.py +++ b/modules/ui_components.py @@ -23,3 +23,11 @@ class FormGroup(gr.Group, gr.components.FormComponent): def get_block_name(self): return "group" + + +class FormHTML(gr.HTML, gr.components.FormComponent): + """Same as gr.HTML but fits inside gradio forms""" + + def get_block_name(self): + return "html" + diff --git a/style.css b/style.css index f1b23b53..76721756 100644 --- a/style.css +++ b/style.css @@ -642,6 +642,23 @@ footer { opacity: 0.85; } +#txtimg_hr_finalres{ + min-height: 0 !important; + padding: .625rem .75rem; + margin-left: -0.75em + +} + +#txtimg_hr_finalres .resolution{ + font-weight: bold; +} + +#txt2img_checkboxes > div > div{ + flex: 0; + white-space: nowrap; + min-width: auto; +} + /* The following handles localization for right-to-left (RTL) languages like Arabic. The rtl media type will only be activated by the logic in javascript/localization.js. If you change anything above, you need to make sure it is RTL compliant by just running From a36e2744e2b18a2582247bc5b95bfa0339dfa629 Mon Sep 17 00:00:00 2001 From: Taithrah Date: Sat, 7 Jan 2023 04:09:02 -0500 Subject: [PATCH 164/461] Update hints.js Small touch up to hints --- javascript/hints.js | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index dda66e09..73ab4a26 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -4,7 +4,7 @@ titles = { "Sampling steps": "How many times to improve the generated image iteratively; higher values take longer; very low values can produce bad results", "Sampling method": "Which algorithm to use to produce the image", "GFPGAN": "Restore low quality faces using GFPGAN neural network", - "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps to higher than 30-40 does not help", + "Euler a": "Euler Ancestral - very creative, each can get a completely different picture depending on step count, setting steps higher than 30-40 does not help", "DDIM": "Denoising Diffusion Implicit Models - best at inpainting", "DPM adaptive": "Ignores step count - uses a number of steps determined by the CFG and resolution", @@ -12,8 +12,8 @@ titles = { "Batch size": "How many image to create in a single batch", "CFG Scale": "Classifier Free Guidance Scale - how strongly the image should conform to prompt - lower values produce more creative results", "Seed": "A value that determines the output of random number generator - if you create an image with same parameters and seed as another image, you'll get the same result", - "\u{1f3b2}\ufe0f": "Set seed to -1, which will cause a new random number to be used every time", - "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed", + "\u{1f3b2}\ufe0f": "Set seed to -1 will set a new random number every time.", + "\u267b\ufe0f": "Reuse seed from last generation, most useful if it was randomized.", "\u{1f3a8}": "Add a random artist to the prompt.", "\u2199\ufe0f": "Read generation parameters from prompt or last generation if prompt is empty into user interface.", "\u{1f4c2}": "Open images output directory", @@ -74,7 +74,7 @@ titles = { "Style 1": "Style to apply; styles have components for both positive and negative prompts and apply to both", "Style 2": "Style to apply; styles have components for both positive and negative prompts and apply to both", "Apply style": "Insert selected styles into 
prompt fields", - "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style use that as placeholder for your prompt when you use the style in the future.", + "Create style": "Save current prompts as a style. If you add the token {prompt} to the text, the style uses that as a placeholder for your prompt when you use the style in the future.", "Checkpoint name": "Loads weights from checkpoint before making images. You can either use hash or a part of filename (as seen in settings) for checkpoint name. Recommended to use with Y axis for less switching.", "Inpainting conditioning mask strength": "Only applies to inpainting models. Determines how strongly to mask off the original image for inpainting and img2img. 1.0 means fully masked, which is the default behaviour. 0.0 means a fully unmasked conditioning. Lower values will help preserve the overall composition of the image, but will struggle with large changes.", @@ -92,12 +92,12 @@ titles = { "Weighted sum": "Result = A * (1 - M) + B * M", "Add difference": "Result = A + (B - C) * M", - "Learning rate": "how fast should the training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.", + "Learning rate": "How fast should training go. Low values will take longer to train, high values may fail to converge (not generate accurate results) and/or may break the embedding (This has happened if you see Loss: nan in the training info textbox. If this happens, you need to manually restore your embedding from an older not-broken backup).\n\nYou can set a single numeric value, or multiple learning rates using the syntax:\n\n rate_1:max_steps_1, rate_2:max_steps_2, ...\n\nEG: 0.005:100, 1e-3:1000, 1e-5\n\nWill train with rate of 0.005 for first 100 steps, then 1e-3 until 1000 steps, then 1e-5 for all remaining steps.", "Clip skip": "Early stopping parameter for CLIP model; 1 is stop at last layer as usual, 2 is stop at penultimate layer, etc.", - "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resoluton and lower quality.", - "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resoluton and extremely low quality.", + "Approx NN": "Cheap neural network approximation. Very fast compared to VAE, but produces pictures with 4 times smaller horizontal/vertical resolution and lower quality.", + "Approx cheap": "Very cheap approximation. Very fast compared to VAE, but produces pictures with 8 times smaller horizontal/vertical resolution and extremely low quality.", "Hires. fix": "Use a two step process to partially create an image at smaller resolution, upscale, and then improve details in it without changing composition", "Hires steps": "Number of sampling steps for upscaled picture. 
If 0, uses same as for original.", From 0fc1848e40dbd46c93753a2937403e1139ecd366 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sat, 7 Jan 2023 11:25:41 +0200 Subject: [PATCH 165/461] CI: Use native actions/setup-python caching --- .github/workflows/on_pull_request.yaml | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/.github/workflows/on_pull_request.yaml b/.github/workflows/on_pull_request.yaml index b097d180..a168be5b 100644 --- a/.github/workflows/on_pull_request.yaml +++ b/.github/workflows/on_pull_request.yaml @@ -19,22 +19,19 @@ jobs: - name: Checkout Code uses: actions/checkout@v3 - name: Set up Python 3.10 - uses: actions/setup-python@v3 + uses: actions/setup-python@v4 with: python-version: 3.10.6 - - uses: actions/cache@v2 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} - restore-keys: | - ${{ runner.os }}-pip- + cache: pip + cache-dependency-path: | + **/requirements*txt - name: Install PyLint run: | python -m pip install --upgrade pip pip install pylint # This lets PyLint check to see if it can resolve imports - name: Install dependencies - run : | + run: | export COMMANDLINE_ARGS="--skip-torch-cuda-test --exit" python launch.py - name: Analysing the code with pylint From a77873974b97618351791ea3015639be7d9f98d1 Mon Sep 17 00:00:00 2001 From: Aarni Koskela Date: Sat, 7 Jan 2023 11:34:02 +0200 Subject: [PATCH 166/461] ... also for tests. --- .github/workflows/run_tests.yaml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index 49dc92bd..ecb9012a 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -14,11 +14,9 @@ jobs: uses: actions/setup-python@v4 with: python-version: 3.10.6 - - uses: actions/cache@v3 - with: - path: ~/.cache/pip - key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }} - restore-keys: ${{ runner.os }}-pip- + cache: pip + cache-dependency-path: | + **/requirements*txt - name: Run tests run: python launch.py --tests basic_features --no-half --disable-opt-split-attention --use-cpu all --skip-torch-cuda-test - name: Upload main app stdout-stderr From fdfce4711076c2ebac1089bac8169d043eb7978f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 13:29:47 +0300 Subject: [PATCH 167/461] add "from" resolution for hires fix to be less confusing. 
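As context for this change and for the calc_resolution_hires rewrite earlier in the series, here is a minimal, self-contained Gradio sketch of the live "resize: from WxH to WxH" preview (illustrative only, not part of any patch in this series). preview_upscale() and the demo layout are hypothetical stand-ins: the real webui delegates the target-resolution computation to StableDiffusionProcessingTxt2Img.init(), whereas this sketch assumes a non-zero "Resize width/height to" value is taken directly as the upscale target and the plain scale path reuses the multiple-of-8 rounding of the old helper.

import gradio as gr

def preview_upscale(enable, width, height, hr_scale, hr_resize_x, hr_resize_y):
    # Return the "resize: from WxH to WxH" string shown next to the Hires. fix checkbox.
    if not enable:
        return ""
    width, height = int(width), int(height)
    if hr_resize_x or hr_resize_y:
        # assumption for this sketch: a non-zero "resize to" value wins outright
        to_x = int(hr_resize_x) or width
        to_y = int(hr_resize_y) or height
    else:
        # multiple-of-8 rounding, as in the old calc_resolution_hires helper
        to_x = int(width * hr_scale // 8) * 8
        to_y = int(height * hr_scale // 8) * 8
    return f"resize: from {width}x{height} to {to_x}x{to_y}"

with gr.Blocks() as demo:
    enable_hr = gr.Checkbox(label="Hires. fix", value=False)
    width = gr.Slider(minimum=64, maximum=2048, step=8, label="Width", value=512)
    height = gr.Slider(minimum=64, maximum=2048, step=8, label="Height", value=512)
    hr_scale = gr.Slider(minimum=1.0, maximum=4.0, step=0.05, label="Upscale by", value=2.0)
    hr_resize_x = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize width to", value=0)
    hr_resize_y = gr.Slider(minimum=0, maximum=2048, step=8, label="Resize height to", value=0)
    hr_final_resolution = gr.HTML(value="")

    # Same pattern as the patch: build one kwargs dict and bind it to every
    # input's .change() event so the preview refreshes whenever anything moves.
    preview_inputs = [enable_hr, width, height, hr_scale, hr_resize_x, hr_resize_y]
    preview_args = dict(
        fn=preview_upscale,
        inputs=preview_inputs,
        outputs=[hr_final_resolution],
        show_progress=False,
    )
    for component in preview_inputs:
        component.change(**preview_args)

if __name__ == "__main__":
    demo.launch()

Binding a single kwargs dict to every input's .change() event, with show_progress=False, is the same pattern the patch itself uses, so the preview string refreshes immediately as any of the six controls is moved.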
--- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 6c765262..99483130 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -267,7 +267,7 @@ def calc_resolution_hires(enable, width, height, hr_scale, hr_resize_x, hr_resiz with devices.autocast(): p.init([""], [0], [0]) - return f"resize to: {p.hr_upscale_to_x}x{p.hr_upscale_to_y}" + return f"resize: from {width}x{height} to {p.hr_upscale_to_x}x{p.hr_upscale_to_y}" def apply_styles(prompt, prompt_neg, style1_name, style2_name): From 151233399c4b79934bdbb7c12a97eeb6499572fb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 7 Jan 2023 13:30:06 +0300 Subject: [PATCH 168/461] new screenshot --- README.md | 9 +++------ screenshot.png | Bin 525075 -> 420577 bytes txt2img_Screenshot.png | Bin 337094 -> 0 bytes 3 files changed, 3 insertions(+), 6 deletions(-) delete mode 100644 txt2img_Screenshot.png diff --git a/README.md b/README.md index fea6cb35..d783fdf0 100644 --- a/README.md +++ b/README.md @@ -1,9 +1,7 @@ # Stable Diffusion web UI A browser interface based on Gradio library for Stable Diffusion. -![](txt2img_Screenshot.png) - -Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) wiki page for extra scripts developed by users. +![](screenshot.png) ## Features [Detailed feature showcase with images](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features): @@ -97,9 +95,8 @@ Alternatively, use online services (like Google Colab): 1. Install [Python 3.10.6](https://www.python.org/downloads/windows/), checking "Add Python to PATH" 2. Install [git](https://git-scm.com/download/win). 3. Download the stable-diffusion-webui repository, for example by running `git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git`. -4. Place `model.ckpt` in the `models` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). -5. _*(Optional)*_ Place `GFPGANv1.4.pth` in the base directory, alongside `webui.py` (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). -6. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. +4. Place stable diffusion checkpoint (`model.ckpt`) in the `models/Stable-diffusion` directory (see [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) for where to get it). +5. Run `webui-user.bat` from Windows Explorer as normal, non-administrator, user. ### Automatic Installation on Linux 1. 
Install the dependencies:
diff --git a/screenshot.png b/screenshot.png
index 86c3209fe3a3b92e5afa584e9e6dcd0b3dcf2ecf..47a1be4ec43e315f3e47139b10b0f9a8045904f3 100644
[GIT binary patch for screenshot.png (420577-byte literal) omitted]
zGCrpNS{MF; zaQ({snYXqIR=7U$I7&UcSkD!xDD~`T*XZo=ybC$RX3kj7T8O?Wwp#2(wjuH?2kukx zvAnox0`eP4UUQKC;ap|94Jt3=gPN+jp|?g@NG2Y_y1}1DS@z19LH0OhJ8s5NDb}l3 z)O{9W^Xhc)jM>S)<0)6slxBH2=XszB?!=D_Fopo07t>5WHxv9eM82ob$o1{XtWy}4 z8QfR`9!O5MV3o}_-m-&q_vulmy0HK`*@Z!<7Q3ipyANMj>WLTYURgm|?ZEtB7TK7; zXHxVM^|e4W9cE&ADc0#zJ&Z(C2-X@ znY^d>zvtHX`O|uADE9xW`Oog(&;z*J`POSYqEl;B5&L;b8h;1IxN7j9V2GuEe`V?U zpA*k&o$~R7|2L9~BV^+VtV-mzDfyBqjknN(rDW?3Q83gjilW&pk8H3-wkah?WM8fx zM&kpsT_nI_5W|vNi$bzWl}d$QX)qJV+e0aTV#Gi98y!*kVBn-J0tZWf-NQR()rnL} zGFULz`B_1uBG0fydAYB>16@yJf0OO-d$%Rd#EP1>Gxu-K23otzx17lhy4oo-+vd8= zSeEY;v{S>Ev{*L1g6>w!hYa8vm)07s$wGsA-j;LCbY!uMd7fUD6TIoh?27P9GrwQu zZe(6XU@@?DN&IhDh6nTRk5 z-$-uIjsMFq{nv04^65C%NpB5xlLx6GZ~jZQHBw_1EucJl_LP0pROBZ_dONvKFWPo{ zCW})(VR2knlB;B2C~JoG>Tm|w_!xRU+Hhr1la#McEZ+QRjCd&&V?11J6&f3F0;XaL zsIz5KktS>MB>C?v!AQi9W}skjX!kWuTQwEJ?m6H}7tZ8ULe+~$+}fJ+AOqtaSu3e& zJE9_%5LO!;fCPTR=vRebC#^g)*opyZ{fbsRi?V^1{k?55{8yp?8Ate;e{>LT7Y|B} zyJ8al$CHHM9i0K{-R_g+&g;%p=jBYzQ%*&Psl<&v#_cXA*F3oC*^qtuY z63yb4#J@>v&9a?@Svl7dwG_p9FpFc%inIsG0<>wWVn#R#Sl=?xhU27p+1?@=wc_zi z{G&|N)5=LQP~o@qUSNq1DAnc^^qrFa&X^*~K3EbnSyuNZ{qCYLWFg*4JZ zIvnRt`5ppis-TnLy^$AbxeXQ*8T^?7Wi`)=T+ztl(GHrWT@)pf{K!P#W`;(u86);z z-*G!8lk>83q;4BHl5Vo~^(}I?+dn5-d|KqNe=HYE6g|IF zEtkr-zqDM^%%Jt+&ou^!@uFCZuI|e>;BF#b)?lw4w~bU5K-E#y{-s_Vu72YP$?9;B z&WpGlnN+GH=k?A5&?$cus;1I9=4b>win^8lb1k;*{X_t(GyB!$0mqY60T@rp2ZB!= zA1&bd<0#)w{_|?H9;;BJH8o48;@z`WNmX>G;{y~OK+9G4pC#JOCE~x=^wU6{ai#xE zPOiO+(ybe&AL$rbw{t*tZH;=2|J4gsDhkpCH>}kz_Em!8O0jJeTfmdgrxLkCTq`(9 ztM}H!JT7pv!j<< zgsfZUSB2zFQ?5#+G#&<)X&cE?=U8Uk72)%fWnOu;g6>|4XfqKN!YuG!VOgbCN`K?2 zUa`C|mPN)f&Rvo&=XT>j-RAda3)qIlM(U7BK~rsa);b^fbyFX=X)$%*cN^DlF4;NU zteTx6(}Pn99IjM>rH7jHWV!$q$&}3aqT)3!_3Mwspac`8{-ebLMPLRa`fw6m9|ZA4 z-3!Zc7qRLxkTGv@SEJX;Xk&9nuJ@eOsrb#FYQm96#bq&U=n`A5%RzDwNfo{>ll9#G zXH=c8vdgdnngiP(FV<6i!_wE&q9;+UTmOt(%4rHua;-Q25VFOSUV@>k%|H`>V%fTG zRj1|17}vBIepAs;)3A~&YpJD24age%sIdhA_l#}CbPTjs%JNuE2dM=`&;fKtdem$= zJHXn)wFjq#)UK2?GF$GfbvwxzMk0%fe^l^RRp)BjOINm4I`kyBk*ileR4jY7fW zyde1(G|LxIJDImFPSFY;YEKoRVD!CH{v!I5cP*rk*VOW`qch)-1|E9qN^wOH@Vtly^Ay-yhjhch9uo?_rxa8|_8c{&B;H=kNl&Bb#bNeF!9 zn+xdw>i6VUF#9L&etR?0znhoMs~?UWS&R9l0SQBaJ27A)H|cIiP4C_kfYqqKDwg3Y zkm7xzVRN^Wc8I?2A`jm#Y_a>-@&qgp8s3>)sG~I2Z>zI{n#e;n%~s`qHqB=Y2?RCU_KJRle#Zb!j&Aa|k@Omc|25e+TE{dW?i9Q`L-f)9e~uDAcW;dY6A2 zVb($weH>S6iVJ7{kY+YThKbtELkac(9~DPq&|HEd3>*~Zev$KA;u;BcVx6qZr|t%X zlOm=3$@SU3Tp(*?oIbpmBU6YOTP;-=70{ttEw`8bcE5;kvR2NL3&TEW=EOE7$v$jj zhhW#FvzM?6e=1Y!luaL(d5|Sqt5Rm&nzcYNXCN+LSSFIX;!1jdSHzHQQ=t8nT|`%V z5n+21FRH0iFTAeNSaXXD&Cuun!u9d6od55(x}O&P%6%lOO*|PWL^Iu|p)>y(?bzpA zgl;2C*Lo@{-(~P*tv(TY{2!=7_9m(TN6Lc};1k9=?K1xzYbuUB+;z()s!o0~ zHqo19TIo#pIsCtBb{H|mQeH*q#;QtLuXsfz7<<5k?kct<%K?m-rDXC&Y1UCn)s%}r zn6Rz!-F~)Qs9|XYR7aB4EK0cFdrh=>M})jVm5QjAe(lGw!*L5#+O4zPJZ)Bc!BVFa?J*FJi|JAwQ>j zlDT%kG%KSw#9pnh)+3 z14=+3>4P{AFo_D?jk4&NaSQ>qKx?IO4b(IUmiAkePGY!&~_G_E7j7mz-V1Cimvk)q8-o)N&8L;+stz2#jnl-M3cV-SrrE0D&y zqa3E5b38%a+7~<>XghP|nO{>i9Bq3wGB{85;Y}`GqG1{$8Xs620=~PN&I$Uev}d9> z*I|%OKHTW9LC5oef9Lb)n&VR%9kVvEyAXauX>Mc=t#ATd>&#~9Y^Eq)tja0&^XtO0 zvoNg3O>07{-!lEhD7eeF@740GDGi~O+_tl}=qSzfuEbx?9}&4XbP}Z_H?z^YTWORD zve7yV@z8=QkQgF%7qU`nL1(??7~h^eDBcU84&yhXOo5Ry2hY@fXA0{QD71B6%<9ab zEGpIdNauh2t#phjz|-1sn9uyiA9p2i#c{YSZUIs+pg+PJZOA1KAmIr~KNU{gIeS$p z6)wDa%WSw{PhD1*e1=05zC_=J+;Pia)cquJ_0fS-U9@pu2d`{x8Yy*aXp$JTtaCYE zeZDl;y|NupVS^y(NUxm(j?8wcT*d(8v&l#05RMS1AB*{bB0F|E*cC^b_$DZ z;j8GgX_qyBe7=bG)f7_1|uoZUA zz!4gBTX3zRsj|wK&;baG7%h9Az6g26^s~FwF#!u4#rJ@Nr`_Kx)(=+8wL}c)7YqDt zj0^`C^Q;=bunprmWst1W7ZQ*K@J-jc;c{O&2Hm6)rovhWY}|g@0oq4Mbx}_F1-9U! 
zlHFiiCm17`P;Cx$ZWd9`)uj@}g~-XKFIX>@BV60DdG8qOqM0P61Py`|#>+zM%-!lW zM>P#UQv>||SvnIxNcBCxUSoYcj-KiOtpnfQt^<^w53XHygrdxkTmtqD4_+Qx-%e+_ z3?pB^ulp)9EcIz3v=ZBd>X`Gtp6DI}S~>sY5)cdHCkEb$f)86h9}-sOW3?9b!fpTU zN{}y@t@p?P&0DX--ERKdont_`Wac^R*a~(}g1hH*D=N)5G|`=lYxYkQY~Z^nvfg@p zMny!n68auAW-Sh_Mz)zqi0e zwibUCM6y~`Rm)4hTwv{st58}k46RV36JyzCEhO@xY1-5jLww~N^m1~gUwm)^T7`E@ zua>VR6jTDlr&vf8Jl^%J44HHuCopByvR^wu24|as{2p!qn#}jbGQoD|l2cGd!wJ^SR?- z3rpvL%(^<$M7=KpKNL}lfvfqgx)glz^FbbIW@?|1JhGg^5$wR`WG$ zKgpbPV8@0xVjIzHrq;-!pF!7P+#&WO#qh59EQzY~ zGW@~02Zk}Erj ztukzJdpm{ajgydCv>FGIBOd{il~z(~RTv+KOZr=@iND~=Qa(GyM`fy8#V%zxjg$u{ zrU}NVTO2h#Y&{p8%~RPMC`-y*SJbf#hQs(QmWu|K;3sKg3*ie&zogakHOKGCfK_wh{|* z7UK3ca=;Bq$U1B30hhrlNyu6U(X|ci5j)IRA+0`zDA}T+|FHa7XrV` z?&&yCz~F$5=_dQmXpYnK=GB$LzL$Kw+CEa-L-zWO`rES2@h>jk*eZ(h(h_m>i zfuKCxw2b>S;w~5=*KIi{$g3cUxwO38KlxGdcAyMWx zo?O2yDvxIUxn~RI%vOzN18)8TxtC^%lP|axs1{kHrUr8*TkC*1W6gx-=K{kbdrzO5 zZ(s}hWla>VP<=e_?nZ=LwF;XfzCoT)<1A(+<%G0|nk-f&m|qrM(S>FWIH(a^0on!y ziy4r$U$`yYmBNrQc#Y@)wQS<%K6(Ul3%c{>TCTj0jxqlAk}c;3#JxY1Gt(GU#Y4MF zE0pNOlq(%l=YDh26di2$!sWiS42Y|h)rPeX8pjC!;UT@LI|H}93R_l1%p_;7VB99oF9ru z;i^CR!eixGYd@PGiktZynycjZW|Jv6UDji^x|vRDM-%#3RQgvX8;bZwx5);&^0FdW$q66a*QOHi_(~q*`&zant#WzvS zwoy-KIz7wQfxaTRGzstm$1z6R!Bno9z?ip@1^b_TL)H>vIlMrR#W@Dp{q>w$cE4sc zT0p2!a_vcV7{T?g{2R3(&Q!`ISkA^D@5yGN?;xBc8(i=_z)I__Hu=ZrY@*{&Gs3?5@n(NC83e-_ggS2CLvtuy&5-`<#2|J zU`A9lcy$V2a8(q)X85NlCTCp0Q3@c#CP{QTH4sdA-dM~aqpO3_ED{e~W`HwXmnAik zur1PTrErjkQQ?fZ-8GpQ0BOXm+0QBanrZW7_CsFQ4EqEw)4)!1odNZ*w=%#ck zJ56Lp7TE!a+t6PZVg~v67JZ{hw^#8a-fI-!ZjnbA{|HYN#f>nsk~t__CZ%Q0`GrFD zuCB9MB~u6I79l=*CweZ$4`F;wiSpC%mrNO5Cuxiok?SSlW{QhO2#N+94fe(`o$b`9 zgJ#9-y7@+N^r(5SrTf+%uNKo{gsdkbiX{=b3l!t^Yrzv@OvFPhR^&-*!&f4R_Vown z?9DGv-m!lT=0bj#n(Wq~zM8sAbva(iUQegpZ`zCv)FLBpoX|TN&uMT$+c_71S}5mb zZ%ON3ia)78{-AFrHOa#ZT>Gw3VLGu*ReF7_>{#_KljZ;$B@OA8 zXvGAJ#Xq<@9-$!r!w?JMyaf|Zp}hUUH+TOzyHkV(ckKM%AX{+DY^1AI>HiqNtH9RD zOv-0j@MvY0rBF%f9jK-n0{-v_%_^^k?H?moouU%;OQkrXC4G|J-r&ay_%>L7L6&4i zA4|T^AoM?&bzh~zA;eT{u`|N^U(8C`2A|tJ%(WE-(MdaogRJ~SV6Fi8Msy8C#qpsP z&spW|rI)AzF)-~z4T@~MB^-tN?SK|}bSzutKnDie1s|4Ja96MWFN_T;u42=6Y&RL( zP-~&Y4<*0v3w#H>JE-+JGtXG=6lPI)iOuv8x>oWNthzq6o+Z1IRAGPIOpl1xWv8QW zq<&$s#=rjeU@G2+`Jw_r3}u6L?>CRNFYls#`B%8j4UG0y!baUq^lt5jQh~@#q_e(e zjNT`FwzJ=O=Ti2#0YiCLYzjwto}^DTY~Gfm&pa?qvm1%-fGFp3c|7N-h(Rgd1e z70<~PVTQ=vji6@`gY?>bREmDBO!Dj^?_A`5xIC_}p5XHiI1P^xQ~z(ta}iT1&O1#y zlSBumbR=&6ubuceHUqHc59lzRK@_qY{=I#2zqOM(f+Q(k$4+A9?Av3}KIm;jcO|Q9 zq`vgMT4uU&3Tyc7WKGT{a`$tQYpSoZ0*)9DQFV34r2&?ccx68~P5Su^wBNsm>~slVjX6 z!>GbHJIrLo(BuI6kg0DUFp3kn%xb zkJ~hwH{Y!{zI7<`M}!zY?nTvYEB_#w>|NF0f!OjB9WBkjt@ z7jPX%_@j`7GTKkWMbwEuBPsbn3pFJVY;&vO=A3zf8)Z@Y zIuxFv(FG85SuQo!AqpBvZ`6TPJ{)A$F3O&nY(vI+pt)yleh)9btfg`d)D@8;W5qgU z?%>8}PXEoWmO2ylGfDTPwC;qfw5>Vo1cVvbg3g-KUdaQcxrD{MYcr7w*_|lCuh3@Rb_cYCc%Q z)+6_)HT*g9AVHOSaB3-9QmGo$3(av$Io}$05pj8>cyKCv8fsON)d8cdQyR${Pz7$< zgOatWe6&y1umNw|w(fv97{J$j!&;@}`r?2^^Z#(a!%u(U9eAL6!#^}xTdN|;#-@;RLYHE>3Z7%q-@ftt9CGy_sDY>~e!2l!3x#Wda{CK7*S7&Z z)!?oWbjDl30eIJ4tn{SgeYBGacVMu}U(4|*Lf_^r!?Q1v7}X&IP!OW)8fO$eeXjdn zp3yeB8@|Tfyx(oR{@HoG;_PycMTH2YCjIl@mlsJGq^KU;O$a0`r043NurlkK`MvGl zC&8y*{nxqbwxOwJ#`2Dat}&K9&2}2KN!lFwQ&mQ7mykd&6>?r9cp{cqkc)hIBEww!2@%GTFVcP+KezK{I9K^h?nWmVs{DY%T>lR5AulI66=s-@ zDC%=c=5q=$a;+hJkQXXlkilH?yF=hM0zXCKY{2gMNj}_i_6K*HKXNrd(MaPHE~fee z*sEcs^GZX1AsFs=sp({&J@LSb07@^m*ZW-}VqSv6Ao(c5pipp3I47|qA|Q&x1LL01 zsQHbi8;9JXoR#bao2!=%UO)k=;yX(GMm0%{)pNE%m7fQjuiEgrKtEU_}JwpCmxU!;WQ^v87~q#u9OB+Tj| zf_RVide!mkkle0pEe0JX5!cjbg8WA9xb5=3tui$It;$5%%?gFJFEP(rPMj+YUSbq5 zyFYgoIJmXiJwmxnqH|1VbOC>tou+UquQ=q^-S8$|n5ZkHDE*Xp9m~^!V!){u& 
z*D|HIbAA$Clzs+E(0y`IEgZ3a7y`>a9lw4CdS`yhoX!vzG~HuZ)#Q8$ zE}~)9{@GtB2k&utp0h5!so(cyCfjP04s*Xt25p7_YrjJTt<8;odKEZm)rrX7Y$?j& zj*ktH2W&MkkzzeVS$GEoT06Me^Z7-Md#ItTp{@jsm{lt>hh{k3;aRK)AX}F+ksiuD zL~V*sxt9BcYVEP%-T6@(_g(*>epUnw4UB7Hvg1YVFODH_D3l3cC0avn^GTYm5um18 zn4DtCRyM*yEoe!~6rhOfc{?8HZ;~QAU;e@<{v$#bk^SU{OB4J@Xk^lpl^IiHPR?NSW7fky3y@L65dWvy9s!e^9cq z7B&Xk660l+8wBz>uUg49iip93MdcISM#j#OgG!%)#?`1W>U`faCa`{jt z!C{8F1!rt?S;3#E6l+o%p_lxkGKqWA%-9$Fx|rKPfEhl^y#B~0*Yt*n!5}!QS(fi4 z$6kJeoo|{oot|OMg{Bg>gzK3d_HT!+k=lqeMXaO%p9U%x67S3mILI78G^H^! zq2mfgiRMEK%P_vKLPF8Q|8c0_{*8cGC^7+D^hs1@tA8^M)!SiXe#@c*vk4UO}eeT$A zd}}c0Sg4Ej5l6W;(z~^hQvy1`_4??oZ$7~7`uH5bE+FoL=tTiEx+jZrMNk8g#B)2{ zK(7cgGDlOx7C(&SrKO1PGu}il6gprAcomPHmPSg6ugJ7=|3Xfnl0P-<=5{G7X9DgY3jDGrM;nr z;s_M)YSt%2Tfh4Y?2c8;-|4c5+9q+6SS;48tHi~N-$4pe^w*{CcI{(T zxh;L$9$Fk$!RlfwEeftj*PJ4cjNg7Zu8Ql$^A2sK?ClnPxeoj1RG$a4%HZNuTcAaX zGRpw}w^B{q2xJnnR7GBcIty7IekD&o({(jhFU^U2ISR;|Rno{^z$K zYr8GEeK$6`FE)9XKAdrF^q#KNvBvm7-!J5vNsHkwarl+dg$9}|gl;l#ZcK&{(?;~I z3G89hZFDckxgf(%WYOw-GLy=DO%0GcUr%6K>z&oAoG- z!iFK`n%}>x)|~0Izd$T9z(qCJQVCBy(oMOSbcNLUd8b!q%sS;pL8+n8I~AXa%)8=a zMuhe)HqJQ$tO_5gSVUw05FSF|E&Do(Gsi2b^}+?_`0zk%3mNo6Rdw%10^dx`F~u5o zZsu5voXmTWbWRwaGFLYrbSxrG-SF!1gzBv_O$vN7v^vgTTkLW3!xD?Q4WFM3Ev^=Xfg{!Kr%B;lFIC=f1tbiY5Z~%#&g6uLfU6?4itdxT}v9NmIfZiGF%o_YE9 zryNW6ZyA80QPMn4h8=&PY1Fu8h64*;b4{5rzybOw$3it&2OdGdym3wnTQ<AGiSA{ zDjO^nf#1Ew&S`r5oAUB2ICv?w#>GXX_6f*{zZ(9R+(J1-HgYW2R)ASr!s%Gst28%DZ z(Ip*We?p3LD%KTHC4w`ARc*x&bFBHtZLsMp&XK*RsARf8QrkLN+dA}qYZ`17d+@BJ zsaFc#ka;cRR@D8TvKge}Q2QF>SBeu@ZOcetEN8^JS#$`)qjaT*xW%{B_GP5W4S|NH zNy6&atp3(blU1=PcZxS1SkFWuXMstNvCh z9m1|PK@RG&qbZk$uknig;L0K^9);)xt!QnS2OkS?YOOo|)H0Rmr$|H5a&ckk-^JwV zg_gcv5pyZ$G}&ZFU8#ZI)sg71PYWOR;aN+Y3AMlBU#rVSd2L15S1S?k*NpYQM)yjy z8mtz_=`(-%Yv}lU&xJh1kwruAJfwlz2~;sAlE}oEp(4BoHtLtfu*irl-#q&oJQ2c8W zE7NM}mX9OL(Z%&n4-U1uXIniqAX@_bh*Sa(T}eNjBbO0AkwxumFgN=@ff3@q7{~Mv;T7_5&AMbDrIT!fkNtW{1!1%$T5#ppG30bIyLAMg-JtD&qH+DdQ zAB&SksV{bgqml~2i5$&Ok~0vxq^$F!cMF%Crk@+LZU3>y(8E zKo;KB-n_z?A>14Jj(OKy5R)IBU%kY>Yh>+Y`P1Dms;gO zvkugbySM<-Dnp3vG{lJ6@tdQ&KuVGVHlTL|2iZD8%BSJ&^<<5>iZICe+#=t36o#T>g1S* z^x9?JP?d#vcndd2_x^oD0%FAlBl%9JPI8^N~2$6+D+M^29xcxI> zt3LEz-v_uc-Dhs`yN6Wl2*h@DyDs)3O}r#Ap9zag6Xh`3Kj{wU>@`= zhQyX-e40(W#1)J17-X0uHy-lxO{{+ch+Om_=dlF?lp(@NYR1dEA6tz%TI<)5so(0g z=dUI1yhz(83GwV1rLCPMM2qQ6Y;w_bjN=C$IR7%V27!k~GpJoMf@hpfvXT*^eRJdp zgX@rI7n~pJ9i*)bgqa+9z`6%)Wa66h8-R|E6k4sXeLu{~AE)69f6E$N zr;!EFBk-z8+$J3AKsOd`*(_p?*AG(jlC(kvB@hdf>yq3q8m*{m3RTlKG{uL&taYu} zC>tZqliSoi9s&L}r_q9M{yJjF-?xUSVfYrLL4DRN>`+g`$`_lk;_$lzn=pjSimbr` zj-RV?z`GZH)pxQ|3op*r+6!dTfo(mI`!GPa5ULnS`YSqiPty*f9~}d-mP-l4p*T%>}$Q4z~oAS&X* z;CBKiFO%-cKxf*__;j*SivvW<$}i;9AjJmeyZDUkzc9d&x33Gz0RuTsWLN#%C1fA4 znZQ{R;yViLQF*|@5#F3aa*%%)yyD_!Oh$dWcJYbFCBVVRn5q=tzV@ zjP@@-hmH!df54arXw%=P0U%Ji(sC_CHyofXv==xW1hLrUDj1o|e1b0$*C|y=&3TcU zu-Ql~Suk!_nn@hl5bo?xgmGtCq_i_*_LzcI;TA7kh zu_(u-sFZW!#!a%AU8_!?elGNCn*B`07oIwg^+Rh95kuRmA3iv%7760Xr9U~W^0q%tW@r*Oat*+Le;IRlM zTQfro551C7Ch7i3{(rGa$HK=&Iv;65h9!gEcqNR#{wnrb^yKvpDq@l~{1~D5Y&qbm zDmDxy&DxYAi<=wm!HL~eeC3SDXvC3v8NlTuS?^oJ?8~KAhw88k8%IDm>ns`@_i0=C z2w`ZW0MO)BWtItmFT+W|XJ$Pn;5r0F=t-T@l{%rv#a|Z%0WV2=JNtB0QE5>}d*NBKwG1waG!Qq4YgA zr-SVxL|=8WQ0MrGzcSqI@3Nu@PB%H(W_)|Sz-yq-hd6vlE->UppbPm09W zXb~}`|02Cv8U0KoFYakg)?p6!>vCz|pcNOHK+Rac3bA=P+Ndi^s}i;uYobA?5_)s_ z=blyt)GC?2R~qg>KrTxVXBJt{5X8K?Mnr9+gkCKn`UNO)Z7(8)5~qZ-o7ArwhBfj^ zL-DA^o3wqJz+2p*8|9X-Cvo>Ywp3qGYXt-zPQ>e(G#+1+cW|Ke`+$Z2SlfNbOXo4* zS`8vrjw?H-)i;Hyvc^I*DB{;n1YLb@vsTx=7;!exhy-s=9sT z89u_uN+xOHXbQ8aO15AUpL@bqacBuxxO1x{ty6_k&W(b#SN-jn{C(@9#JJlUT27Wy 
z%yL;%X*T;y={=lQ0{hq78w?VH2cD8)!C*`lsv}itG*;4l5yrQl)Z+4?vR88Sk?BDS zMf8ZO*aJV*zhdJ>HAriQnU}#!U}aFy4O1K7`6x#h;1zv&7=TiG$V4%(_trY%#DSk- zbpy$YZJ2PLWGKHIv3_Gis$^s?;2r!OhGZ$c;YDc{>3!lFv7XSL5Y*F&d6GFjsHYlq z0ckzVlfY7D#Tg=VmTwaqs)91h26H56njFbx)%Db zqk&jlf|L3w5~FXBlIROJ%bh6k6!s*#)r9DL=Q8K$M`wC+Ize#aCG$jOP{mRR_{mWR z)2meR&M#Rp?>cQ50%nJvKSbsKkKiTuA7a?0^g3zyK?eK^gWS_?xuLbjI`l&)?LTwa zI|!q?c|-sw^0rY%FH;l%$5Dl`L%n1W2@4kG&~oH3{v&GH)WD)iXQJ`2fd#AmW#-iB z;odwN+675m@>$5352BbaHvB#EWwGqv&%XMFl;0OHD&)l;lFqW_&8x@*i_rj)t;&Bl z+2H5xvG(PWh=~SYMmVJ<*$m3y3SD3gYT)8BBa{wH@iA;dz+zqAmwal4!SV^!o6r|E zxw)LUdN&KBq7OMamaNc9Kr{2S8+KXt%m|_l|14{EBu`m$C0K9ApKoRs3oo;s9bsq4 zGBkxQQ_4{|v^bMBE~{hbXgU?0P(ogiyu~~*<+#YP(Oup;kNBtCzK23img><$YyI0l)&$~WW~IiLHh9?d`3h=`Sdr5uxbq9WdV z)QuXLPaQ%pyM>@TA9-#}+aU)eF$CmLUMI$-;7j~(9rY|>o{4zBqNer@Z9>10dF3(; zajuc~Y@wkYD@1G^$#sy~MDv-@37{H+VoUH1OO{^9Y6pq&Vn2X(@WL9+LN2=BxNkQh zl;?(t7 zd+h?0bt@6j;qOorwquRMlZ10Q(~7e9 z6k!|Cx>-_(?NY`f5NM+cX~!Ni%lpj>3xfjim)7KU8?w)?A4i+SsForq3Y=W=jI%N1 z$f0b7($9emOYb4>R7ozn%0m_dTWUHSr+6kw5Tdz8Qni52;%*UP%Pt~X&63o1PTnaN zj$sQ&h#$gh!n=gpC=VqfAHAnej#BQTtyORODdLWOpEmv5=TVKiTw!$RnI-|}?OS4} zF$O8YE!I=KxS1H0iNP8sb%=bpYzk(Y_@pS6B+-jAJhOsQh;|X{RI(bZdd6t=0Jjbl zG#c$3oJL4kJsGwyeMr>drQ+Y3MEEKN=vOtk{e+{#}eYbNX!)^g_Z)a!?8~Lp4RHc9ttF&D4Av;5(tVL4`A`e2!A_ zi(QNy0l2;%rfW4OG~G(A9^UxYj{d+5Z+V#*xi40C74lO>DCL~h=On^m(F;?1ZE@PTR+oaIsOkzy!j7HoDc;Ce4GY!WPka2)opbU zvi?f)R)$X(ol5gxJuClzvBh1v3W4Hq9b}&YEO-u5(ZWX}6%dG@NTf#Gv`3;O7G3FH zSP;Pv8UW7qEcy5HCJL5{|(jO8I-3~G0=(1@VKi9dcB?Zm#IyMEkdQOS&2p$_x?dNl39ecYy z64~%wri5tgbtuN$?Lq6-L-c#|i3G8H=+FxCpQq`TEeRI} zCfMIVoO5XNb$T90$V8R~m$#%K`vukz9mR9Y{@!fw^RnbP5a>meXqTDK`}pj4I42Z^ zSu!~4B6f;`<^)^3f4jdcF(2kq{&`&mc$Bim76-GJzof%JzB+b-Vb|Q^xL4)i4G=ts zxn&JMaBH6F8fDQ8d%!4%ds=NU7-xCAtKQIk*WU$HPRskh7cXi)eOwZ593V52)<|w% zpj40fS-bX*&VWgj#j=l$kZ;8;I->p4Af2f4?K@%eZ%DHG5!%%Y6+op(^ca`5f8{f{ zNi5r9Jh)?q((Hp$J$%KOw;MOq3883mU5$!yD`XEgl!h*6yRRDUkvv*yV`uoY+2jd= z#IM;}7g_L4B9ir};Ay+8Hd8q4UxaCYYjzE~;H&&=vY>2;DzISCHgh7Y%Z*IYVS(%A zyeNH2`$OLnvBDU)P22?#B$gkXe}YXz{^bGK#PuY_2@7cWY7fSeaDGZ=V+cpL6%*l1 zTf)p}qVAySSDYz$<-UTRD={)RoyZLP$r{3CRO`bc4cntN{@Fdj;hy{HUnMRr=tMaUPLD-;$pEl08^dG{&^7GxfU|V z?|9knHgd}0^nhahGP`QZ1^(uHamen`QUe(=mBA6*fnuc?#p29*+|Bp21AO1 zhB*}0730LZ;n6kZ8UA3!X=UJ3lK2AW8i4Xb6wKl|%bCVbj(Bnx zqiO4C<6jFd)4OM4cGgYef&(S?ABZ&HPp5aiku_4pt&Qf@(_QRVSw%BQ-wfuOcn>mhZ4K{W~r0kpTNzfo3&{RHQ zR1Wl0JGG-*s^bS3+U;NK&4IOCUN4<$P&!Jso%m1((z1k8I1r`8gz2P+$HmCFB|8@P z&Li}!k0;d!zm?3FyM$0IUj7nq$Ddd^V@^;Lo2Lo>-My}Y)BzUun_VzH%>O04|63_| zBK-L%?5tXUYAy#TRu=tOy-)e71~Iyn6kDvo z&PgW{7N6wRV9PUF3dVCVb@bmGiJ=r$b;EeO^K*cbI6!0KL(*p{f~>c-=udh&>v{nk zCw~ZvuM^HCwok09XPHxsIc)p1)~t|dLD6=u(g1$38zp2eK-yDUH~upRC;glUs`dfU zvMtEQAcj!~RJzVgj=zq}0^F3N@5B;JL+Jm)6fWY24=qCG;w&V}@qfPS;D1kl3aHeF zkO3#l6AJ>xutm$eYs_$%r(e0(=tit_DbH1#0CMY;YXx-R`ohGS6`h58R04a`tL@n# zPU?7;7l8;@arxjcY^l=7lx6FLMW(l~rUgdcXujIp0?rx6rtTc}UQ2aK@-L*umVFrN zl;h_g^0w5Do|W11Cy!gjmIpe)ol%ONxV+ct#$6p#)Zv^>fTt)`p}iZu4R}GL*0K2} zc8zVKyZJ6rri`8B-_~y7S)9w~)wRRErQ0MiGTM@Y7twbF=KeL8sumt(-NO;UMmE}$ zT*VY!y14TksLgZ?s7#o$WyErDg+97%MrbI8^;^o12C@=^7}fxFT44A_9Jn`A71YYd zxatG}8q>!8(eE81gI5zMY~%>_QpbP-OeIBz*pm(E!XqjGmF4lBudFm*Q_eIg75Y1g z#+elBOj1V`8PIp|Gk$4~vh157<8#4Rai@An#3LG(;}YE zUOP~K3#kSQY$qijdPr&QV+rsHhfOfT8(kE@2Y}xG@2qK8cstf!$Am%y>vD(*-9UPp=RYV=EoX)fg&+ULDro-mH|MO4-)XdpSs8TgYY zt>VsoDN6jv4x%ZM$@7Ity#`FZqE3^~KP7fP74vt|i+0K;-=7)3sCC9Tuiq$4;lpc` zGy*FRTUf$HsiW0B_z22^5u$pajRIx?(keBaZQ*R{Dx!5-TeO^YibhvAgT=Cx%DaS= zs9G_ej-MJtw|*hl+nLux>BH+9L1~qgg%%a>0-{foCrX{(yvD7kQ_lhyx4t{|618Df z*2zhp&L=d=Yspgg3Fn)}pQQH^eyvIWD}Bo%XN^&V^Apc6A>r3(6rfFmfq$z1`yr1( 
zR&3NRq+h#u)rKCD0A&C=bf}c;v_BYNpT(v-DrN~$49wjq71pizK9UJG{mVbrJ$SzK z(f>0SXll+Y7Fv$_OBLvP6P0g{Uh6JDumc@gxTl)9e_Nqqxf5L4RFA*3^JLvNaR43B z;>MC2`tOE@QQAXGh(nG^)*~bLXqLl3@SYz||K7p3^bz?eag28__>m!up{-^;EZ|N5 z0r&k8x(ttscX&A_op<=F2<)hVLBe&F`Jfq(G@4T_`Iy&YRy*wRyE*4HERXZBMJ+8! zb8U$7G#Aha%oaB;Z`ZGAFM|cGhqW?B&K>nYtu5Q&fy9E7v=RN02$>+c#F*lMcs&Zi znR4Mvu5yfLmyN^CkOeSF*AdwYfNo^ajTwu3ik5vqn{{ei@=VvI4SJWvLvj4^*Z1zD zx1muh>YDv5TUvcAn|6QR<2*arDKfg&aNy!9|JiSd-XA5^9qO66z1Yfl@#q}+#!A6o z(BJONHWyn8T48diC!V+j$ENlOjfT+#pYB@6kun|cWw~=^u0sM zQz11oqIy2DIX+XOZ&Z%%e0q^Rj{0*o2H{JNoUm%>yC0JQ<0ENZsSVGeS|9ME<{V&T zvQ(!jzk~!6tgiK{%SMaJji8As-E01I_GN=TtgkV)ZsOd*df@|sKG%khtY1~I*cx*H zZc|$hlPYS()R^`kSXOlkN&XVYuYT2HA}OV3)Uus-XKNF$BVA))t?Z{o?X#bmj3h6< zO+0rcv~g?hom6nDq^(m~#c-lzX;R)1Hc884O?>?i&J>q4U$mXYWWZ9_S0`&QxZ)81 zAyn1ITZ*lv43Sd{eQ>C&I%1ClR# z5?N(mdn6~QRPd{XG~8n>i|{mb+bmL63HSJ3NN(YDhbRl(6QIS3JZ=sNRGTLcZ^!}I zOsj&**ce+zfY|Y?gAPOxYUFTvypp3nhi;P+TPX)5hMn3T;e9gNmiF9n!nhmdG{ssB52N$L2-=zsZqJ*oHT}z$7zblotk^Fhr!7 z(Z#(n_Emljo^d5qHnI~MYQ`(JQYCxphg;w%ff``Zhx{v_JBZhe{0@8Z2bp?c2HrpE zJtfiJa6OYZj+7>MS^~EG#wpy(12%;xcHv4WW&RSL+fLj@G9&n}INHdJwqnOYq}Y?f zC#E|jHJi-GZ?-Uw((~cc!3HfoLFv4~v#p&qgLDeET7U)nq%6mv-sw3|X&?PgRmPxW zv%Emwix$(jl$t3^wh=!`tV^2cK;MmMCWxf~Rg)26Ex0o3G(8TD^pzPEokFI-`Iqqp zz_NFJ6NClSEl(J5v1u_;U+#su8a9BwoCfzFMqE}$`nMnDW@=po9&NuBh$rCwvJjj-;dU(MgJUf9) zLC4@7<21hKyq=)FuBpP=i0=C?B3p-_(A0i!WvX(A0J)8^IE3zrt1A?6`(dWj$zd>tO-qG2@3`rP^u-f zu0HsmZ{yFtK7jlmn=G^nx^8~85jF3Wts+y38=nm1d4>x_^`Atjf@k18Zs`uzo7vzg zsQ(Kz9l=26M(sf4xPe&b8Ivxh#}gY`&OKT9$!>oP0J=)g4x+CDbExI&2HBmaje zK`J}zTPp`OhaZXtCM2hUr^&ON=YOrkF%&%5#G0rQXcW6_fCGxpIQfM1;cc-X8zhx* zZZb+Yy6rsybzr$7r#wzPM8ANln4oy0PiRaxMl)RMgMmQ!nWN9wDpSlWtDzbrq5=J( zT9eO!FSdl1Y0p@sc06Gpz*wbOzo7~FNaYl-T5tp#&L)_yTxqOtEh_*<&P}?s5hY~9 zVm@XVxxUMJ%r_nJ_|^rkx5wIMMw+5|i*Vk+cN>SmBIP0+&;PvsjF;re^tJL=_-lL! z=_n_zJMT023fF6F!z@r;Kck1{zV#)wnoGTsN;9YDT~)2~IQ^Gs=i19;{#S|fmP;Pe zl`kBUh|3rOL6y?0?^3NV!BWmi-66iPmIdIj?^pN7sAu1nD1M+9wem})pIq8XZmcsw z?4J_A7Yg37q>6;?tKWVSYCX>v()5hcs+1NjmMviJs+as=)j%U!)LxkThwy#F6PO)d z*JoDjeBK_iui`{Hdp#+&JeWM`DQY#b^1cvkPX_knS;p8mpl@vCMm{NOUlNr(l{r$y zm30zSUnzso4nBLUBni;T3fKms4_A+kupb&=4ff%)id#!=5Fjx^w-G~`;?plsVCRG` z`l7GTsY^|)5t3RJR1?RJ^c@WX1TE7#gXIZ|LtE~~=Gf%1aN>qMhVJAyT0IOsz-Boj zVa`GXm_%W2-U5W3!fA;xp-I#;7N`-&(7da9T$;MO8dsz8gUWfC9hj0 zJ89yavh+5xP0NYW+r-i#i8JF$cfYaVtPAru(_1wsF-0qMW@z^uXj~X6FC{1&->~{s z7i+6!S4hJkN=wF69fdYTbfQ(#L`3oV6(w|Wq9+`M;!TKhIfx}ooVwXGu@W6}+%-yN zEHle7a{V=7{r26W8d&{Z>IHx6vFEFI$G!DpGTNmw<|-6^>){hb#|vo($!#D4<_U!i z<99LgaMCgdWHQRi3%)%e^!^L!k`Mnh;ZM)~g%fXw(~GUqU!Y`~M_VUj>*JSaEFJed zjnH-5dH=%8Q<{p|Wy2ET-dItcEH#QK3J1N8I1rh`7+fX{%ljkQsa6YmAPa5OD-Mb) zhprDX`8<*-Y=oYeoH%3OjueySiRYwj?)>XzpI1lY4Q2=$$1rk&#uw`Fd9_<7{Kg|E z3d=X{YC=jo{RY>+DJ@AQgwPMykqF&3#o+c*E#H;yk*T9Hgs)sJn<#DW52R}fk5Bq3 zotky|E8K2=Yll(M0ZiFV%IRCWzMNy`6Mp*7^#8(LD9XS)tCmHC zuEP$e(6P3de5F?|T+Vim1}8+VadOB`h(8K1TpL{5!tWHo^aKAX_T_&>?R5Fc(91@e z>!i=-Ymc?ej?cIf2OG3Ea6m2-e8JU{x$Eki&$GTO;fhFk32VDCQzlSb01NHw$l~4- zK*db}`d-TA#!szKjpWn|q9AutCh8vDh5y(YHkdpX+ygL2mVvs*M|1swB&< z$0;;EKZX-U_%&ZD=Y+8EeK~f!64iEnQng)0510A-p;>fMI~KMfEvRS)FhSgpaIk9s znE;%%ipY9Pp(K&Z*0xNkFwJ$mWP_P>(eKVL9Ic*$zxZlowPl(7A43(HfeSJ#kby&d zfwgK0mo9H+aHp|rG7n98+Qc;89q0u1VlZe)72c;&h=26x-};qEnOD10NhqH0-5i*cg2NDxHT3<9wc!U~^D+y{HWcl#L+0qit_7|l zO+S&IueB9Wui}?NNx{)%mTBB+J?*s|{WSCVp+c_+-`N_FF%l}SwmfC*>H{K{$zpr? 
zb9s zWewQC#~iM{4s4SlS&hO+@F%M7_G548qtAY(AOePUoi;J%(kc%y`%E*pMTiOJP*ZRHKo**P-gECr~l$`r#=s=E#ziQm+pqvb5r^RMe20C09@cjLj73D8t<^6 z5D&t=swVCy48AwT&YdhyXxXdaeuA?w_rmoU;@Hlj>j<7fh5aOZ=Y6Tog#9?j);>eS zqyJs4{J8XCC+1kh9G-T|mc_&_Wl(4<2jDqVyjBrZ$_Qfp6!8!Vkl>j9%3AeI2tRvrrhbV zjUuV6YqZR%z*)E9r{#3$W;MgY^`IBhnu;)r;Hq?JgS3jjscTqGDI*@k1ciyZijHtX z2(>=X*CMCkws0;DEK!TKVti|DP1;sR`2k1YeGMM41VjrTY z3sg=k7?JZCH1!{4Q7ltvRryXiTBDT$_p~<{Ogm(qauUoZVtAddh}@l(IF4Y`DvA|) z^{Kt+Hci-|9LO$mfK@W@gi^6h+N@!lTK0btK+)jB(uoi%4%&%f)2e7!C`Z|m$3lZ1 z&ryma;P1dZS!w_1YUE(0n*=sQ%;7&JbEI;rWu#mphmNcg% z`qgN79V>lW;JAw4q0fEtj$~5M`d`ZO*^&=f)}3X)w5da8Td6=!zm4YF0ll|Ak~mcA z-SOMdtN+hz*Vr+}`*HTTv!m;w zGFai|IojA42H7c*aZf7&QykpV-dfXgjw#XcPSUcI{skoeicH(<)CI+_lfe8!w-35ZIUzOW}$u4lc&^#g;p(q3WNCC zITawi@2hc7Xn4kZ0`Cv{g?~|5kd+|ehW05`?|VQ=Fb6Wp=;xK%@EfAsH$IYluVWT+ zIFFzHidd0y!FC>3bdMs55Z`eZc7IQOFX7njCM!4#8xy;uY6z}=noy@_kz7IaK*_ny z{aQme+%oQI!mnw|)K=_I4)nEyTD%*9_jK%A@md;#I5T%*pydNw zSQ-gVLG24N9H#I=R{}*xO|n#8#7GyyoCSgHxYwJ8%hm=alN-Lbe1C{l1#{EN=O;Mp zFM?9H7clN%HG*@fkMMe?p1|JXB@H%ouBj=xC2ZLkt7wXP=31q!kjHN>Aev|^?d@)8 zK_oc*9-^`sbdC6GHLsLZd_?M;bW@DdsSZ(Sl8-x0?=q_$EP@d<}5%!#!w2Q$Th=3U9|6Al9$(Dzj3Fpe3r=}@)mGX=+X ziAjd`|%;T#N zY$|f63$iyQ!@vdu`9c9qgPktB&jK=DKC2m@vHmK#%&9u{2Jj%&vAcv;?WFSB@WGj_NPC>ys=L2Tj5eKu7PI;F_Z{#(+N>@{P_Lmxbz8o!QnhUZNu{*;7aaZg47V~$6jhXhb z1-YwDnTwPHr~uP1$Na<33g6a~lzqi_dl?oG6?3I!*;2ouulw@$N?kEFBisBwcTh9~ z)_&Yf<`|Y4_KNIkC5!p;iS+16^uOloGVGOLFQ%ItWmx^&`njYkb@T$dbT9qybp$E3 zqhkLnF*YlBj-;h{hR&oj-xMp^z_v=nI~@>`dA?6>>MS@}k3tzdWzVo2_4R;SdEc$T z<$B!gNgFK46nEt{@<4jNBVxTTu?o$gq&a@%%ZeDI*6hJf-^psQq5f3+IA6w%q`RHW zvIP_Kyrxs5sQcuF8rNkWYC1#prqFk3P09Njz?j9!Y}vZ*m89N8;%NrC!-i0WUe z8j*5EP8VNkjf&$39cAH$J0P2QfveVBLlpHJf<}vGT?X+0YF^$GW!zNYVOxZf{TU`x@dNiDk zRkX)MW^jObH==cHd<3ZR(KxOqe7#U(Z_zgjwU@ERO=cFng7fTg$N zh*eEdHzm>)qk%<-bu_!N{k+Tex6xk}Z)!ru-TUx-Xxel_W*GF#?x7xhfK9weomOt5 zFM*I0v`zoBRj6bZ&R!+L$dlWXtj8R8OzRw4FJzTPc>emx=zI@vy3se3v0C%{3K3yYdUqZ7@U;qc;q~F z!JrvmZcF_j@(fo^MFlUB4Dm8cWhbc&F}s2S0VX4p$z+54#NIklj_P8y5*H0sSA8i1 z@wWwE#jqH@J*#bzVA6m1T+Xbfd>~-Oy?OV#wp5UAvmvpriIP&R%)-Z-;M6fSML%0( zlZ(~oHRdGZCx7fTlLfzvyBcR?0qUh|UyCoLC4OY=S8vB9V2&51T#dr6jd9CYS{s zkb1$lciA^&W?p%C)s=(cR=IOkvMCD{-v~$EEutJc7{r;pd$zE=4;kINwjiT(-ksT> zo=?h6(i zT}gin;1gBeHl|Rjex~yCt#z-9h>CL*#@bV2vTwhW`{`H%*-gC|h{$w4C5>BQVOP&Y zhJ`b?)p@UGf5yAwF#X!M-OU5977k;A-Pw0ds^^YJM8)9#l+EZ&s6FRuqwbkhC7!{c ze9*Hz{6h3Bj5F&R+&yT=mSA-LY5NP9K0t$V^{mmL z1SzG{y~IqpD>dQtFSI&{CR$=dSWetTxnI4y)kI$kkc%%=*bn8-PHKZrX$$*bqNxgo zzPmKG965vX_MP;xD;me((Ab;3Fy~HSE7eN<4~=sC&77?BOgwHK`W-w-jHD7ZC1_j) zO_J@bJbN&Z{RKxai(!O?mse{0ezJk*fFza|ThZq2w=XOCf>`G2_)vAQ?8k~bjZ}^u z7Fm>(`l43~m?gJ2oySJ0d?Ipl11`k=a=e;tXCtOIfXXu%1iP4N4DMgclQ0ON~Vp)iY-J z^SvVxizW`HvfTcH&HjQj?y1nFVU)u=`kxU~7Ia0pxr2SRNQ0P=HFC*PzVMKF@;O_F z&hHlZZwwqe+o2T8r8&puHON1j#)$G6bRdM8Joh|ht}-c298%K6z}7fT#$!U9vgr73 zqe?~1S(qb%%)|*^`-&agMWo1+=#P$4r7--C-rAqCZuXDHs`=Z*C!pG&m~80H(8w|# zPA^>|E)8PeR$;&U%sOmVgyZFXT{uK4 zx>L+v>M6Ug(8Pm@BWH)m(BoCN5dRaEoqs~3$1yyX=?ofo(ttxz*>~;CEpp`e>3S+5 zL$&kwz~ zA_l^%GdT(AnlBr)=*@HAMK~65p^xle=-)#%eo6Rp6+5bec4OAYgC%hn5>*QyY&fLG zR0{6ia0HI37W@Tmj>Qz;DPp&ReLoPYE@TTe1E>y*n+jS)&$~qpP8PjXK-7aZ;y!_Z! z_oSI{rxKDkfWH8K$tb5z9*y_^`6})t+~or9u{x2j(+C)?(kxj%0~Q9A8E4>{l$+L+ zC-str{XZHc(3GiP6zIPs(ew%^VXy&(c<2aYc*&m|)S$99dUxNnxdzY-`n7i7un;@X6V=GTlfve_5<+35Sl$nC&)Cm|QjFKROtajq2 z1fl;FjJ)uFNdA}qFUf!N4PUGu9NX{D|8uz)9M0u0=JDa36)^@{xUfM0&eUUtJji0? 
z{Q@1@&u7&tX@7fvnHbsR!M7F`bA3SHXDytNZP%bz95VD$Op0sKV=YOLdkTSSpW1@` zgg1;h56+Kv$#6UHxu-`J4ZHyd{7~#Oq>jhhvO<02YlzseBrAII%n{o}g|`c&OJ}h9 zw3f&)O234@vY5I@;H~if`t<6DN|M&p_75xl_CdI$K2C}-B46FLa zi0Wx{6h=T7O(3=}Juxy>89WqMbp0rNdaIE}shP51{p!~U&Pu8XB)IrF$mu89 z_bAKoV}coz0eX){aka{3h*ciHF8zM;{Mx&pRf&T~-u-oQG?mM%(Tzci(6(77+dEmP z?>L1mo{UcG3I(UqgqdI^(`kPk(=wR$SlOUiaB`k5bkO-q)vWo2=Gnk}mwcr(*}IVe zOsJ2@lSVtNasMuskxb0-Ku&;*VdR7dKkbr);`6{TfWpy;&^fSOqG#eEgtirN=?1Ai z*b{PYXN>(+KT$46A4MEgtkWP%&nsnx@^&I3m{YyWE#e{96drU#D8JPtS`%04=>TIg zf9d(7PfKvrSa{4&0C6|M1`V?VJ2>g-kRF3>u{{-|{*)J-77x`*&dcxNyI4QUr9UL} zTl3gUdd6XOg70>?hUC1xG_Kd%&%WBO{#h4iMntnnNPGUeo&&8(gJcnoA3ZpEF``q%eI%x zip72U%~zZ2>Fu*xk09KcE1QB6cH+K+&y>Gf%v?9yMXMx;_oQD#XYvwKn<&$qi|6ta ze3(9ik6tNH%hT`rek$0MC!bH`C^++$2^<|LI=(1uMDr7sdSU@n6paHtIw2fH`l>z9 zkc3lJh>i~*t^PnH)Tft4Y#_ifwsrk``HRiS+h5D}GV75UUS{^s$Bs+d@N#fO^L)#8 zG#Xk8;@ahc=YR|(l11K#STSy?Os3k(VT>DeY|7cU!1~~wH>fJ#GO5AXq-5HN{mTSF z3%|jjTYcxGua|K@qxv^ZROJGZ`ZtZ|rwiLsdqN7M2JALs3~a^AWISTHYKvBgERm4j zv|aYPak9eUbFvRLF$Eg3*ks|^w*a--$gm2M`^kpY_e!%Nb-2O08J!a8gU}ea87qIg z@@`%TUB1yPcXexUw^*LqYi7ZzP$2tM|GCzk7)TQV%jQZnM5~c5kwys`!LR|ExkDdh z;w;~1=lxW~Da=${JB(yi*ivtcP*cVP$|Cc3`duW8un+Mhx zIKGas{;(4mSNJB3kD7Ptorw<$wr`wxwhyuAd)TBh1Dv6yY9PgO7r97rk8wL3iJ_G9 z>VM9dZYk(ZvF2$EntuInGuSGqrEF=asTk0Jn5yIRf9Z5x5N{8+v_4>_?lX$WHy(UT&&10 z+ef=M5J)FszhrhlZTk}Y_+>l(NeATqQYe42={xeILlwrw`|~m_7qCUhW?wky@ z=38+{NRQ+~U-PiP#CFTRt@-T|J{`n|3LGck^)t5o?dxPbN>9gRpyt#_Z4vX+0B@r~ z+QK1;BIDw@B~I}QU6iybv~7XPHfX8#q<~wT=|ZtjSWqwjoGTdWGf9(PhM)*7mpqIa z8Mudray+rc!-6(QkVQpeG#tuT{-%J}g>;GX)20tVpg~gD#D60vJcVf&1n`R27s$Lr zY2xyax+nbDF&~;i!tUJU(uh1oS2WAxE=_e6B=~Pnqy}$Pas|t_>rCro5NRcfa+#O*J61rJ~;EqXk*vhrHR_Z%=f& zG=~)jZulDVMwAw^ok5uyKOhUAWt?SmVzwYRjt{CiHz*0m!!YT3(Zn@vjFuJFO1%Q(rlaMPWdp46tu)s3qH z_m@r!FVl1+UHQ`c@Y@GE(frL{P>9~aD}`yTh<3$RX<}kX<&_EjeA1_)J8$y9F)oWg zx-!1OOfh*YWQNwLJ^B+H`#Eht|LPOn($@AtoqAdd!Z=V1g!eo17?Y!_6_|&TtX|~gJEy`;R>?A zX`XmF{C&G^4YC*y!iGj#nVLe<)wgP z=EzUwhk$D3(kfzlfeBTYIqD~MW#}ty6_AN#8MDdM1e>r$PKV5Z8Ed=eO%1@HIJ^;^ zhF?TZw9?!33$Cwhvvlda`7ddU3wp=`s&EHg#gzh)a=N@1DGfR-__l!v-WkYG`l95* zq^#*c^z#qn5u2@m;aNrda4tmxQLK zz6yL^dmTw!68tI89(MPr))T~|hLj9ZyS<41w! zk}WD7VITC0J(Wx7*Yk9dhYW7(7wwzPe|nmvi(hADiAO@nrcSXWHV+gJ zIy`J&Zwz`Gr0Bz_Ed~DvF6&VEFNNgk_T|MdamR=)A*fpGu z3~>K{i2g8e;U#WAI?SDg_ozeqU_}O0so~5a3$&m~*Eq6T?2!{=RbrsWM7vr_=qSQL zpt;H;$`k@SPg=y>0Zy05lPu~Xc>tWQ%cj)y3PCJokvw;U~! zMiCk&fGuXHsN*9-Ni_gL-Oz3tX_uf^6B(MsjQAA$J1KG|L-vUfeu9wMY}WYxQ4`@g zA8G1HKWo(A0m;O|JCC`$cfh3fG`k+i)jBG7>$i_|bVj_w%9Y3Vod5FdI#!T{h9k}-8uiTDP6RcevERY9lYx9U*ajB z%Z$i@IM_?3MrXWsnDgagV*2}QB8ZbB#N4jnaeiccf_!Z|DZJY)j@vUcUZqFPk4#rCL88 z$d|jfI^zo>Jm9V$_Z2~pW_IJ(L=8Yb5w_Wv>vyBJMUPfezGg4`s@>kB{sxjqf7k9E zM=I?9W>T0k${;RdZO7(a$Mt)cP`tlwf+5A8EMT0GNyKvv&7lO-br+E zzI4_HTGo}nE#dm@(Xa^@c!W#z;DRHfQYyq|W}7`Eo>^Cd0&RF0XhAi8mnW6zn)cG{ zu~w@Q3hR;fQagdFZSdEIsoASFs8CAhQu<1iyzG@~_=O2#*8lZsDgF0@)?1scmr!(# zDstCeCzO0lGT^dalKB{PpqY^BB>KWS0Ty(U+T=jl@X)JHa;uqO=%#>%3zj*j;aA+g z=2W0SQu4(W^d3I2Kw8L)p&NRT5=no@@QrBzDJXU>6%C zK)z}H{;Nn-n|k@f4N~#oCWt`HEbf86H(nH1c0VbwGmKF2C5JyNOH4WJmaJdqI7yVX zUsPP>Wh3zVAnZT&>Iy{YEU?ZAc&wp=4gX{#eBAd+ANcHR698gTqigUSgA!41D_e^! zFyZ=Ci;#LQ-Z0VgxsB1tkZ>8s%%Pruo8Yfi(05n#%GgjymNoKeVf4H1J^-Ih`-CW& zab@_R&lF<06774sl!)51TYnMvlxj#>`ixv;Hr;TW?W<$Dg*~@_*BFF^wv7R7@ic2! 
z5j^6|&mcF#Ud~dk;c>4s<~)Z8(z#<_&%pUDV}>Z2<~+NZskj+ycMw?of8Sl;F`-rq zC{`m*dtA|S#Wc+=qvJJC(O9;mNbt zm#T>>I(vG;uRYMbYX2z3Se4l`;Ta2zbulcm&!+Vbqe137P0alZ5yf@wwU|YJ!j=>J z^1q>_%39_Uc*nxw&d5q4S9C3uZ*ilP_&`d5ItrDEwqKU@Tl<{gzkkn$5i(0Y;lqi) zFv?kYqbHJ+NTK?A+!I7q#r=9Blnc3M)P%Huo}CcA{<2&)BVoSyrM+ZIgi4Ox-j1ki z;NK|k^EadND-zk;kKz>49mBnUkQ)13(V>I=<5i3xh9J6TujiLbqdniHgBOQSn@@$j zpMA8xB38k`?J$p9n$5JDoi3fbIS|>uH|~_+n{06E^;1V(#bEA#d$~z==c<#Pkoz3F zY7Z~J@W`v2&#dm?Rvb^dW~<=wm3T_jK_+143-V9D>F8SjmFW=2wx7u-^EnwGX&cqAgngF{e4;do@BE zkyzt??MsCFTe$2b@MN!?~X1>;hw!nh3rDh>lMyXAY4H3?oFQw3*eVbE`{h##V8^H>B)mhSLE zC$C?@XAvCB1>gNEj0nYZ`JhB`ijHG*_5KhuNE~dvzDQb$Yi$$wk*q+I{WF7&dSdLdIIG~I=WOK8cYTq^NTB$Lw$-U4jJ}AAT@JkAeL`xAr zdRCKNsr}1%*@lMj!}S02m(Xrh<^QG|k)<8uNDF^|+Lh2dei9bB1Zls+KaY2hbB|0R zK~`tA0l!85XFU3E!At!Y`hOBcj|fGVHhBuxG?`-N-9A=INJPxB1n6lR8feI;UEiV# z_`@>&j?BR(xB)#H1wRBS%_z9|#i_;Z-flUX-42y7=iq#oD z`f(qkkPA9Oy^oEIu|9vR@EN8kh-nTc+`}c0-raf;sZQm2C>@}UzYiV?BMxXXV0Y|m z@yQ479Kk@#JzF>DZ)?aV^f^w;J9s^2^b5xJcWOlbp2BuJbHag!?>$++@G(RZ*Hs0Y zP_hbeF8lg`yK+`}6(}0mVU5X8jU};GyB;IC-%6Jx>$fiKErAgf8*-H;quyN@^HTC& zh)$Kc><`#u!$TyIgyKz!N1EMwB2n4eaj^|!$<6ONh#v*&uP-P)HzE8;cl=6|pjDu( zXb)3+tknD5)KYdB@r7>gpQcn`$4kgnx}LXZjq|7Cm`_*a3l90=PHplFA?$=fu5Yao zb79hWcws-Ol4BrOh&!2>qrdCNSP`eHu#M9jQEfRA@|Z*ZzOHXFc!zvF;U*dx!VXPD zK^@$%HWcL@nE9*b*`h#aAq%1Vwd&KD86Wse-=9 zXjdESu`8pwki4cxso`IjD}$?R`Flo-=WGJnDGFzY#Vak3Z@F9yrP9;mAbzPH*USfx zEDq++4mmv-ORc@nEkO_c*r5DSJI1jG$$3)LxirsmeCY~fOr+@Hlg%ryUW=6^$}pRh zg4Onj`{i#s-uDRU6iS>$`~haGs{`o%cPb5G1gLB6;7RvK>oL=4mdy}JT(Y^MB$lk#jbj-$58cq3$X1^=1eMB~>@1n?>1G!l2tz4OUl^mgEq z%ex|skjmfHG5WHtu!)xqI~H`PHcgsgW1rdveVPE|{9x6rif=#hI0u|lGQ?)96MbAc1D$l2>V?|`917_jnv-b{qe)v)I2n}oKl9Y-;>`-NMcW)-x zT4_EW$ef^938Ofr7=hR#W12KO)k#`a6FWnH37H|~JFYa{i;#cOIH-tH3H_say@M$_ z$;areO&&D1S+Fuj>-CXb`8@xI?f8mAXRbZAE3jjf^+vpR$*ZYWt^IJj>=9 zq~dZ`jOS)?9?U)*U#>yS>|HHC0iyg3BFmApSLA6r(6IDXIoo$*rr4=>S`Jh4D)G2=3u4K$RpGWF z=n$kj;TJ!rV)rERxXYb_^JL~3CgIM0YuvfuAwYVWCN!iUyYa7vPFNQvG0o2j{~xKf zif6V?qG2wP8N9u=CeH0vNmZE#6Ss}*n|byhC~^cvvY_sz4z6>Tjd=6lFO>!)%(h#% zB3+CAwh9Kuem}yheNbV=b;Y*)^H#`rnV_e7!E!D{g8bW&l11`d2se3Tv%|HkC^3bf zHBa5LT5wXyQuF#(W?T05amm;W1cWqI@_B#BZ`3L!V~_?^X4F3?jzaqvws3m9nzGmd` z6}G>xW)KZ#kF*{e(I8qH?fouoG^fq@H5UoF^74R7-m;RavTUu+v8%`>zF_Wg@5&0s z7~%e7iNS9!zsc8^5`-fe?8Za#_?0r|klMZWe)ZdpC@E>~qhwXQ$C`(LmdwvS4`V?F zxl2~VCoW^{j8BvP3az_Rr)Ne*`iXcPi*N2oFj{qlGiaY^k)&vzO|LuLQ5M$MpWc{Gb$Uj32$m-Tn9SW?Q&qe0sWKaNj(J#qXvMHMA^ z&x#wv6Si$h#PoN9F_>(`n4{DcC{{QY^&V<;oG=<0Wq3 z<3MfQ zO1M9LibF96{Yk2V&+dYMyxw~6EI38wzT5xUHI&*`gATK>Kfd8a1bNXGMEqznAPk!J z&lc}CDVZNeHx?8nbkO_?wM=L+j|Dv)UhdlqN(JvP_pW^GClLGh9i8VpN=FyK{5LN2 za-t)AUkJrA;JU~Q5$hV)x}!o=z8wStnQL^S@!g;6>N<61%h@Ywn ze1fe4t8#x6R~MpkdPI!AR%S%)(Hr@vk3^DkPj0bUP*6CWkr@kYDFpD6g5GFcMWu+^vLW&knZ70Z{NLRD&G@%q_YTnG?XGEK^#gz~2`Pxarjx6Zli z(c~uX6`nR=RE(kfnm+({#~6xtQh9Q4oFC>ftISQT79=w4?`8-NR*Z`uDww3I=Zn`( z;#TI19~e}-V^2rngi;+on}Pe@=4bLpe);IH9^`>U@OW4cgS6|d)nERw{Z^`Xrp;KW zJ1xNFB7Wca5_JC|1^@q}>Mf(947jdgK^mlnZiW~@q+@7-p`;O&Zlr|~q=wF+1(cEo zDM<-wq(OwCyIZbtz3_j{lBUTfBxb*-5n^JC80=bU}c-f=Fmcj*KXxjtkUGRXFO z#K@;OiZhMKJiBO5Ix?3lbM#or7hNO0D$t(z=(Wy%=rt>5VJ4nR_aB?CQ#T$|dsx<2 z_%Z8~tYz-R|}Q z4=nWI2R5sH!Q6CSDGGDR!E15v;X5kQ8iZT$`0$fXN6GF&FQ|IDR~oAQq>vMDOG->l za;P_=7ZZAnjE}PNz+X?~G)yX$$} zU|I+xDVe6^@B9-acGn%jyDN-4tamA!V#j>g5QgDH^Z6|?cwCI-*r?rIjq@MRbnj2F z4anLVi#P?hOu91b@F5bi3e}K|U7?QMfC7w~`pwg{6MiE}#j$HD^yOg*|Tb~UzJHbWS4Z%SAi+de#>5+!sq zp3XaUkf<@OA~y@Jd-~e;W)^R*Trc_1vuDC~J!i)hdW#zynHCoB2_B;pO7S}yL&#*C)3Y^ffi|CGI01U$JY zUxG{R>s)ZxN#(lq*QFx<0W^mqJiLG&9%DyhLKvE&tPPkf#3QzvJTn>u9%KZl4@bXaeS^ zxf_7-Ac_73PnTq66dn^G?mvMmtvM 
z#j}T~`%4}6oz#@a-*L-|1H$`+ITS_@x%AfjZuvUxD3v)rRy6 z@;e+-MS&l}g({DcA|l#r&8zu5T$+D!HlwBOQMB3j&CF>$GJA|y;`w%mvZoB6Dq<*qAf4t2>Hv1@R6WJaUl2}$?nRhKKO|WX>T#4ABLwP!_4eOrW9pa;EkE1k#L}_ z&m&5Co_Bm{Cl1mQTEdNWoGqsdQhCPyo2mhw5l*=Sr zK68=Q;Aa*w`WYc8jUrYSG?=1Z<$jtqIS5{(D?0_gDLWSCigZNNhpNvZyrGE}Jyg?B ztS_1J@qVa0?sRYEc<41#IHeQOm}lw*6t|F(ZPW)BDxY4k)@3#O2LdOvv@_N6AyvJR z30v!O5h7tjm+|p|CJ-x1`|rW?18~6Uv2Rjvn4u>epUn7?L#dBCOamC{--Pj^a?me$S3NkuRF&cU^&YIpxTt^x1ZxyK0cGs?3OHx0&^turlu1IiC> zWv{b_WhbMhpd3^Y<&*cGCm%uzD6SxR>+T>Y{6CzG@fX@tN9Vm!O8(w-N!_(W?9 z#M}_20nr}=;CY_OIT-}d$fbZ;(;UAKQOff?AFw0|6Z2d3JaJcy5H@%R@HQs^xRVt3 zLNnxc2htZBwq6H*tawelxH&f<#D%A{K~a^=ZObb_4lWi}q@92R zsOqw+kR}6U@*#LUB;xKR+T_~a#J4w|f6q7>q`X7Ii5TBkO49ZgRC%#J1U&y&1U&t( zA|OkrC?b-PzgfS4ABK?-K!STt*!66HxsLf~C$2E_L$2IllP*o`t~Ie1Pt3>21{^kU z$rkwgTONy@4dN+|@Gc55R_BcH8WFGjAA?Hl!XecXWFO}mh#Sc8y+2i!#?kzq`&{K+ z$TfMOEq`qC>eIoS&M=l84w;hJ!Cv9YjePbJO^2L|XptS>?VNbqM%#d-0W90QSsbS& zDpxlnVFk#@he7m(CH>s+PP~>Wr;*DR`Z|AS-k)fXMbn|~x645pqQ+IbNwn?@R#IM) zRa>!uo0Yd~#qfU9R`}f|y34W&Q@b}os7niy5TwKqijQOh!#@R|OGg$^#aPB!)SL4> zKnZ&z664n3tNe)qYNM>HXJf3ff|`jDtm(csN;QYZXgpJzyhaz#O!r@1fBhoRdDt)e%BQZb0~9Xh{=9N}kFJ~hi1KT5+lB4y!a zQLC<48`MHRfn`~nYi6gT7a0h};kuK2P)!?BP*bN!ZPrwCn(s@JY^Qz=_n=IAXmy<3 zLfrP{lQD0gZP@vSl}@kGPA#rz^JhB!>)Ho*Xj0Oa*ePpTop9O(?lEiRg~Tyy0{ECU zNa>@@HEa6X_)FmP_89y;OY#@je7|1O5w3*A$q*J;QmSpKF>O5N{5vMLrnwb+=)mw- z>#RLK+=Q&XXArF(K3)ZXuqa*n7WRF?tqjB_ z@k&c5JG%Dn1;^e?n0W>G^(BYJt4q1k9ew2I1-vfw(rx2_jfE4vS*f@}`TJU}n}{8b z94*9oNU|MoG0)n+NCqwp4zPKP7A++ygI&@BJXC5$@r2=|PIN@a@9E+cJ-d(fLd;~~hVWReO6s{KMDQit4u*046-<7PUqg+xmGdEW?JVQ-D= zjlwBF9;g>k_)qyCg^mPP9$;~8NxcXcwELmQPa^nE!uH-&?zA(3;Xj8^DgQMYWaJq) zX{E3~I}(3U1Y``|0{-osOQ&cYz`A6!-AMNqxdL2obv+Xen(ldo;>clNH@1kW^D5P{ zjOH?&cPGBRh)lG#BmtHdj5~ax-w(TSgiZdb6Z!3KF;)7A$Z>5lHNT*VelN5D*LYN^jX1R*C_wT0CQqr0hWu&l+XI*ZzVM0*yIaNvu=0H(%A+a2P|^!<~P(| zZ(dcde8l?|S7dPt7j_h@qM9fa)zSWTZ0q5(syl?k8R)UhG^zV^J4S~(b`UP662bRH2@fN&i$!ZGgdP#& z#ZG$i@@Mf$7LG^ZjFk-XvH-wPDKTKJZRcsxSxORi!een5%u)Hh@kW|oL2=h6s->c! 
z-I*@v9c$+9vD>Ddc4F-7q&O5H_zvgp zANGJ2@GmP`5KME_R_YJBz|~bWD(mf7rdB3&FxPBW|DAXgbxk>efiruz8jp!*eD|p& zEu|?juPc-XjuU2D>2uI0g!Ls0WW|!p{lyO-WZ^S&>wWdXINLLDwVSd_RdzkB`bv4= zxbhlfSj5ybv*(oivvQ84LJ;p8KE}I))94j7lR7Dl-u|DXGNR0D4EK#L&3f_jol2KO=9*t-Nu0SqhSQqv6IQVfJ8*5O3S( zbL(Y5yqmMwuA_Hf`rf|$%#dKQrn{VWa$05YY7q}`R~mW9!2DZbXeQAVTb`P8na1f| zo(3G>)|NXHjHpbRU^m^D4}b7f!7l51-6(B1L6p zT2+;XZ>L9P`Q?g7EL+V!4FiyP9q^(UQ-skRr^}u%-Y{vDwp1t2249le^NMu=&OiAv zNp=+qX&)hUGHnrX3qYJF?<>vcXCDEHE=0I4=%m#`T^HHX8aE~l3a|IVblDhj?c80o zBLFNZBPs3w-EbR%9GIj%&oKORGMN9ta6%U#C#Bc&a|ioBPx<~~U77y>E5OPsw_mI0 z4O&g24y{)V1de;u`fTj5Ueh~)h@Vj*3qSTN z!#e|aTdE(FCi8lMUbkUo*>`d-{QUM9+zel5-lgyI#=Bmb&U*yL>1DuPsee@dgJjq4 zEamWn+xwJrIlQe^&d9ZZPsYTk=7Y0WdE{fn4q}+re;IV$-;- zEr}cMq+b>|>vT#%0WfgtueYQ;p}!~-UJQJ7X=dmZED4|Q^JG6s`DBs5(h((ORJvr$<55+S*e!7I<_b&OUg#DWeGl{VT@Y@i)h|Jv`tQ6gYHWI}(|pe$ z>)>au5wdgL^?jc4tW5D?pklgTFH>0tr1Q>Y@|+6p+9y98tB}5vo{QYQ+=U-YgQ8cU zf^>mE+9+IsUJ6CybT>elp*O6^3&6cG)|gmB<_QzJ9d)E8`X8zr0ABrny)DP~;FI2bf(R{r;jT-LS^T#cmSgoS64<9TY z+ukBgdR{$_bn~s+-7}bqfDu27Q4 zV9)n*F^=u@?y8!X225i(8U=v#XSb?Jk(7V?zy8=Wz9(D!aK<4M_!m83Rb8+bUn~OK zltf4G$%uN7cTjOK&)>(O-Xp5a<*h1YygiFW46bmFE}~m-R8<{T(n7;FK?T;?H?~lp z_XQX}OU7TyVrTP!UIHA&%nLM|ek-A~rmS*KJ{C_l3sEMc-6z#%(LhYH-Zep9Ggf-b z{*P$#pDebjEyn*f$sr=OKMN4w0WM}p?Frc5F8;RlVvpq~S_7|csrw+cKwvVpHQ1O0 z6sCb5fiql6!9zmve~7trk9h36-Dqs8XrP#GG+V@iS2g_nCpGGxmRxUxk zdHGD0V4<#Ov8is_BRK-wkgnb)F8Gojjy(PuvAge@vU|4%)N&j^Bb&$|4dp>X!l+y6 zAmMko?3yWR<`^qNN_-@df|?yJo1EQ+WLcqW+)7lvV!uvd%k!to>HZ7~u(jJLJ!!n2 z41~s*U1l{Z*W~n*!sNHeEjq}bRp?M0u_o&gpO{kT7Sy)2@V1$y6C97*e2TL3iuE)y zYgT3=Rbrj|n%s{O6%mvY9Yq#;bSlu_4K_?_|3!`JCLz7L5*ER*m0SyRt*srrPw^-*U%*=Q1&j7&Ly~QDPt6b$*8WN?;Wpf;4a8n{cL~ zC)jJZV`h=}P}SZO?kLT>?P(HCUt4MCbQ<=|D`FG3<^`9N2#5j^PbSSCb2*Ebl0D9Ora+q+F5+fl z0H^c&S=p0=*o1nbf-1WL83)BKmV^sk=gAOK= z%U73lrQ2zt8}!ksdXv)hnDk@N6A7=Ay1`}OJ}poSajgq0zzvx_?Ole=3Nq_r3uLwi z+URQHd0qg(x{j?Rp29Rzt4+nBFzSzh+&q-Q^e_+ZQ!SW@#V(sdjmc{OSW9dl3^3`Jir{X3)l>^_gohF8_`XYa z<~JYc&rGK_rjJ_~-0#f-jAf|IMImHr(6ECPAhWyfZDE9v6E0SF8A{e@@uYK$tO_g5u~uYN@X09 z#%Izy>z&yOygfQK1Hv=Nm5h2TD|~exmXeJLEF=p^+Wn=cf$C zM{(dmFS!$fQKvz%&pPp?4GzRW?#hZVt9Bmz<>bTFT;>BN6X*Munei(KY7;w==p`?uLoDn@qtTJkZOZ z$u7OhWN0k+-u}DuSCKs9&9-+_Y4k;u+nuqF;w~mzO}b&oTVOL8edmvNS>A5l-+IZC zZSlqTdW7V#Yd?L*>8HWx@}nGc&tg_HboAuUf2QrbYAJu{*vZt_n-P9IAW>lMbtA;h zf>)<5dtcYz=U}NOtFg8NCvq7EZJWahYA~MV4=*LHUz%1os z6{#Z`AfQX5NjHu|5_nTq(_)d#ra0L-Df1TLS#%6}og4A|v_+-p_$KY5>4>UTrKlC1$@UH-B*{PYNF7A-N{nP=0z{(PHQ^U})fl1tE&Dl~0nJ1XPtUI2WKya0f_6kp~7Kg7sIJT9y^B=Xa zyU!1I4AIU`p9d3Si4|wZ_LOb-nznP>5E#i^Vi1M9fG`gP;0zJUCZ^dHd_xIDKJO<+0EK?#2w? 
zS}OLCF-yRp=;g$8{H`sKqAf=|XPNt92eyoS-7Z|5j3 z`di75dbWqMMpD5%v-=dqIqFu{QhUN|zCN4Qg&%KQQ*Ly^<|HvI3JxjG}TdD4j`PJfcyFz_3>{EY`` zNWEv;gGa_5s$kI{g5n~SM4%d5j}lGQg6Q*3E$AF2A$_G##r0`0;`F zg{}k9Lr+)ZIWwGlpznc{8Pa+A1otrygT*mgFp9CU5U6v`Ftn40jE(nf<|o=)C1x?A z45%8v0_;-_j{07!%58R_v1pRn*5wYJMEN(%<)-fKwt_If$d)&+hUKSTUQYemZq7v# zHui|~@XF08G%oURnC*Hs-XoJ-@rT{KrD-DmCc2f8PV}y8dAad$#SMS((W0TfR5kBh z6w?zGqWv0NtyN>mK6x>Az9bQBX}zV(UDybCqfQVxC29|gQ(wbOfLN~a3EeA$oxbVI z_hH|tHYfn>*}ei@Eda`=TWSSQ)@B6R#{e;!vtlHm(tnBD8>}s9-7H%QSe+3o*V^Rj zIO*~V2dW_obwTlV{wrSH$E|XJyiXe7CL%Y0{A{m#p_jv6S4b0l@TRRw7xC0cY_GLT zMieJ>_M7&@$7Mh@TN4mnb=J;NL`goJ`d>SrJj~$%Wb?z!!SRt`gzmqsSW%6=zc%Lq z>uq{3fJh&eF|3fz_0wKWr;LQqN?TLpF95+Li2{94Rv*1ZQ02vuTc;|VsF$Ldc>Lj=pg~MVtOGa zC!+D3gQWEjKJ2yHbKMP$E?xh?t6AQ7JsC8WC!mkBw``;OdG#?gz59tDmxOUun;@3g z7sYxrGEW@b2cM4|q;kv%f~*lX=AG?g)5?z0;LvA~d~KES^aO>wnT8@6LCPm&w5UOa zF)uNptgP^)sM8T|JG46EO}n=5#Pp_5b9ZUk+)Q7sbUn&DY^3>H!P`T~PLiT#rV|Jv zn4NYtq(vNVn=cvr0yQrp0kJ z9(iktNp5A1iJNN8@amDRibYnV>{=;>WV)-3+_rk9D1Uc?!zqWVzhhi$TjM5egxh%q z#l3@GCMbvuMln8&(k7c?tj$)F&QnWAR)i;3-i2}3J@AqD9{yeK$WW$ha*kOeDuwT~ z(rZXpwik!jL9d6yDm!pjq4YM)y(wpnnphSeUT*|mbN*w;xZTX;~ z49hrVxKIZ40=a3vCKioxO06TrbOyg_WgwIq1{kp}UwQ`f`%ICHWQ^{|(TvmvA?z1R zY#eP5u~-)i`awB!UC%P5evDV$6o_Lv7nvW0cTb0G@4=^3)V&kj1W-S511JL!8B$otRL1fy3#z(RfBm)a=^#B5|(Z zXW?Hf|NNx;boeFs3Bty5VVL_9;SlV2h+0pA?{TQ_SY-)<>$RIk`$=rSUZax$UY+*c zPw$*sHJAnvzV^4Isk-4-BHH;rSlt9AngTQs-2V{9NMP{`AddEnwE#CB7>IYQi~n_r z6n#A4M9r>PB4s1D|4e6pEo9a*n!=uuYr^xN%maVR_#*p+;-#+TujydD99 zj8FU80nw@eE4l-;Men&u>@5^~7V_3lwM>hCVtpw3$N!khRPCl4AMQ2U zOv!x1*7Ifhlkq=0t7;Ka56J)BaZlFD!`8WgFHHe^?xspW5ChcjHui2@vVHS&8d+WL z7NT??Z;4eH6}Ip)U^9Ch{Tot{9{*wFB09-_{HLYdzLQSO7oNuGN=ZNeLq($dZdGX#z2Oo%;Uqdj@G6HqQ^Q3 zYz|)aywu%P;WFfS6k5T9r@%DFL#Tm%a-zxc2qm58WOjUQJ2ZSHh_Xuy5JaVOin|`q zrz0{3QJt;^Yml%m_sMU0hbQ7fB)Jq~=#-T^SVNTFpjnHESVyLTDBYfr(aUgiy(;Cqpwe zK*MlMDyj$r*oHV-(Q*2uMf+^RkA`K!@BYv{Q&hRul*%HMW)QM2pPz=*qUD{SS`DSK z1c1=xGW9aVFf0=j!{xF-6j&c2&ADh-;h4}veR?BvCkdVmnPUS~?flh4z%j;`dw-Mr zg#M=+Rx6bT{aj}9S&pa(!wu$M^!#NUPEru1}kXbzpIa}^SGW)h5lALp_E)_mP zZEZy)$WBgiP@*7R8$hvM7%;^q(SMuIJ!@LSc#TCMA#z)_ZaoVMR0FY$C;1QI3TM@3 zb+laxVhH5a9k_3r|v9xZwvQGF0ehNdyFAAVh8$y^6bIE;& z=}2p-#L9HNT3-P?4}lyJ^6sX>T*pJ9QW?I-8{u!Y^L*Ir=)T01fLx*WnyNV$pU5B+ zJDMtaE71gy$utbPvW*}*gH&CXS)M_q8*5Crs!Wm6-oYO)wp&U<_DdmzAp0fWo*~+u z>AOzSz1HIP7SbQdyTt{V7bWxeO?&5JQgpZ4S!=|J_A&z3%*6JWF)r)OiT1Z1rLodN z8?Er}2;r4opRNe=r|HDatG%lcy^|HxZ{t-&O`7>Z_1 z3d7Z?d?K;>+Y&q7YbUx|GaddmMS&8n9D4IV2G96M>+_phlpVDuU8{WWVJ2Gaoo(2f%xKlxIt}kw?1GKlpx&b|aWV&LMf8mh zKjB9_8H{vpd?Kdb~*2n&Tn1Ygyt;ts`Dg zlY6<>oF8z3e>$RAlli50#3!s@X!Xn}HZ1Xbkql?&`HOF2T*3CaCw#o)BKujvG4j=7 zsjnNO&egue);=EVJyZJ<6f~*Q=a3q*hs8eDD)&~3cWl%iIg6`YmKm_V!;2FsteZT} z-@v{ND36mu#-XW-e5pOzf7P1R5XhAkY^nlrIN2e~4978(fqd`w-&CmPl9@R91XIU3%N}H(LVUpQ-rud6j;xuS}cqfWt&94 zt?;UHhE#=}^S0e~9G{|O{wcYc60Zmkm3z1^RPUOC$Nmkk{$%n0*PIy`SGBBDv5bef z5VbigoymI90gw(e4^sZ=S=_U1JsK|X;%!SP(D;600?7j2d;<0+@!xyORdowm;&j)x zNVr9HV*8{Ed>=%B{d9+VoRBm``__@sF>do~U-Tv6ET-&jK@1B2pyltHB;OBShB#aK z1of$BF%_H$vX1-2b~U2Jij%6&@vWS0NE@6?ncLP>$=o!%B3t|Q7G$U$+|?CEvY!|2 ze~CQ_7Oea;tmY^!5S3{E8Fn5a*5x>!o*q2=aP<5zZ#qdJ% z_M0zyLr)jYr8nb}gZi{<%#8_!0{akjL~$*&W53p?x+g-Z*+rrsS8$a-%Ki4pafs*MxV|dsDoVzB;fJ14_y(I_ChjKr!h%eQ=L)O zQ_~|CbMNDWC%eJCleX05y>!9-wNts#Yn)DFuIzs~@J>-F+%IPJKwlhh$6+MeVnl`e3w4pK9vK5|4Sjtc z%GHqa#(Br=;TeO7&quftWq8q^yo^A5&+rDu>c5?}o@aEV4Pp3M0WOq0x*FS+y-(Ar8@ZfXQl{agjt2{=YY zC`hGdC<-4MU$>twX@DZNSs(E+AXG2h%spmp_s*wJmc7H(A7}WyS;SsKsG4{3k&KJ* zh50EOr{lCB?XCw(k^1JXfy|Pi(+8AL>7K>Xi!VXw7m+ptR_j9Qaw75im*fJ-@eafB~}Ne>L~r;mD%jO7NOT?*Dy`T#b=S+BA>Dvn-akoiXbT-78o 
z`lVVse4YhPKi8c*)!N-%G~H=yu3Cr$nlnhgR`2>OCv|Ht&80XE;)xrB z!BD^Jl8?ob|4w}~amE`xUOI&{zi-j|Z@h}@PH(!WKUnfZ#xHKO+i`1K(2IMlXaGpu z%?etyzf92uSs**<)Jl)#obrFn$_vu1@%ca5ZRu(4?0!<6>F9G0z0qn}UOR^{oJvik zz$C7fKiYs}e8-}~W<*;wBx`#5u`!~A?#b5T={01&Z&{`B#Ug9X0UOhZ$XbYFI*4}} zITm-l_zsfFVL2M@8<6#D1q=whG4e^~H$OUHxihK|W}lq?u2#yg4dev{jWGUeg}Dw` zidK>KJvXEW*5m56;-;PA2Kot`cWw9N8Lfu|Wx8rQ!EdSeqk0+YuU&`^2o8I{Hhrx> z@g#R&dMQ6p%RB6S3aWWH!kvA03vs{8j!yg`S9zI0PPjC}cIL7!Lf$pdZ5)j za1vy`+Gos?mK=LSFHe{6xtt_@A^7e@&S{BgmM&c{503rr*hOk8#c0jD92fJ8OUk*u z<>~hFBac?Q6PGO}xM48rwHzenaB!Wt&^7Ks%$S!!P`i;bO{jPZM5S&nasbNZj zyz0(S;mo%hr)7`UE}1fCcNFYqqKCY|$Fr4ado(xHXSr(!w4w=0rz&DXnajm8BFSc8 z^{~86q)-WwYTnLA0w-uecpqRN?)8d$jtz>A zb@lp7)YrKL7{?T2!X-o(mn2hrf->WgK9^$ZV3*5;cN)T#>~KFtlWcFT?&p zltAdgDg7#5Qj9T{_tC z0OIoW3d3YTCKOeox%}n|_ZHu)m_qMF3FEU*(8mv1E)@Za&i4fTh9OcDtX)e!9R!LjD{?Yc+xEx%@_)`;W87r_m8&teYHAE)~9}gQ8;}2VUOz?%gp4?ne+)z;7 z*kBP+Qv^xg6k-LziBgO#TP9MttI=+F6VhP_){pi?PGS-6t2t~Ml4e@>(t`u}vyan; zq>D69)4gqbyT`!q_xCJ;n+X>R7>(%fo;lZJf~o45K}~@F5&3RXr@=mQ<++N+J3h_V z`>6Ux;Lg51RTG5`fC`vscx|p><}V>P2}ZeF^Wmd)RBi)+6hmZF3qr zDF0UOH7K@~&Jr1k9)t#CRdO8?vv{t>x<$vzA+}|UMFQ(iKMiwTd^kjucw>c`KQ}^& z9U?BiY&9NyYeaZ^dy5koi)y}V$-Coviuv9y%_&*6`3HXdu^*v;>7abOtEQw6dAL#* zI`KzAAeD{f>uny#=hq#UD)Aa^bem7)hy=g&8T6_mZ8_?(wp%;z~^ ze#$>_o8WVjM0<2@#53UCt~y>v9AAaVhUvJSfOS4A_A0>+*(6GSjPZ0W(k>0|&($&n zo6|K&F8>k^E2c_nSDYdaH?g69_cf8^;UhopvD;vGHG5w)HbEnM!*feEP}`n3+d+&x z8K|S%lK2((C$($^0c|6pFtZI?BTl-|0W~mX^y-vP-#!UWt1!B(s3OzRUp@>YTC62W zum?_aHFl?jc1MPb>Ml_-nW7A|@GSJ@dqybxd7!)n<22-AbGiqF+X82{$tN`=IH77w>b}iM;yc8BRWiORgV_5?^3MT96r&XBYP=ZL_!W+ zRR_E#wa;P;p-HxPE+%DR9_LaP2I>Vg#x7+kp*M=RF2+L$mF<mzYU#*y_ za-}r(tm8PXzC4{pK|NM!th#8Q*haf>c1JErnfIR3zr>{y$XV>uEOCEW5-rgONFExOF0<9C7Z{kKSQ0wZ)`@t^x3Aw*lfmL zt(5IUxYG-wX}I^veVSgIIzjljw95;dJF)Xn!GVH)8k`@cvMwkFjEZ#TqkhXfxhL|= zgCsaZjQzMxf;7qaGs@vW*{jWu#(xQUk5Sx{=BvTEbu7gCedKv}CA+_-o~eOgNCGgS z;{GU1vhMqi6GzgTMSe)0i6BrmSv@*uxt^3d{VD>juC|3O$8=eB+ zgyk7<;1)PXap3!Q^>;mg2ky=o={RPg2QuI!O@%$lE;>>!WsGJeXxPHe_-f9q;;DOucRT(}s+-bdN%EK4W%okn<^y4+ zMK44>u8}ywu#ciJqnr2ZD}lUO-`;k>DXsU57gy5e9@97da5T=%n;89 zJl$zQEjF0J=9uk1g3`k!AR1&wVE{gV4Bgf3;%TrxhfUzKc9hqOUwqLzIdw zFuX0CYab7SP9#~n7d-oos_RaxR;#b=^!}z{N@;IYKw>Qr^O0>dKWAy6jeDXMJjwM! 
z_^&bKSAJTEQDTyPG|IoZ1jZKsIztVFE6g+2s$KNCzvh80=R%fe-0Eeb)Bhj7%tT(d z89`DHecn6?>j?Qtk#1ei@$L5N1g#sL7_Bv(7zP)%`16m?WK)$B3r0Jusg658C~zs) zradS6^O;2Tt3T2A5IwfV7t9XyxFJq2yQ8^0K8Mg8Coz-Sezs1dnhnd>%3JF!;uyy$ zW~Z0i{u=quxRP#%%cnuj+c&c#wPn!;W0;)!z9=_|LIeYp5w$&}L|hXAA=S%LmrMHHjZv}(ISX}?lbMD*36LLs4%PXSAdPWA zEx(*9Od#FBvYaf6e*Z=Ze6-1du|yN|@s_{-y$4+fwZa;Smoqq5l^t$zsHr#~%!x!m zKc|FU=>!$5sTRHzqsP@{mrQ5Y5(hQZXrF&ID)_t$&x+QKQo{xOT6cZ)(bMCLKSCGWs@9>EBlCzfQyRy|?)5gSFqu zGMfeYb>=06@_aFN=FRTQ01Uq`{CFm8(=G>kzS1Y!sFm~g_k16qICX@YXW|OYVuNpl zeeZWejnX!na}MPy?bwIDoAPE~axf=bkMYAV(-u$7DGq!k!^53PwXQ}<drJ;kpVM%UlAKyK#mUlQE*h5*rOhV$_I8`HS7 z-_PWdG@oDCZ2!O=$PWJ!#T5b*aUXwf#M*xuR_YWP&pCe23~X<)vyDqfgv=`80$D|_ z0KSc8K#?2%E>)mR+)64$mFZ>XL<;3+&cxRb;#|u7f1{n{cJ7P{A~-=-8!kEQ!I8Tv z`;7GXVOMmx0%hJg1>ehsbw%sC63U+Cw6?G%9ol2Yw3KBxw2{{UY8wXT6DyO$Vv{tD z5dXl|ZeBjgF|2~uPUe0&&nBz<64K|)zabEgEbd+(eli@!PVdg)-rV_juVnK;Y34C| z<(oQIU%W}jI}8(yUofX$c>KCljQT0=?sg`+w%ps_*L{oi#PZ~KMShhLJ49c$(JEu; zac-t%U670fTV2d9s`OZ>=#`@(2DGL5_@g#)yuc}?wb4O-oCYZh^J`1^^$--s2XE&{ z3rRsFV!kK-Ku8Act5D8c*fKKZT`Oc|SFj5_*_kEmhn zE#cPRtYuy)4!ORP!<^yO6;mtNXf@M`DKH)0E!F1$+3}{rEUDq#r!m%F`6HCFP#82f zdpdukM-|RygzjAUeRj&#L{dY3pS_m9(kkjGtNX)3f(1SpPy{EgfeR8t)E-ZdwCHTGrk2M z-4h-84?k-Enn7}1^VHGRzFnk(;`ZsD3cYA=_$Unp&0`k^d#~jCYtmir##lSw;w%M3cRB?t89~sCj7uO+qtf+ zltO=qbDBtBwF%quZ_ok(wx0VH$XZ@>#@1oy%6wfAOM<<%MHzaC*97(eSTE=7wR#jRv~Veutyi7K@sUvDDoYSZk- zR&tv^Nv4&KbG&-3dBU&v-T`u!PDs=2+6T%~>FKaML@7OihEC`e_bX(XxY>Sp?6lbu z$(IaqizruYM~}mh42F!Vxo?6O-X85}OYflTOY};?-m5TWO+BN7(6MC1>hI4cP+Fb> zQEuutNWPNarU%#qA6qN)f3>`tepIqf)OB6iUhm??=hzY6!DA1lvk2>CGsl=uRP11J zB5M{@Y8OQa-JbSq?#L2psM@v?m50b(Zt7dJ!&z{ zSMjH6XYpqr zP3QINXIDwLNMs>Bod25$UB#4=vYicv)D>oVXw0%(AsXE&Ez`C+qtT5E@zncCmh->p z`SbZ}2uuD1XHGDizqHQ5ZzX%?XT14d;D(PwpwAK4r^FsF(cq$F#E1_(7Vjg{y^#I6 z0tklO;`Mb7VprnC!hn=-y6^iH=hGF2Yr)_&crZV{_EhK}j-E1oWb0r`MBm&P3EY9E zd_Mco+ZV5+TwOu=fHlNQQ_*FT6~jhj*;-jd-A1#EEsf|mRXEe@f;goBoQHAvkvh%e zto-AnXxhh@+x#w3RF9LNTCVtb3DL0EX~?+9Bew=8(~B>z9I z{wl1k#tR;WakoHmhXBRht+-2ZcMa}d3WN~cij?B+THK|S0>#~>#jQn)wBP>y&v(vq z-gA*Wx!gBdduG64Gqh{uP3z`%_Jn|d2zuMYhM7a($iMZP zJS&@+un=+YRfpgs!))5~1i_eY&mtYJg{`^Ek4BZff0r=?VWoQMuUu}#A6s2p+vjxZ zu$o58a>!JwMvJx`Km{Z4(zS|h6e+`RE{Mhq8YraBe9iMxaYg;}#03BHQslu?rL0-j zL)+O{%HLYX$$umm`f7ez5B$C>o2UkG&UB|cW99{^>q9c;1tUX}|Bq6D^&h1m4{(?j zhCkCYE%Yg42y!hR$z%dlWf2Ueh@mO9mxRVwf)`4QVy;h$VlFAPRT}lbn98s%QdJ|o zD4;Yk;}V%PSN6ECwsjB?5nwm-RTdkTX{Q@Cq-mGf{z@I5N-x!Bw8IoEu`G0*!(wwW zvHteBWYqPGlw*4hjlHckVl+u;W@C}WBxGIYQ&T^+FMey(-`P8!rX!5`dFrOoYDS?I zzaeJy2sh1sn{HjlGvCulNgDH}?HWqFKQ{xQfB7P#&E}CI!;vCHOXwAH3QV8yFJk5+ z1gK|79g(YKwTPcu5Rt9P$Jm}~=cG}VWaK>Q1If5xxG^soAkPFU#H^0Do?29>(;s-R zG%1m{CxNpC8<-fApWdqlr31i}Pu5+1^rxw=S2X-}cEp%CKj@n(14Fo2%=97uksw2t z6^YHoB=Z5?T@unB)*<}`-rIp}cD9Mwetufa=hdymE<(t4a7%-t;4Y&!8fNy!uYafQ%cOvnApbMh;kfVt=wOuIOEIf zl}vB)Pb%ZSDs-UvMBXNGEO->aT3?}5S|4CSYz5yH z9gES7$e`;Oz{zGQSzkMI>B`2~`iF#iQ!w6ZU0g&EK z{x%LGzUh!)Kku2xLiGN37(8@|cD8|K*Zb}b*nyQXI#ullXqki|Bk^yL3t}4iAME37 zZu#Hm(aOa?F_(!!Srzb=jX&`3Chw0v|AA}fC0D*u7%80##ri%kU`mpwX#Xf7@5r}W zJ`g))ADMV?eQ?Dm7JyfL{fZ!i-}JqPB~v}G17jMzLyB_O&~eZE!Wa5eT}gRK55-aa zOW6V|3V?k@2c3~*Sknoz`9O#&M7sqdG=DLPb|&Do_ysGNS8+7>$zxugxYXELDPEp< z6xd4tW*hG*k;S)XJB}m?NhOqq)@;e9I&r^<8g%I%OF_$HhURQ}crh}!qCA&;=%xbk z2b>!aMSiZ*vTQu=b%a<_`5ng(Yle9Z)Z^coA4QpG_BOrfal1-xVN>GYj&!|9Hu-De zXP=+o?Vp=le{N;RY4?rzdoVS#K1cp&>KuI9z_W74juY^LwJ(_m%ol$3`vBOVi27~1 z1$muB>{jn8X0>u&HVi>UeVs&^{tBBa1KTF%Oh}r|IsXfZTAbh>qnv8BK7mWFVaCIA z8P*7)hEA!gk+wBRzz#*QZ>>rWvZ32_g2z5Qd`BuIkrB z5D7I6u7&@bcmN#aufbq`xSb@a%I{u!mrpjO(QDHGSm_u3ejtSOW6GXm6F~|TWY5!~ z+E2>(?KsXw2Gr`gFB63^zfS^L=yc07eiie1*vn+(U&?2`u3^n+V@b1Z4rGtMIUN#$ 
zUNVtD%&KR30mw6DuK?82LVW*z)HbbMhN%e>?^_nOmE&gCJIDW|T(kaC?mt5((B3vY zUUy#9T7qnFeul8wOI;LRNR*KL7b`uz-{$+OWz*E7q3dUQF8@)7^{Ew`WmIvhZtHd* z{Ih_|*eDtR@O9%S{ka|V>hBXFe}i%tg<8SQ@mzItnw&26Vm-)qtyd*|E{ai6RUAI* zgQ5Ss>i4A}mBH4q=T-pN6-jaEZtk@RED7c_Le`;}7#*@<6v&(q6EKdFWVt5);Ha)> zwPc54uO*{=AprfM0W?XGcX)>}*Kb%&5_WKzjDC!UQ?`JDZ1IZSr(|w4P;IzuJlGfx z4kSi4vRTwE4du@g|G>L7H;mqQe8L84fj10fZKS-EFjato(W(70U8gYeLuka0Pu0qO zMR6Y})l$68Q8X&H^yJ1h{7N@;lzyN89P8`^-P0=WYc>gc$4{o3cHsmiXENUZ()l65 zl+U?qlbHJDN!m87m^XH`@K4OR1SMy$5qRbTCur@S zUsf~JDH9(n6mztpvyEAOsHVGa!fK|Ochvc)Qjo6DI4ej_exKna$wuCTc^WDUa&|5K z{CnEWZXl^}l#2}G%4Q|{znHp|t1?pAg3mG2y?F58&x9(@V#*rkNk0U{l0HI|r*sq{ z!00nGhyHN!l`>jd^ULY$Ba01+Cn%_ct|UZT{pMhH1{D)!5KwE`Q`zzs+eY&-=PMT; zmqx=i>F2UT8Rv*Wd7}LZl{gCWWp6)Dl}G;>Q#Z-1gTr%!a<5!b&8Bh^XA)L1RPrL! zzRi2FI-^ArmNUM7XCjI@PoQl7X5-bvS9O@>4fsJ(3+k}|4pdTy8a~JeTI6FS&%}E6 z3LtVJ67BpDMohBUx4z`XYjd~t8nnlm5>GtH^&vx4i%AtZVt|>}~RQy+jMsG(4(1A5wcsG%r*lbG^XXcQd!{DZy3`09Z`D`wiu7{=0^3JwX1u zWXEVVT*MOOwIqh%kn7Y{y9#nGjp&@Sle^g4Bu)rEYB<bPa9_>|3jM}Qv%oh23OT|^89$Bnm4O?KYKelEKV|DU)HDbnmf&a; z;}$L8itjs1QJAk$KN$e_|EZ0Dw{{!#mhmbK^(ANX`DSH39X zS6iYKF$4Pw_zRe6VZ0FClIlC?*7shD!`d&K;7S(mdlM|Z0ANY3~qF5}{(v7+}Blad^ zuf*KyT{HGl{&D23!AK|9irprvDVOgTp-(g`_S26hC$TSFydBOLi!Y5< zP!pnUuS7b`@)6pveqF)yF{ZOsuHrtSY(rVE)Nf>_@yy4+we|=meZi(lcG?IRmhf ze7^2#9y4vQ#||d$*^YI4B=qQT{_t)phn;;@3%sKsG9^zomUgOLWlpyQFiAtw4TE6m zw*62-M<>;>umRmm7aCtZ;j)0}acz$bQ^*UmY1a62nKHY!+!}SU_L#*)M+Ge52{IO1 znZz;r!Za5A=9=logg4BgiH$GLi57lilZo3-;CxR)*D4QUtxfKn6CJ9|DQii24y2f) z1u_bhv_E=NySs)k2C<-r>JE%3MS z-;I`$y6wK*z{$UV_vWaZOPRwwb^*GmtXa69RT%N%$4_OIvi;nm$R8XGF;l3iA4(EN zT#3PUuNkGx$-Taf!!_t^^g%>rvK%t%rEJBFPWh^MrX(5-UtTjZYm(1m#)I;d!R+*6 z_NnmbP$@QN`rrGaRG)fdWJcY9o2C&DATlz@*Wim%cP?A83)RjNqMUU z927-9ryLA;_TVq<0=(@B-CZIf)UBd~sw7KLikX)&lcY4l6j8+o{JF#s&mx2X&u^ela_HO>9iugqEB2{qV46{-W`M>7i!YlK} z&|JeC{S_bOnIIBuO^0WUJAgQWxDovW=3Ss!>frP;i03KbF%00%9UXx|J#@rSYRt-wVtGe zI%O3`8a-vb!M{j@v!fB}y>iiu05NhdRPx~a^tFa?VQs)%^CC9C(c$1{M>%6dhn`R5{K|P z25w7|QD-^h=OzEhrAN(~)j9Iu_3i;HQhhkSZ^Y_sy^&8b`%?Z{q+dms&Hl&w71veV z_1X}duYdAaY}YBD&K`FPb;GIrFX9>NU%VFTq?5XF2fVTnMHHz5XyE&JQJzEAJen&l z;}ZI3b(oYfIxvHlv(qJ{DqcHdb>vDVXUBW8F&a65 zCV0kywzMfdS>I8H5jze=S6DMbI92g$5Yy-L+DO`4ST z3l78N!*=R=e^+9KIftzW{{k>A=mLWRc-xi{GNp(VmDp?|7JC;!T`#$r2yIUX{eB<~ z+^E7&P7C(rtj3)lf?>RSX3X_iIMk4Qp?${r7a~2dYeF5k;fA>V#JQWhO;(TpY53g2 zR=)Q<%B2CHjtL^DQ+zB!$l9b{%P9xJqW<@RNm<1*uGH5LT+S_{!*Y&WaX_v%*$I1Ptuq1l>l{LuFCUt@sb-Z9*DZ3+KW_;z^B@5Rbt3BA0$*V=W15~e6w=rMyF z{0OxmW$VT!^Ns3L5hly#?;7@W6DF&>L`%ozgRXU-KuvCqA=}W;dbY~&)@8ntHOj~H z%qfF()+Ldfbq>~LIKKUiM=J_#4fNea*`JebyI|TE>eo-=temrCb7{5ZJ)jg4C!4L= zpft>oI`zufQi{C8W)8unvvkbX<_JubA_FO&h|k(YR3FSLzy1G>8UNetX8l*RrCaFG zODBYct75cn1EfhfRhzLx`-uYpIRuiW;q1gTC+v)-Ig2R4@PAktP!pymApP9`1{Ko) zwfV$vnL3 z$hU!1KE&urArVzEDDEouTt5j7|G|MGKP_u@=8D4dnhxwtBg>?sC+`%g4P$z}r@Ds( zW#I&WAdrzSYvcgK_*j@#qP2mBt*p-|Uf6yJf_>fCUXGsU_ zSpUXbELE`-LMN&~RK;48l2fYS#K`$JW<~*0Op_ts1JbdbOuasP-BaNAX6KVs$FD$X z8cDS;*A<4pj$}x%u&BHQ!&$uJ5>oU(6B^$Oq`s|1K2rY(|{1(A2ee2@P}<+SqX6~zeO~kre$YWi0Gnzepj>X(=|?q$kh9mb6*kjT)Zdj zc`FxDVxfwPp~oL!BkE5G6D34mk5_@YVj?6Jc*#*;QkzD|jHsxi(v=jwHK`~y5(a+l z4|_fT@T3x#$)$oZvcUFce$k64pNR{Qb>HKUCP^YU`8^@HuC35gF5P|=yGAs!ox?AO zg<01wqodV?48G|my)n3be^GrgW4?;pWujMKu2VsIbJHbbS-^9GCpYiTBT%vpio|Zk zFMhYM#aqQJI9CCn36D&Z3=P?ShD8W?34?2qZ49u8)Wh(p9-s74Y1w7^kNN%`%mEYy zoRb}%p&p`}^jUsidAy~%@OHdx?Ub(yNJ@#{pNSD*w{jO4Z#NJFaf|;dy7!p>Z$+0; zxAG-j(sM5AP#O$fqm9&lIu8K$L;xKt=!Zio4h5z7z|lDzc9JvyDF0|924_3MQF4I$ zz=h*+pY<5Gny~Qo-$O=QuzI-VI2ua}-NGvoU3=p$Ik)|zr$4aHfmyc=t;L4|IEWy= zRHe}XP$Gt|iF253;Kl0!*U*APDmgronJB0W+nSLx`96zVubybm%)Ahr!19{LJFpEt 
zhjQu1V+!M~`N=6tf1Vu*EG&BCG(MTLK3qx}RGpT7%Ev>^dFk{DC@lM19RG8m(q7~A zdwYxEz_g&cPBi~AJ_}hW_{qgzwtxwti5&ZlxM?p0jQmE~^RWnGpek2(Z@>gzEi z+zFzVkBD(=vT`RzW;MN=C&;f^ei}EX@u{`ymeNCx^-lkyp9a6S>NGzS)X`Xo_`7~# zKA@wJhQw-fK@^EJs+t`~lzHZRAKyoInK37Be6RWD9m`AvpJr{U7@_6#iT-Vo^;N_` z3;0W}eBWb^_B1$4UquM}54lsPi=s$TX`*E?8NdM46*iVz*?j0G6mP|mhAZEFStFcz zxA~6JqV;lm0cU7bbUG<^K~WjTu6SgAA%aeo(NqerYJF-(0}ymQ&zLI`CQ z(VP zRqeTy@I-F@lnktp+uIVLhh)q&T#9=Uf?J+R4c-5!nvy{l5lP{L^LLeUX$=0su#xUh zleAYcvbP${#4aC9 zQY9PI70UC+Dg)Iux@gfAa*iO6_#)ppRZTx!TaS=CVwm!86FS z5|UJlnl#4Z=2VS~E}Qi@cDapz$*LHmek3gwnOLgByK@zofbBUJ6X#r7;q|Wo+61q$ z;ftxg8*~z$bUYQi)O~c$IgO1ir_cxeVPj3%`r+_KkO(wmWWJg9XAH?HEsEBib`|22 zNRHO7(3`VEAGYmU@tQcHNEv>BY0H(=C8CnY4Cd5)ZU7)KhpE3Whs1!dQt~|x(IaiT&sKK0~T^ywd@3!C||=o%!rq-1=Qb?k}$H&ynEVNMV1SP_3(>v!~<7z|YB|4azv@_Hcbe*k8Fw8osX; z>qI21!`QSXAtN$j-uHBwnG0w*V1is(P{*3!JzMb~YMZIVADTtE^2*z+!OozJ+o51s z+DdRpm6PHN8b??}$e_^{>>}pTlmCdr49}Hii=P@%H&VCLI&qA<@MKlp4uHVX{x~d+ z)~9r~_A8blGQ!;n0I`Diyi#Us((=Z);1dJgi&1K&aZA>xq`Kjhjy7>142_V;Sb1bB z*c6>{G9@oeZ37}kinYu}yED6ye`$l`6G)%bgUD^DK&m8rK~!qi1d1@E7>5p_qQuCl ztEONrLYkB7>UwH8xJ`F$_2h5+xUvL{^4*M2$vXo=d3>CUp;6c{d>mB(zc5$b(t!c4 zrDE472?jY5e|cIqUP^aiavcAZ62tzih|2zxJgN@6!_)5&adDw==crf8&SAW-CX{)3 z$td(zQPS$$WsiyjQ|sIZOdB(Jc}2zLqmv%lt#{uX8tHDp5=Kf)cgz*Nj+q+~ljU|piEbO7VMzw2tPP2zd@b#yr^Fmbf5pBZn1qYU zoBh>EK}0;uErMa?6c1U`>zz{}m%Z@~Yva~6`+D>$H769;n_I2|wTNdPZsuizGbo>1 zlX@}nc=AOJiAR%@K)%Q)Io7^D%O3T3c_t;+U;>c+6#8x=pYQfG_JHs9;($r@+XcF1O=8Mj41#I8cxbubQmVCaOk`+7Y&s3@9{Nx z0bgXc&?GT8=}+UG;Q0MUny6<9(Ri(!`^y?wPZ z&%Zi8n*vYR#ymijQwVd%y67V*a(I@rRcgVYf+w-Ch0;LEyvI7N#WSq_+OApt(~Y z6~cWuBXx0&&?i$#dpxACq|*ea;guTxTp!_bImU5`SCMO4gvi4W7ffYUxiU$9_ehA- z+)b4Xbgtlfe#wWMawPA`?3A-RVHnDJn9x=siKFx3oR@;%=^qu=-{iuNV{ypkIjQ!q z#?o7htntr=KeYiDSBTXQ{xvaB=O@h*o|2N1!lhJmz?vIeyRPTi`rJIa3TSuE;}Y z9X=3^CQ)9c)H~R!p)j9s>T5v56W_?ijyN`Ou}E_E@!a+>2+9jOon3To<>xmc=9T5O z%nwQA+xk`;H@?z5cW$$XnN*m$ZEZq&dk|vA*zWk;9olfzDyA4rj5x-47r**WSVthI z3#m#PKuD6eqmryxf5Hh+PL1hyGs6$7-spqsa)6;>7kaaNr8X7%{T`AN(#HwxX$jJ%q&`8%CkQ*6#SJtyat~Lbyo~&x4CK8 zy5^eywHIBMWng0t*Vwd^)bl!9|m-VVq1M7J; zp!EeIg%{f~kINxyZLe|RKj~{m=Krd5-?`}j&0*$+vB~;+S7Y~DPcN#tAb8%v>nx;@ z20v4H>ML7od@P^9FyC6D@oBju0T@v2V&&TAGOXCTho^4dagTgzI5xETW0Hk3e`XbL zm&c6ST!FL0CZksYwCnt{#_PfG`-b{Z)7s$&Zm=3=&4c{)&kC-gOIpP(WWCHc80H0W z+Mz|-RH%Q{Nu$3Xn}&frkoSD5aVRq@51=dAzsOhf;%%CBO3haSsX4a|5-GG=2tQpw z6ZxNBP_(Tk0br3AhksFw`4(c`-L3q`5L{s2Qd%rRT9j48^VP~AlfzgB+?egFOEt6+ z89Rh!tz=Q}h@CI^E7EChR8;!r<3r@LB_nbk+h+{=_K`3S=ozxpKrwRpenA3?;UJUv z&f)7Mh>hI2PJ~h<=ifT%ZtJ8w<49|PAP9`@2+92tPt#lkOD#7OBRyffIvpi=G zto@HkASSw;^SV40LP!+0S%>FWS>nad1>4Vsl&i`}4QZi@%y0x{3Im?z@CFF}^PyfJ zR#uR_0y8gpI77=y=%GnTO)QPyeL!t>gl`>2a+UT_#qsm=BKC8!f$?)Pg`PUkjI+FF z>R-?GglrHPTO%2@ipVM@Ar?JY>B&rfzOrD&PsqJzVI4mV8x8$-)m!+<(8UlEx&857V%K`%POPC+1PchIgD0 zIcj$X>o7PJZf1GmdpLr?wNo`Cd3bA-m2SXG#zSuWKp+|mU3^^lW2AF_mv$-injVGb z`59|suZtc>b*BK-7*a1=`bD@GA=J%t=r@pUdSBf2&FviL3c*Qqs|qbwSW83@Obo<< z4&5d9a9K{H1eGJNvrkjtVTE^B-+W&Bm5OyKt3Sb)AZYPaJcD`2@wG5*dTfRY`X!GOxX{@S#hPJ*0$6 z*w^81zdi#215^!8Gq99evh|&SkkbM}{_ll9Z@s;c3;*4Q0m5^;dWap{bF`5_uYc)R z0t?eU4GNrz*YTQ0?92YSu-Cdq9qxP<=jyhT@?VA2*Oo>A574to-e#^(h_H}8aMkrUmsj-%M*ij-)@hso9s?KAGY84@QPQFOBi-sRRWm}1 zIK?!(CGNY`QzfDkV=4%l)bQp^tRD=nekFrDKa!;*=pKDoR{QhyTQ!eZb0Q{{HIcPF z(;o)OH3G`bP3;gn4(hS1YSGN1mf;eOW5;lYO0?lj#!q)tATeyE!VYZ%jWOR}L7nA?Hza=xWLx+d%x&c4<8FKRupzA(k1Oiy^xGb`$UbJB3 ziyekp8M>pHO>*V?;3yeenD5ujX_J(F3n=f=q!791aAmg$AHC+BUG~)`35O&xmxIFt zSKG%afRKiPNei?W`JvWQ&*;679HAb3T#_Bay~s#5futa-v)@&{gLhcb3x&oM9}#!C ze-)!%i+ac*x_cxUPo24(WvB-lYyxC7e~JjsHj9sSU*2X)uC8k$2i`+i}0uo%VMVmeup2f8k_cKJMt{&M9FI= 
z8!tK81FC+pQ&|ZaA*;jcrcx~rq8QRhIkC?NSCv4C*(-cMGmWuRMxYkZp&T5+VtIT7 zECYcxjd(Yfv*6q^<)IREYyl-?A$x6W+YjBd1W>NOIY8$sq{rWI(_t!DaOI=Srg8QI zQM?PKBTbJ4JaajgaqYi-O35Ik`BP?jbOzZs=25_?@MDP8y zMgh_s-}n!qS-$Z!%5*q`51h=31hXKYlH;k=_t>Z`avaF9^Y|@W2cEil!s~FPK$xP^ z4PYrS;MyWP{f5U9hIuTuH1WQHGS({lFe}hlI-0gsW+UB^Z4z5(#sPp_fOmXO_}`lh zA-<2tMW)+-0R$N1ch_5|DZSI}3BHf}rGVV7eG4j}@f!azYx_6Q`%k?-b-nqF{$BZK z2?lNPyOZtXf#sI(S9&U0*($3sj5?a+~N z{w`<#gn!ZG+O9$eOQ{!Q19_7(vL}(bpSVgA4~VV2eB(cizSwe~nFmVt^oc#Lp_u8J~X^>kJjP zxq4cCN)q6S&>5f%5&rB6jwjAeU6LDcb=0 z81e^+;={@$1t7<1V0BbGdM`AGafRGX)v$NY#$Fw5Qt8Iz)2H%FDbrg}8R~;;g#mj!Nu(n4 z!19d2r>85apkQt(Mvj_#F5JH3@7k)rB5-kKwd2X76zvO^R}EulW?jEDuTf2H;l5a_ z$~5vye~Krb?a;@tutil?=hQwu>NOPQHqbtuX|KVg+Q^8PSb7pHYTV|Yy;rvDzJ8K+ z-VapWgKO^lwkYq?B%NEv+#c}2%ebTp!BX6}nc#$CcpZ`cM&ZzNEhnkcC%uAxw0Aw8{3y~&Q2lQj zaqorPWC$omna16VQ(AtBZ05vg-iK zp}I{7S?_2Qd=U)d^R(!F=kU;HkXib+NZO+T z$88Iut=Zpc@KG^0a^bv@g2hBY;-UiIk&{(n7@a^jWOfu}4MYvKM-NQLpFlKhRuqtlwKEqg!=bFBcwvd(i!yHh!;obY2@R zcRW!>N0;O5cp37%+(!eSp9g@LCkEpG^@mr&*zgu#KIFW!QAfsFCn@fTZQ9hF@$t|2 z$+YM7=52sJe9^Iqn;^QBj5Mpk1(!V{d-3>{J^{hzhsyt9>z$(O4A^hq*tTukw(S+C zvC-Hz+cb947>#W;w%ynb8Z^my`|bbP``c%ni;U#v&BYpPJ?p`o^QR2Poyw$kFaU+2 z7T3&;K1-j62+QYk#mZUYM#2WaAq*wh0(vxQ<5sMjazf1jrgUN61rTFgTP{?g3c5tfrEKIwVH!62Lix|e{B02U`^I-b5 zI&`LWT)d1Wv(LkB5}ySvO8D_8H`b7zYpv+MhXhg*YpUkJ61R%Nqy@~$kAjG8Si90B zraX4b>VRW{qjwe?7>5*wWA?E{f`-BO@R{LJm}iL=x`J~p3?}Uqhr|ad=h*zQ!4Wh6 z?6b7`PD;qm4IojB9+?n`5bR+`MnouXXyX7U6pXzY(<3zYHYIQI!rhG-p3>7NN`h{m z(_2XnNxR=V#jf!-Z^B1K(b*k8%w1hiO&T2hOn2)kWs!r*(|l~o#fmU7F<)Q82krN2 z&z6B667mJDx>|lTbrY|;e6mkW2MNKSh)p9kzGe#gLi-|_V(VkOdx#o~o+!^7*F_>#wr>nFt~R(#o|gk-yokivu06r@Piu}bk1fG`&e z2JpyYUGL1n$U9-^#d$WNJGuKx~)>ehBFMxS_(yI0slAvF7yvI~QE ziN?(6v(U26keQ>U5RH_LZ5Nn5(gM!$Lbn34m_<0@PYB}VD=YDtZ2Eaa+p{4bNaXNV zsz2}x`WlJi(?MhW^A1hirp2Y;oM*B*xa#L3(`lZ}y6g8hDlm?|O|`_hIP6QZ(a5+@ zY^8zDkv3U>O6~iTRZPN-X2QuYM-yKN>ohU1X!_RVoZ$XRo&r7Dj*cT^PcO7YJ*d4G z_}kP>TBQ@C+Tl{HoM$3ktZ)5h2~`8a7ID8n0*3J1|7jU$E*ob=21Va*QU6|lyt5hy zK3o1hRSg>WcnvaizCIc`zv<{gcAju_0sXJgwQG)dWZ&alFDSIvxuigmaZKJ;z;-0z=wrNra8tr0~Krzo4IR)|ukXkXj`xjukB6b6}Hc467#_Bn51rbNH8uBWkjX(1Ouyvl6U|pE_Kq zB`~G*2oR$8>?fb|J*0bhVDy}Pb9$8(#hL*5T(~~LKkZFJOC-eOupU0yfJEgszWf~% zOE6h18DCa3Ls8DR>cvqQ{OPiyb?b8cx~e281KOo+IQ^!s#SMf9pg~J=lO`p$cMko= zN2^kncVLwWB}h56o=G;njWq7j?G|vMhQ1$q#K-DE^IXI@j=ELyelLb(1!LhP!vL#`$1)^_V z1`aLGZw87tZssY^T^~*mgj$`+{ihPlzAOG&Q6m*^Nq1qN4dZ1-Y~qm22&0xLf3tx_ zEa}{!99|+=5;(u_cG*i~XCMa(Ea_2kY)V`{2s67!ei0UhQGkpP48+KaGS>;z{xAOE;+64(1}~F&b{5p?dO4)AAzGq~Y9{nI4KfT5 zqP04N7S8eBw3H)_o4>-MgxeZqRmO0??LJ$4y=HGWR@dxB}X;+)j%~KK(1U)1FOCNn9xQb zlZKG1=w{a*#Z68{6{-z{*+ z@47px_wOO=(=cmb2=LtKk6v}5y1$nPpS_L#(@XsS^af7E<=tMdVvZUnW(AKSIlshL zKfl}{Qne;XT+arEe@c|E7`G?)>b@$h?Qc32Q%;e+L<}}y_tSh{FO)cS!~s-oYPwpm zf=VnF^;)>UsJdnZiiBrzE+ef7s9M823z&Z*U#8S@u{{o{#j>XVg0bl0gRoa9uCL@n zh;t8RV-*GyZs2M|DSiwiQaiH`Q^M8UqsL*c#2|={4atdO?$7K0N)IURRNC@(Z776} zei$)JMcbf9rN68dP41vrE}|SuFy7NVv%xi?KbJB5y-<@LHDTP%$7F!Kkb<6uvSZzz zhC5v}&Wnn$sR~`T&KNnK`*xDhoHc z0cnL{7JudwoA|Tvo6+y+VBkeWsJ)0-wdP`A;4fV@oif%dzfdoUwNA^yF!3N?b2Wa{ zi^9$70L;ZHgSd5$D2dUk%oC4UePD^YIYQ3-D&FB>_cM$?EK$?H$53 zHixE#fEVRl?An!}>PiZ?5i~|+(&r$1gs&KUNka~>s<(=e^JCl-KC(3r_U4t{tH3!o z4|6cVh0?>P7#o0-ziN@Vy*H|@<81z_j8n>_2S%{f*G4F*%TpR z3=C$MW`iU0NOIy6OqGu<6wT$*&uo*JoMnIc2^KIchW(02Mf0NST%Q+F-jf}*t|^4} z;*{jWC+J&PMKdld1J;ott)=Kkzho_7bw4gxh9Hfe0d`J8&Ao#|rRqq>8Pr~8iwgO4 z#pqXQ+{5r9(SF$G`wOeiOwD{Yw8BAfD_;t0Q@jUk7*E5SfeAgrWdo#XD~WGIg&8yh zgK7P^eq~c&$5LsKwigXNl32uTq$n~1(S`PD2bM7utc>bx;e(xnw!Mbo;c@ABt{K%B zW%~G(b88r-_nou&$1!^srbyP21&rIEUm6xO3+AGXUtl*1vtCGX8ExX-qzNB_rmL2* 
zuOmbRMnw#IX}2+j7xO#JH?jY>kVhSeR##>}>`U#Qv}U^iUU+VDfgR`(KZFg8-et zde7N^hd);M!7Og~qN`;iEc9TADT6ouG9yqV38fcb^D`-+S4*H?NjEn!fm+gAF(Pa~ z_hL-7kE23d6nTOucpT?07^yE8iLbhDhgVHar$cR$Zcnp8Bf3oVgt1Av2sugsP|lfW zNqJDLx57~_Ee!*?fTuSmMM*RsH^NL=A8T2A$lTF~2}sK{3zR{F!3dDTqn50tAhj>y z&qhFPB;%}LpcxGF(AC6xUMt$%5f85A2m%2WD3&1#sY`(0KNT~dcGffpW^t5mF08@xEvhC{pmqepW{?&=EB(JfqE(K7iGgSi? zl+AoS&82Z$nsH2xJ*gg%77J-3YJDu& zvj)1@X$29ISnY;GwmG`$%}_}4G$RfuANZP^pudj_xR|SBCQeq2j4ySsF6f3*54)}! zeoIUl!1IuSE#@8}CCox(r=>-Ihv~3UiI%c=Bbx6r+_N-CQZyo7CBW;`01=iM__AdH zgK}2R>JC9o#dx$!gJgU$F@i+b?Z83y%>ZTR&Cgp$@8?_iu9qRGu6ShA(o$qnM)x+n z^S-q{H1ZZhu|ASKnn)LRF0n6-lwFg`z*GR?tceM=#x}_rj_G*r=oGFdh)C78N;%yR zL!XB!p=jP+5r?xhs_IQrqTDYX!n(62LiPSJILd~bikIkYkOnJjsGy9iT z(E`(t#vVV1e#;bbs4#*J5Pr%yE97JcyZc?lmV+5-S=uo0>Z0kG1vS5F-@t)lU!EU7 z=ibW43KcLeVgu(c10X=n9g%mPtD(Ihnbj><2IUan7e#R4;7i{3%}S6#D3>|hm?Kn= zDvi52p;-j~fRklgW1PzHi%K?IY9P#CuK9>9veE^@u>lt;XAX&E>ruIRK!0KEfU{0; z!?i5kRztr}RN581i5oQ!SyA_lIJ0DK0w{3sDAu>pDx8kSZtJBATd5gBFVsk{fY?`J z$niy5&A+p%y|7!ZBsSl7vQS=f)w9>HhW;luF83BJRWgUq87)eLo#_>rAYxaH{g8KF zxBmcG$#1t_T_|-E8|AS1|H*z;80nSI`G^;uTCGa2`rPw+)d`W+{jd!BuR`&k=0O1O z|Fc-Al!h0{hFKKp(Jr7lJnEn>pn>%M(t*N4556cos|8#m5KbNnh1fccwt$R^*K8q{ zcK7s@&E^h5WmvoGzygRxksRP@9ca62_=4rk-MA|#();9&HG8T#-APocXLBA&`Ybo& z1|KdL<7E6$Kuw#GEis`9IG@nTx@Mm4BX>;gwTi0}wiDL+7<^O=HKbw~=if*hVOOQXJm z9UmkIvJLdJ7w~LhCsQ1S>~3G($p)l+&)#B%kloOHvnFD9K73qi@lkDV2NxJZ9ELHY zN1RQS!+;_kSP7sfu~rTr6hH9sFNZHMu7_K%|GUq*t_Trj>qbswTOHiE;G?jX5B&UT zb8$93S{z5<@@2h)Z7zz_0^p=p5`J6pJOBye?%#W}UVcwctiVf3Y_U+3Y#B>w392XP zjv+r$jT;iPg^31I6*`YQ^V`67zU^O z1$OR=5~}`Wz-Us;xsE!>&wiqq*e7PiWjE-rMhGygQ9&uXQxmIjc{o;VE6X7j=``}7 zUppLg^Ab&A!t1FPXi7VyJMoUMKW>lmg04#Sa4RW@au?4pWbMRe@Z0hs zLay=Ahzr+ZR9Incd5>*b4ZJm`CmHi{F2PsJbq2@di^;s<0Be~`!&GwF*k*fx#(0ICHyngP<{6}-bPT-z$pre z)lW(g)Hm}Js)5Bby6j-jdYmaDBs$Dx<;*Yy*Pc@ev})?NLLNh;reW8$)Qiu}rgf5c0t9R& z2PDCV7VKaqx{qLrDZ(Docl%tP*@PkIr%P~FwbC(2e6pwYh z|CfQ5UF}s3$aPl(w}Yts$6nIv3`j2f0w!14cS>Tzz?WTyNuw^I|(L-`QGJuk?<0IC{Aq<$O zlE@+~e8j1g2kWcZ45`#j^f8!IFB^5f4Z~%O+)W@*)<-jbs1r&8Wg+)0$qA(}-a}G3 znesQlar$H(4u>cip6%|$B}pZKR~!y=M9ej7r8k;qp;(do)v`fPb3D&XepN3&bgCNQ zHaL%K(Lr_!G}gB=;=anEvLR8jLkaS!g+snY@bPuRp-$5e>=j}Vk+1;!G_81xk|^8f zx%PV>6?T$r-9%cPmEG~-X0Dpy+X!xNe{|atf+XhhB3tY{qMA{Fm2SHpyfBgF5__b| z>{#@SlVi9XuW3@!~tCehW(E)y0g2 z(suaO6_E0=N$eE!aK<-G<>TdG*+_dAn*t)ib%c60UV^Gh&W46ku($EOj9l!nkV!qP z7d{Jhd5kOye|Aw(@Z9??F)OIwLYK4IBndtd^uQO8q`g1cyb#f@WqA~upuOJdNuLym z(5j<@ZFwFiPeochupH?@vMG$)e2X}l^eQe6HGVIGlJItZ5Yszx`uuF}>rD$+NNxzr zrgFlby(Ks;CZwLbI0CbBTPEIsj0tMuNM!x|Qb>1*h~boyRCGCFz-asQq(}3fy*)H6 zq_B6EIw8(cw99T~9t+)XFKM4-LANgA%e*l=RNX>~}e0(mOpJSlEP`_LwxpA|Ks=heyE~d;B3A zQb##&)XqSMi&8HwUk{66n{{;Ra>5PBc@$XsPLC4dEZ9q4+R2c#X)CeV^7Q8~z zVc-fxhISK#*MXg*nUEsnI&h&}611-9zUYe?pho;F*uhJ)1ILQuyIFNNyb+-w zmvp-s%fkmh(w^-W|2^Ss{;x1&0olm&0Iti}TdZg7rLq=5_P~#Sb4Ea6!?%&+{2-R0 zJVK^J>t@)fb*E38tUH0n0bRF8ptLu`|HE@?ac-%2J=RtAS{L;JZ68^6{7a3m0MIQ# zyfAj8LBGLkyoA;KR8h*iMX}43Lu^_h7k-f~7y z@t7peO;)jlalL(>7CEmyLp(FTM(e3>39$L34+t`c9j)D!GEQS6x=XSF=+Kx-C6<0 z?Mq5bJP?GHz1>_E|GQgg`LsN3Ce-SKR(C~O8qkdR^f+WA+`OTBJR9Ck9HEzi9$Sn^ zctY^Y+{~4zQevy2p&d>(%8oq)FWm7mVs$wu>FtfX9N{;>$6@7Z#Ws@+Ac<_WVAIHm zuU&Nc0_{h+vw^;ZE;5GbBJS?S=et0uhJO|_0Or+16InF7g8#LjB;#9tw4S}@qV!84 zwFp$Hc;1+s7rI&cDT#{Qty^SsNM3pJ_zzyTJ{xK9kxGt~tHWRyKl^Y!eTx7uXk&qT zn3*g6a9Q0Zb43&4PzNQ`((lN;?(Tf<+wAU|e3^Lt%W9-Y0tSFOA~L%o2Cn^+GaoLe zCitoZk>lQ{p_sc%+W-r1{j9;1mb)_B@@{C1oSM^3|2ee7FbYxJAwh##pLVjDQS_L^ zqozsx(hwG#_(z$TI!pzdJjR$2!S!ghWh!1-C{Xrb+T*d66_UNMksb$IPANDmCnw+h zv4be;Qzjw#+#rIh3J)-Dl-S#;zPgD0sNyanVnRQ6h@lg7@2K6TL+~iI2X20t2`MCF 
zF0IlgzGPe3S`Oqj-$jPV^Q(X)6CDC-2Ft`xQ3y$s+fbzHmQ8?qkbb%hu8~Kqpk2gB zm(p$rj~)nV`~5=(M!DTZJhqYF{6Y!Me7b$+e%G!5l8{&#&)x;@gvn-G6w-hy7r)W8 zxGI5;5TJ%_6j6+kM2P6fxwDIhbL+An61fqut>j2=xb}x-*3Zrdvo;gjn91$fk`K*; zecp4*NV`%ESb{7yPM52nlkLhSPt8?QBiapK)^i3J=H}u~e36Zz&}5|M&dXXs0-~a~ zg#9G-kTE?c33nR*+oJG4dLN~f`0#3?&VoJkVVfRc3EsIb_oezF0XcpJb%uR3TyQvHB8Q{Sl-mreLRF!U+gi$ z*COlFvU*dHu%0FnD)ArO&}P3{djKn8O9j4wC3Z;(fGVDtePOmoqVmhZnrB4NwCRsa zIJGPaM)Ih@DgLYq5!@usn|fFC#lpO~>dow>e$~*HP^s>ziDASkGjr5t8i^fXPaP+5 z3OtE^kosKSSgw|F=oCq$s`$&tpm1muhPxjiv3g+}+aawQ6d4X3%Mj$~Lc89A!7joG z#%wB3W`|HmUwrNPmE$4 z;iI1@BT8mr(&m1D#W+mLiU#}i+|jqNiC5SHU~wB|CC%w_JetG32q3dvK=$#!O8*Ip z_LgbnJY2=1zw(b{$GOCfRu5uAZIuZ4E>(FQG|8b}>(xR(z*!;VupUot^YaD8h>1SdHC#$QTC` zY5+@p0^HyiIg?-R+l^!*ynZEZdK3#&C^Zf5_YMLG*9~fdny$)?UAgGT4k4zBm{7Am z1T$ICQ?FKiVv?Y~0XR*{a!nE{{y(=uuCu{ zpdHvm7>eqG*Uk%7t1G?6mEP4R+zb!$Wk^u2SDOtr1{P8 zl;Fjzb5PI9lU(#@`~t9F*50l47_(bkC|t`?9RGdUCTlEtk-G=*+$9m=LC`840S zYgdj)o3f$|n+OB2&$-30awSQcwej~u-RyWr2O-pTe1TO&Xl|Ys{bF)$$Tre4(&~u~ zS@1d{+fr0iAHG?0|a2w^u76d>jm2>66$+I_t@4-_-95VGqgVBJ5_8^q| zJrJ5>8viR2xPihQ{q`CUJW(|-I4b<#c>zfT#J(GVR*_8jkd{2wWA?u>r!Rz0FT4RA-@K^}7xNTYR*iQ`6 zj7nvM>Z5hPcPWpZzNOTPIJ4cW;_P4816MmP(QKRAP4h=fA!B5;k1Zil7k*Ks16=L( zOUU{wMETNuK^ONlVYhr>M(zbG0_wf%zzvk@p7swuUaSM^If|4QUzIYIb-lFksb>0B zy<9p5-^5Q1tud$sgSqN`Vea;j1pem(qf)RTRtC`x>{98S)CzBoa~P+IwR)-qO2FhI zinP#|4ZE~;D{>MIu;{n~zWoZ5UhCI0LXVr8vRz$CiyL%T?EA`||H-AL2p=%z9BkUj z<1Tyd{>}2tG1-blL>XKyYtS?JA0!2*MIQ6%GRR+?r@W1Ia?+Qus+5I7E z_H(6fZuo)ytt~;DxUH!NQFZP&E$#a4Nvh1n2}@t<4?Uq)3%pSroxM!tEpN*m387v~ zp1`LY4*%mm4Z}4d&@U!IVDc??uev$gj%e$!M5*BnKmIfOclz1eL0ovc`gAmX+0dbS zW*~Be=)NqxfPIqi^0o(WJfW{KJIW<(Vq3_?L4=x>CLrhA%88<8_LbD28-Ojd$@ zjEpSuEy+zO92-A!vO-{Vbc9+p2$d8FsF~b#C=b(8#a&OLr*N#1`l=2qY_v*1(5Ik_ zYL>_&=jtVs)rqMgoRbWwMukMJrPlPwc2?loQI6=4++R1kmfu#Ny`@LuF>uy*rAL|(_QqkY?{Fm6+OyR{K5T%eB;RzGN zX8Oyzx@eP3OI0ZyO0vR&6m3?A9_7j?2{!q3IdFf6tl$`u2Wz$=G(>v$scyE0 zN9BH;KB!kk`S@iHSEv&iUC&eUP%}$19I&bi){P{~S(3^SngfVnVx{jd+zw~jE5vQE z$+taiifn0#ondn=7$5E)@tg%rOzu+`wMCXZzyC*T?J4<6;)&$gA0x^j!f4s5g#>I}$ z-M1s6S21O+6MrQ5gLP5Xdjs?A{$}$>ij*GJ4X0in-==}gb$dsKA$6VhyZ}tcIP=NI ztMKsmjn->o!@q9~8~B&4r}`NPi{8OGdrmQxYP3zmzR}{sx>wbx4Y=4`Q40`Ke6&&D9joQB z*42j1^4Ob%AmV-i8<&&BX*ygiKYg+h1y~hAe2xaP+XQ+Mvi&5%Cz@((<#gj zV@syJu_wh5eTsw|Q7Oay75kux}6*-Q=W@lyl04&mv#%f7>eZf`41(!33! zU~Aq#2S1nB5Ri8x0HTf41X#dIlr`_e{wZ?P)kvTQDofotRtH~yUroYd=p7>fnU)R< zwynN9&?rBC{>t3sh|A)5GBW%i+*V(U`}(R_-$ViahELAN)(Qg>{Ow|@0j+!^EX{f)=z-E`B*b7j2h?$P z6S=JI7iFK#Jgtag{Q;^vZafn&theh^Q6c$2?gg5u1vwa26hxU%M-kgB;dr8sP4vC2 zvlWs;=+;~cT6pajv?u(t0(Nq&_COZ7KQ2-rH@vKaFe!?6G)*I5c~L;gj@3LTZWCYh zs+}5+K}tIWRI1hd>$IW3cQ{!0Jxg7*-N+wos#N?Sik+ymn@-Aw1(K^W7(i1l zn-!HdHhN_5gv2N;jAv&zD5n{fs7^{_Aiy@eh*enN3CP9gmdoBU2e;Hv={@}Wk7VFl zIHa4xsOL{NDKD6y9PW5HCFqEwI%7nLDwN%onX~K`0v!QX4C8Tx50F3Ep+`vE=98wu>Q@YUn#nc*=6xBc;QVbxtA}HUQy+SG9>IeYZg)RN~ zTA`#JMv+Q@k*$3YI990eGeiItP5bV$q0KwOz=VmP&y!Z8<-0HHQ{GLtI?CpA-fj2m z%WBikhTr1GO}@A;pHuS$T~3boFi z^ZyMref|4U2GOjFUmj2fTv}3s36AG5fWi?JjJe$Vx|hA10|JuLPCs`ESFi@#{3tli zstuDR-HQ1ozFblpX62Rz^YcpWy5U!%I63uG_9E6UHLXCeV5$-*ZC4f8{RXC$fZ63M z0X{T&v8ho#QgN#ex$pfR`pLpsMVoU(S4=PmjcIT7!7!Hqpo?9-f7=#bZz5)K!W=Me zMM~?7b0z;N>~AK_qBKl7w@Y+EG+MQ@?^95OL;(i;VsW498g@WwE;T%eAqvu(2Vze? 
zni<`yxo6+AC^lyaP7|l~R3|48`hQ5Ea4Qd8wGat$^b#EFwCw#%2k)x4U@>0hi8_M4 z{o5kmRi7)8*3)b)VjN&fZd^!zOlJ}OIih!mKm52Ab>3g}AFA4#hyBy}&@ez(&jJb& z@BM|lIH~Bfh}&Q@P7enn zn{0wAY+@cI)1#O#IiZMMLny2XqCBUkQpEB$_6%b40(CNjr8Y?q+q*C`4A5i|2aJD;G-u~t#I$c*q%DLM=39JK0vV{Ez;~C zqNMN1H*U6(9-O4!zf`tR^BvH!R9_wYk2VyJv*edAk8p99qC&MAiLEJb~aYQ*b|rGxQg!@xFW*xXaH*q znx%Cllq`sfZ=-)^0PnFwz=eXJ0DrazY1iH^*u%?S3#z?UJ&=^hme`t0Rzcf)jkA_~2)~(J1y2rnz=m9OB4yDD%%F&BrpbIg(ag7~8(t%4 zDm;})8Qs~LZv9ia4(3Vue@|Civrg#c-G4;+8m(bv(rZU!g;{sM7#5!kqsEr^KyOJ71>eed@tfcGmR~)q=hBpXo6+w&UoXsw0uv z$%=bQd%EEl`TNGm*P{Glly`4z;k=W(!RBQHxvcX3{`;)%pp%w#0KvfP)1@Te9q0H` zLdg1j+DBM^&bJ=Wk6Wr9)?;YiL+3sp|342i!~4BK*Z03f0&ko4c}*(Ddy(@GSXoI& zvs~BzJdqH+zHhW9)9g+wZHZpz{(4!@wejuz|8!u~fKuiAv4a4>C5EueNe%?yrgA_r z`%AcORsmp*U(O$1ZjLbeWO3rR67hhIvks~1S0O%41(N9QkC+lvCn|#|>ds573ZjYo z1Fbo%HcwD$Ck<2kXMKeX8gOp-GWy_k+K+o9 zwH0xWKuT7GPMjTes8L{voyoZmCJN4V-4=P`5*<7k(des;&fv5GR2bSGHmDKQgQM_S+a zm*P8F&%$DwMi`_QIMOUmSe?s>2@pQi%OMV7o&pkVzD5p1v#-u(gg`=K>ltB0T{5?O zo9iRC@O*9NOLyU&DRL`!=C2E9UUV zRmI$!NVCktQcSHQ*Iihj^t1Z<#YmlKxRw?qi?T5pC?M4FjuPYyXosha`$`M{N&!MM zvgI|HTPL1q_a+%5i2}Kke8n9Q!feE3>|JO|W#-lFtN|$y>~iFb%krGKycU@G zWpzUd2j;o`U45khA}9-iT|GB(__;yv3vQCXm!NB&zY_9X$ktYrUJ(viUwk?AI zvHsZHAsP?YF0BQz%wiwm8_`RJ>kiV%bQZPH$!5IruBFUOZ6{`G?q%}vc_N(Minc0l zo=oHoSDF2#c>)|Gsxf`sX}yaCmys{$SZoqqBpjoOD?=S^ z&*a|bz?aBX%D?YG6%o#4`Y`Rn-iWRF?T<6_kNPPoju zE8lfeKM3LD;W%hlRa7XXRI<}Gac%AU^}*CJN|Ezl`oFxFuQf>}AzA-MP_D<4r<>xW zG9+LOl?D%go4o}#HJ_z66S-quCyC0urz0YY?%{)%X-bMXBz_*XSmcbXbK)6qX zlev+JK|xS~VNi)fkY%_}^B-4>9p)5Uj{l%moNh2=(LJl3-r(%^y1LBF>(LZk5Zn^x z>|SVO>8$Li%(>k5>UEb>z=9r6J*@Qnyyutq=Q%_)=O)*^@1yJQz;(m%)AhCL&2-G$ zedpWg6Uca(^uX~^P+1qRkf*@~!aC^wkJj|gf!{HT+0k0>5qI^sppOgk3*U#2p(qpo z*!;gyq9delmp_BFmqBd-FB3~GJs&$aBMon>ua7@>lD&dl;5DOmhD>_?(2E3Ii|)UK zV0!QM{#G>Ud|EzUq1|3y*X@^it~^ic{_yr=HAs@hcDAhD>j_^!Him0f)q!tL3WCAW zjYGF%n#Q4A)RAlU(rwsR@Fv`jX$0LU{1qY->A^gQZ}i)9`t5+7&(`IK+l67>dXvW5 zZ^v&e&kb|!*cFk~H7H2`xa1nARLt}C`(Pt{9X7w3a#6&HdQk)f{Ct2J@ld~98sx>) z4^j*U8%4lJ;Vd+<=iO!46gv&z1zZ=tc2s*fw`3JasM1U<(@PeMKtHTgEtb#}g!4?O zm|?luQszp^M*6!V{W$FuVZi&17DPAr*DM|-lvxBTlf9n}eU+P%oRJ0V>IixHg0D#k`#^pIx8LeW`bdp860Nck23Zi_Pkuve^RGr-UtSNRddtiJ5W;crzlV(z zLNR^$z;4l<@{n(PZ^X@rWWCzXp~}^4F*hW{kob|8@8+?A-Ee0)=8!;e8cEBveLa!T zy~f+sk2ep$Zq#IZH9?eLFyw&QiO9*#yqyX0nuvI~=Zq-FhH;?%9b+@cdAO?6&&90( z!Dy}GY#cn_0zBgE-6i<4E^J*CQd2U&z-cQ%(N2bwo6XYFN}>~n z@rjBKX%GwffRnRuVKsz+=WuCC%M9dZg&O(@c@5vw=>-RIOPLi$Bi(#I4sEk{@KoT4 zzim*Hl_t;f7=shT=~c$WgpP{WA8xoPFcZ2Wa35AiMvkPQ`C>i$$#qqMO9Lyl2d`nA z>0-Wu1*eXMlN!9e2+Br6@=5P&)lfO@8*k=e`8>j23T?qwk%VGo4yLv8sreU%1N9=D zK9=|heikU~it&>cx;bl2_Z9ghrYw*ThlZnd1^#Pg^(x4AFVoH1kU$BmoISO;FWD$@ zJXC8Ei^I!a7-utHZNZ#EH3q#CNm5?}$x`vTd+_4jfq#iy(zzw|C>_JmihX@v8j8IG zZgSWoHINYJgx9=bEPPIgn|2w|l6|bmH8qVT{2heW=)+m0&(s323v9Nm<1BPjd8uT` zKzQQ^j4wm9Ceo{~Nl9v*->Wf<1e4q+!nh}yH)#N=a&Z-&;2-Pr&G>f$-BzQ~e2#5< z;lfAMo#EOCCz;8FyL9eh8T+`8MBT^h(Nb>JV!>$s{HI^pj1I^gaj_5CpY#2>Z(E+N z^e7itjJrb4S9)`_I=@x#G&Qy<{ZE5GO zw_n-rW9nm|*TpBl_S*ko<<9;Zly8#n4YIfQ#v1TbCh-jTalXABw|t;J49c|kCX={q z#Fj{~^y~^#d-WzRe4Ill9NBjkMg#tWOtvA6{?U0G`S9hlRCCrc9nx<5Q{=JYP8v{dn1g#b(!~PYDV=d& z)z+wg<8ft3;dzu-1PDMhv&tqK$DwJmFsD~B^DSq^F;Y^u@#seoYBqZZmXXZN@a`T- zFdMJWg2MHw(h(B%qi4G5r0qz|Hpen z;q7v*vAFGhMU{0i_53;DUca zM50?zAVWZjq~Jkl_GK1Au!6QMd)%0{Rks*Pi%Ie4d~v%!X|gSdK?oxg$^f}wk7=r< zGD!C*fms+r#ox}cQAwFaU*%8o?n&9Br^j-rmwqExL_OBPHi>&&O{JS*GZK^TZ+OZ)T0w&=gP|+^7NkT@D3NQbUi=93l*@XBhQon>R@u zv&{ld`19Pu4O|B+*#3=+Q^ip-BER@oN_nwTl~}7rg0nU`UlBBRaSxMEc=7OaW7yP^ z#5fRXY~W?#-Y4diGuSrZOn=2-gwv)x`{o~DiT7=<3RkW0w3fxWU zTJq&;m^J}gHqe^^KZ2K42&ZNWg_w^JkT)?LV}^N{)GWxd+cm!zEPaW`vEW!=(476o 
zJSvxmu`sPQId70vbU;;8(?De9IAYk%?ZJY4@LX{+lX}Z%|6ow5iXNpQjZ|5FWo5(> z`9nLy;%Y2HCeO4WZyI|(i)R={TtNUS$bs`O>BdmaYm3k$p9GI+BGr6m>JdvEB`w@z z6a__eATEW@$NKXSEh_EB+o){$mL?B|5#x{T!!!^Z6S0wO+uVrLJJI$he8Fq7fxvis zL6zN?q|Czs+^0_7Y16BA(mO`4l{-}L%RXbnr?{m$5s4|qr!{#qC4yzwW z1eJ((rbbbjq}9fQGqu%H@7Ce(c6_UzqCEcW){L*7=yxQbn4z{ zhBl7;gfmBY&MscRrP%#RZAKO(tlCkLxSZZOnhAPzc5&KpJg+tR`}D`<9qx_hItb&q zE&G@urX@wKbLRToD15d1>h16&WMw+PZ*c%V$NypHfK)f8xijx$oOE|~fV!*EN^bPk z70kW$JrJ#hv~Oss*RcEj;U}+Kz_#vHx5!87@P>iKQwRJttWmzdLw6T$&1i9KE62)i zg4*jQV&MqHY-5{#j@s+5V^i=m{=&ywpgbBv@mD;~byhv~-l}@-1=XNgZts3&1NHl+ zo`l?tZ?&a~y<%WoF8J-y{uz#{YCB&KL$lv@iQ6@Rd^lW*+<0)|NLcsDR-*LpNK|`` zN4M)8qRp>Ev%_p6C-FLE+nYB}*RP(9^ruq{$wQ();~J;ar5&J*iD2h|8vqL`9|$uu zV=scsiZ-<=mM~RD2vfw0(ePnn`084&swib23Ad#~FLTF*_fvb$Dxc>M3U(whQ&>6h z=63V=q%d26TQr^#N)rb6m7&EpNw!%49@%!>W*%OxMt*4MVL^psrINT-{QNCiR>KbM#Lq=|i}=)do@ckeugG%dh-`k2q9!Ho|4B^PUDqOJLDJccDYUy_m>AIZeJ?FV z$vI~jX+GtNZ{sM4i4EppfuT>?QwxtQlq%lUr&ur_H2X$XxK1Yd%_s_?21;51{A_;l zWOA`!p`=gCm1bR8V=~<;Epb7&X;%F^BR1r;m<( zhGja$CBB^!j)8hbnm85)l#v9N9Y@owRAiwNYcWSryWe46nMBS-)xaYHj%pp4rV8W_ z4f8@c#0YkJoZ$+L6U-tsQmnBw7`*ZvO~@s#x_NdT(*bXJ9=Gxwx-3a`@qV>o84Fzy zKg;9FZbYVzA%6LgJLeSN+}Zw{Ime z-ne*t=v_+?(G|2LR0L+QLF`%1VwxTCXGl>@99ZTCk}u(SaOE}ezbu^EZQ03h)Pzyw zg?^+`rbyojQ?fuoGd&jM4Oc3PHyg*>IU-cfmtQPO5h#-XoG(`~@S|?>JbM_gT65QA zzbq-%Oo$OOa9=|}VzcM0ihrS#Nm7n1B+3$mCLqyt48&%ty3UbR?FIK$5@_~F!f-s5 zSghRN#R~6FM+=RAGR*?1+mt+vo0lse+gJE=oAmOB!gY|qxF(AZ@Hp;Kt(T3GP}UzD ztiC)Qz-@QoqjTYiXY)`-2Rv>-=zI75BozcYH}Z(?k0-chzn|YQ?LJmEj@iTsY3l4c zEbbnl-}g3f8`}N|J(7q7SXXnC79OPq{VRg*>gL&f&XZPeqpHQf^RjP)hCbdOQEzIB zHS4!sXM#rE5ut;O_43?(QC32X}XOcV}>S4elBO zgb>I-`Sz*$_ddJM#aqJ##nrrv?$yugehUqJz1q1oEh_mu_LuzcZvqU*>|2tzvpL5^ zYunRRDGm?&w_e+h{00^i(^2r%<=?kI?$PbvPu%;n1>eXC^*ch_HBB>jgp2Gcmfv?= zGqjoww!WarRn$(NlDwh)xf{Zm)_bkiWh`#b{|ndS{p$nj+a7MV#EM2idg{3wie-VfKTto{Rar8!WeA*4U@a(0Ot~b( zW|3KJ*rT=qWeQBfZC_vy5`-Y@*$U%PkJBil-UFn*G-O!61k>cb)w79CHiw~dFK3jX zy!v6#Eq%K-x5#TuFs`J?ojv#Vr^h8|uF7=BXOSX!C?rIItqe#bO-OWn``P(be}rLu z(Nxq(Ha0X_K(iWa!7=$^I)Zu=$*FBCj1Hn{d|I*8zoVV%ro!0t74#rCT$9O5xpoZ0 zS;apgsv|T|i5YqNlIG!6jmmCq-VduT{d&J=;p^|kISr?s&o2#^1VjLmKk;|+Y-?Q& zJn-_|jc7sDF|-~&zL9q@>qL}h>8YKo7Q`Zk09kO$QYetSSnpn`sqZiRcO7uKQN;8;GFiX$*K8kYPirmKTs$OePzbQ2Yc#1D_(lu*aM&rKcUDQ7JF#HS zX;d^2yTC^d&c#1#v0^!HGMP0M&X$%?D&%Fu8Gga9m{1zb!47c=K|JQ6L>zHe>&L}% zV0+^1WY$fIB4==*jhLimHm;eEtvSM5#!DGbr4qr!jfKu4XgY(ZIVk4#Zqv#NxyAt^ zpPtX>S4;W^VJHU#M4}>#l`$v&R*9Nohh38(aBi%Ij=226ax5pQEJFyWeDi-|*So4Y zr~~oi;@Gr)rAenMUMmwX)j{SYjqlS0tmr)_Zga4G7}Rb1ft(y(nwKy{Z|x^mCeR-U^ex$)s&e-SBrubVKNjH zS0mmTPl8})jvo~UBw8jPZe zDv>-I!qG`C;t2b`kyz{Uv{iLLnhdDrMoU8ps!X2;@XDTr)Ut|XMSRq3*Jb_ZcN->D z8%{Hx9i23?#kuwg@ur282+Y*`SL6^DGIX9`0iMu$cZq|hA7IyItEHGd>8JyTttO+u zQ2xFb+$fJ2CEq{Jc2su8V=K10#NVL>MMhRZRafj)Z?F&^hCFpzmKKQ*h;4w;z+t{-Gr z6YxlTmG|nf9vF8Tv8LPh_Iq^ek8^*Y+vkigw)5>XA`JJw3;II4y3N%-+pO@}hWSws}m5FcSN+Uve5BXneEvD4#S zM52o@hX#E_hyL|Ya9H}o%T4zf&)JGuyw#nneFP-LJ3E$OT;D2wR$v=C;|7Rrqj^89lNuFdsW?RnzX6zi0v9Y$QUV6%LwBS zgz;GLpDwzZw8A&Ys2{7|%065FW=U1;>|H{b4lC>Ioe@{8zP^+Z1Dgwtr0(6(G#_^A z;dW@`Dp04uaqzek;^pnj`Q+i#P%S`G3c%Qw!2rm)5O-fKTA{?bVH;QS5Gc;D?+e8s z?VP9)Ubz=8ei3dHfj4&gVnOF}xA>&+=a`N6e)R@o2%dlKX={e7zaa#l@3Q7x(Sucs zTbI9!5MDx#t~8>U^I(LM4)*kU#>wr~>ihqCfB)zgc>5v1$qhPC&YZ7(eGm(~r*%6_ zkefTG{rp!*Z||@DU57v1I}g;5JVS2o&bqdRsC^dt7VQs~tMLJ;y1a|=5wfWsEh|yY zG>qd)va2c&XIY>{?rKGPu>!TEWE$!WkYA4$1F>}CY)XDCWBA(+M>4JNNGdFEG!F%h>f)!xMd+p49Tp{jfxPZP04B1eMu=_viwxLLV2R1 zUm&bl2F-DSmohP=RIVsitUSKc5~CjF7l*Q;%A#*|biXp0B0V079L`7vFCC!UnSF|w zDB%Tp*T90!hqbqK&;-po4-lD(Zg?gkDfCeE{vZN6`uz7@{Yr-?6y~PR6?)6z-Pdn~ 
zal7&5H|6`;!65IpXgcKg1&?vD4Mi&af2DV{m{JqgWE!^dt*E`t;SRUnT`kVodZ*|c7J>Z(I3nm2AQ20Y zg>S77ex2y&5xIU)hd6nVX9=Q3qbt8xs68=l&}6Fo8K5`I>9k0vd-iv_ov5ALqUZ1e zT#z6W43O9DU}KQhgF@8~SpT{&`PTc#h7QFwUic()eS#Q(qC0i z4R#u!)hVtWJRSr6o|Vu7Zmk4&x#vSPJzDlVH+1y!D~CJZSqbgj_QV3e=4Dxr&0O!S ze@}=AnPH^oJTWbsR9LfdHg3W1wteu%o#Q$Afkzxiq)treKsRP*jjOoHtZ6qW*NEdT zfl($i?d4GkHAQa}5Q5}-ywSm|7bD2U9b{R4dG}Ok+|yQ#mQJ0;d;v7J^cM*5vO_(M zsA4_=7E$VlH~K;GA%66v>*3jn7UU6#(k#2U|1HAS*ISImq{1EOXG8dXb0cQW)9vuW ziP9k3@i26ggb;tocKP}C_IxNt6{VyduxO2Q{ym&H0t2V=lS5{kn#rJB01qt(tC}q( zG)wN3aH&pY7dy9qy0dHtrZ}d~v<`N)cn1B%a0MHbYn~iH6s_&E1Fz;=~-> z{_B3-g9r|TB2@|QE7C}7AEDu2qpEx$fC@|w-NGUl@?hV%!i30*+PO2PMDiGny zT7z)1t>Vf|w@2v@#Lk+f8^umDY){#DhUvvUQ?eV{S+TmOrk6e0JW@(beJhE1Wl&la zO+%jW7twio7JXBzy-h{iRb+c%qfI*CH=87p9gDuA69>Ob;fE6VnnVk?6 zbO>sTQ_`iJTOFWZ!CQrIRr=R+*3$oR^Sa@ z@Gx^XAo}mmi}0Dvw@~wP-@m87=BvZ975#q9ptr$poZMz(=bsP%{q+eK&7UOjt$%ma z@b4Xc7N32_--UXO`npMf&a3>xv2+RJ+d!PZrjOH(B0ctf{fIZzcCqVPZ%-IDXk^c? z@=qSuvH`>Wa#|86lD|Oje~R-Dd-DT-+P+0ha60=7B>X|evO((-*l>5e6~DZ%IH;ZG z*d|+kCt(T&^!EF81u2<+Jcn3n=MeQuPThO74c$!dBHb0g+J-ZTYGNAh_S|>)1lp%I zM2dP^;NQF6-Rejd>;9}UUW?LkoHPO7AUROwa5JLA-;Tf`QC6#6?*Fs%vxt0hut#=u zx2+F_GL160h+A}$&9f~->}txp@arBRo4f)yugKmkG<1Z&qFjOheMV{#F2%0u}~olqF{P! z)^Nm{P&>2o5mCIOc)iRIDEO5XY)bhkn1nZsV}rYh$S0T19mVkJ5>DJD=ZXzfo)thU1Pq6Kybcg@G1*;ySK(=ILjT!6KO$x{BPwil% zt(_ifg&-ZGVgZ&Yx5-?3n6Vgq@Tn^8YpGx+)kX4-UAc7O;`Jhd$++?sIEz@t=%=@H zwSVL$oEzYizr;t2LFbxIz#m~?oXxQ*6J(TTW^~uF=Eh>UvbPZFNRZ67|6GxB#^+B+N5Uy%RqJR z+b8_QbZac6&0anP~a;x|HgQoiu#FJyv&%f$w ze=d6TQoGkU0rX(n{PklJxlV_64BbSme40`WXo}uSQ>42SN%0;or@K=~+CkfmEZpjl zFW{$0e0uRUxL*JW_%eG@FKaaPG3>&(K6ZqtJs{q`1v+gr+J?!QUT;ZNye-*_T*u&u z*auURI-PlGjPBZT{B}`gnZ?B6T2iWu6OGZHJd~0DYxL@d{NYqWigenIbZ6F2kPgpDMSH*PhTXA4bMR5= zg$Ylv0WXpc5(qIzu;|&30SUwe;dahsJXcgHmEsl3QBEc8pQyuWoz2@29Ipw6Zu2us zo4|M1Z%`e~N^uV%tr;F=hu5N}Ufzu;il`ci2J#sUt;4uj(P7bggmz5g&DwE3#j#@| z%_HGSj3B*JZcjpkqE} zH=QsYN_}_9fmC6h-nbxnDjs@7IUY8p>SUv`@i-T4`dn2~UQ)=y9w)a`y!mANZ<2X( z&l}7-M#6?^sL8}Uo9zKk9>`tU92$I33J70Y|88S;=qVR=}DS&gytFPSPA z=TGLlTbLN?fAml{Gu|{8D(Ap?DUwpqAu1Lite6-K%fq~?pBc^I$7!a-Rp{v~abDNH z9@uCTq3>97Dk%H_r^8>N!4kX_YT``T{+m`Fr&y0JME9#uxwxYLVzC&5aIG>(q!d0( z2Vq&+n0aKZK!`R}mi`+R6(G0)y+y|@gQ=x}S2V0K*saBUT)9k2OtJ}Ma@lSQ9mYnOm~6_$Er zM&d}zD4^2~@BHCLc*``>xzEbSq7%gC520*khVP#~5!y2hxVYn{O~&Glrzw3GyOSbc zNXiV(G$EI;M$xp+5I4l0;b*yUC(C1L7mV37RS@HL1dGI!9hpp*aAw4toNYu@*3^t*PBg}*hE7>f27xojc$!MF?jPLH|-{%~p=G~aI%OGQ%n z)J^{X&=(-Y3Ez%Udi|qm6m+%*&w*Qz*ZFDJy0lHAf*-o_B=yTHs~33)eEtr|7sJ|C zi*$DNE>CltZ2l(d`F6yTK={}XK|l*l!ODBgXYumA;9Zd2L9kz>jO8Ew)h}P+-xQ}r zFd;j<3kyfRK(*bH$T&6d4knz&0d&$;@X;r9=c-%7w7jgEbS_2@KXP*|Tn$7$`9Zvs zfepr!p>uFD6fxf@CKNKxeq|!29!d3{*kFwy{YvL-ToOK+1zu?c^t3vD_|D4` zk=(1kuADlIP<`#**}<+8PeMd6Mu$|pu6n(v&AF2#mSS77Zg4BsDf1y-20r@>qV|xL zo7to^C(5soLsQ5e3*etSCBcA>cBHlN$%bLmk~Z9ZFVl~CfZ9Q`I9zZCjg2ciUlMc+ zz|6{`B$J$|_BZyjwb&g^hHW8sbDrK!KxaD`q+Je^n^{>QpL|#Fdl=@tVy&zoG0$?u z0LysMUf8w_ak!IxR}HL`mzei?UoIQzRaTzs`^m^YEx`#9Pfo#{WERrAgOqUM7i9Tx zGh~ZWlg->9gmioc*stL(?iozYds@8Yi$A#WJ?rK|_&KyY8rS2Dd1K~MqhV{&Ujf@( zYx==!xV(126bu#= zIIEItl9jk3@bTJosszjAep0YzrQ+96tAg2{Y>>l<$GDJe*Pb|RY&Eyx>Rh+bFr|@I zd`ohk;=^SFnJZs6Tu5u`V5{=BH11a{PR%D3hEm00H0<*wQ!kx|T87z~OmJzX&Gfnh z>C;Mzm&gj6#4D6Wi-T8HOv_?7iK8Kok2~#3cAk#vbbd*<1!M=$dQOkh4hZVMdZ3yADo5A4(lH0;?+829A8lK@YKP!5 zs-6k%{4l8DCR6v?D}R`-@M+Xuq?U;jC3zIrZEFQ@cJ02yI@y8ebd?lBvzc-tguEHW zp-R-NgYe6V;Ru?XyM`en8*i?K{ec$)>O8>HUX%c5U2KEchtduK^)O3GI%oF^=u;jd zu8ky6jKAA=DTG;1Zs!hCB*|=-`=3@{4RR{MQxEQ5?z&jJn=5ggo^6MqV~?KJayYmw z$UW$?aLZ{wk2Y9AJ{FN@w|z8)xWm7%5YY&%yBaXu*$(ON=!p~*V2cv$<%;R|a>4EQ 
zw88Gq+OcAfz8`9HK<1UJSdv*tF%?SVL%@yt&=ty+SIfdliOML=SM`X=uB`<-3hkBh ziLbqv#PNz~h$Js>43w$S(^Q?GEUE9okVf!BnNd-!T6r?6K=U*2-H;u*g1fg;v3w9W z5cxz5m)S5fwv0%C%IoQ>D>d>*>q7hpS_UtN zCe+W}JHMV>;2^!^=t_``9-spJ)I{Hsg~v@T+$s@Sn8VHvP-$o;LsCRm-?Ne=Rd`M{ zwN%IDnfa+g8!hyzMQj=sS*4W+#I-9^B4y!bf?@G%`gjRGlDT| zKa^HF)1-(Md<_{Tl`AO5gYu?B5VD)VIHTexwqqPfjiTj~cm2EQ#1hxW+t5B69A@$+ z!^e(G$6z?c_Jc8H-mele_VP!40DPH(e;tXL5j(u>6cQ9Gd?p-{_j=TB*ynxuAbXVee+Kx5aQkls|8ss% z6uS9$7`_#!T~VUK9}H=jASF!y0AgX=#nS`?Xz#_a*_GzFHJeR0N~Q2s^25)38uXWH z;qYUC&f;_mwn0%_gXN>C1P>hK?0|VhrsgIUSJdQghQ`~r857Qz<&G2aFB5t@jxs=26Mo1OIG&~i9vi*AZ>k0!T=jnyyUPAr?K9@%-<9&G8%v2X2dfo~l@0EmnB-hk zg3t31yLP|P?UZ_1YNFxF(w|Ln4C3L(d1Mo#WYTKEusF#kGao68EiIR?dAJ?$!u`Hm zG~qYMavYdyeq6rACA4lR{7xkC9KBTPq!AlKR2>D+`SV z#jyCbXDP)1Rivbg7Be*IAWcF{VN|qG9|iHX1!dM!i5C3~SSo;r`t>TR@T{!SK{ovd zOT8M!4=r;fb6mihUyqc14^RhmYK?4=V?Xlz{J8&&`m`VM`g->R8MLN{Wnf@6m<&~d z1u@bpzvk5NZfZoj_L_68ULDyB?&yQJ0?PUclV7a4Hgw}&s`T8fAAT=-PTwcBJr zDyAscY8GfQhJIxUKCz<(#H!#O3^5D6SwPO@f@ZApP?HtNHyg9Pj9Jv;T>MJmiFfU*mjq@wZa!Vk)!L3%h{_ViPh09=_R7TuJ% zG-L@bFqtM{l$|m3#gaATJT^@NxL7Om+vKv>@;S5>Mep)?l5%0RXs>W2nK!+Q3Rxmu ztQdo=RuP6vOiN1!x>-!oe$wc02Ku1sIAxN8&+fOeJcWTkGb+ud+Y)>8p3%5h@cXNpCQeUOmu@R~xw z4ph5z>!TKVlSZRV&JGe}5M}!%R}k_4=*UjK5>^DP2wnrsdDH@pGL>oD_E5-lJePY0 z=+iM~eBLkUcOCczPhE%80sReyEZUmmbc?_5E-ph=*?0-Pg&# z2$qwI&bf)30Pd2ZV_i2=vz_y>GgGFgvl)T29N$_!X6@{u-J|sY=E+-^&0u38xO|L` z<$C!*Dh8{ZC;HCJ5di|;?7Kz4jp-1188;H8OI|dVZ^mh+zc#9+f!<~q_3$c&UUp6R zcy%L^i5T@VD~IW({JD11=0dj9D^>Q=C?Fm{5GnA}TNu#Itkl?2UBIB|wa?du@RksN z_?9Gp!1DBluH2AcW9hVjU}?72`_YaIHeOM{*=;loemxIA>UuBle?r4%&u382ur9(xr6-_Bc!xT`}-M9`!gVDiy zx52?*P>{lP8IKS?R&khcQQlWT&Ka&34#$qglGhR=Z(wsR9}T6b&S9Z8ybG}Dgq25B zIR%4%gT)x)Z=Y75fPTK3XpAT+2Nbg3lq|>ERM=3LPmLS5!v_f&(&${Q?DW>Pt%t?> za3gdO8)}EbE6I>NR!&X7@ZumHZx}@(wXJY5Yo`S9KX!Jo>Iu7+eAt>|V@XM#ytSR5 zf;c}S1O!oX&a6#h6U>PUY{>^_cyQIs#-ld$K){bk=A$VwDuamc(QWv=z*LZ&k{>Y^_Y|aPZo$a3yEj>EJN3lR;HB{m?Ivl(AvmTHhhZ zYHsu$mX0dN#?7pxK#4F$M4En%SVtIddZoyn!&oXgF)@4ET|G)foG-;VD01@;-cM)c z?Nl`q0*O;B<*Jew@>r2pfw1mu-0DN+S}d~oS#xk0dCboYd=fB%w6N0$6DtF26OsLt z(ZNco1D5Z2CC2X!OWYog8Y=8=>}x2_iR{mqm3`=0#m+f=)ypLX8(&027-7VJc>UBe zJ1YRTS<=p1px3>(LdM_33jayMKz{Q3(KK!?YIXK|6mR}p!ageNHE>mrA=BV? 
z1j6yy{?izMtr-By7R~j!N3gI=Ix>jfE9{%|1dLU4rLjDG7&Qk5kv8I0McbiJZ%8ub ztIZkP{-O{*<5Q}UqBE638~5Fj5keuJ<`|ume;4W+*hT}s6*9Tu)Qvf{xPa&YhJ##F zY-+2Qza*W(#;V-2+Tm|6j=VGXgVtPoFNZF~a^4@#k(8Wg3}6>=TvSe;vpj$vQSDd| z>}N&@rpaY*FYx7bJ(F~#wXsQO`xZh?sd)Od^<&|x>b%j>$-!7^C^mEaGYC}j zRW#}ZnZw7GZ4Vt>AVMdu;6j*H%d>to5s8K*tfquMqMVq}glDM3{oD1CCT;X#-6B>> zr*2}f5cl(}M@oVNk{4l}DTBgxj>8DnPMDWPN!HE8Nnd|P~6AVBrkgP^~xMSi2FLYZviJ!Sw|X=km+o^{i%|3ERrQ1CIl6WzgW~W zJQ!S!ogQxM>|OBbv>Va6fbsKIG<^pBxR7jk^fMhl3+|6I_>xfAL0ZfKiO8odXj4Dr zTU`JV78F!rbd*pUGsdwsW*&d@M8!94JkaX28aBlFmiufhDbCoo|J z%Eg9@6~vmwqX4;h>ONn8HeyJX@pMd4k=kA2Mv8tijzp8Jqd@flJoA}j=0rm;14?Ps zDGO3hEPI8cX+e>j!)@@r ziTJM=?j`wh--yf%fwKow4fzBy;VweUmKJGL4~BKlwvMF74#93@6CCC8%}6??jVHSG zZbRAS>j0hclew3c$hGKl))9?LsSp8Hv`h#hsy3?_DTKCrSqRsQ+c2R!k!AIU1&gA* zaIH*yn(%e%tIq(wyg>d7$$a0@V&u3Vcu)kXADl1H@)&j7SlceHvZQS5i=lh|Ic4 zA=DC(RaXs!jyIQsTX1l9W2YAPle3I`Y&+4W3=Ta##Yl5=EqhA(#C59K+OhQyJLVY; zWd|2p{7p}g+Hk~ritm+gFB z<H*E!zhhI4Y16YfJGRJF?IUA=(=>m}M(q z(gpc_Y!*QbQpFBAt|FAfEm>L!0wyE7=D9&ePC}nkudzo4@(5wNza%Ap^A3iWo7daa z&yUFE@PMf9Q1Di0F-U>X-bIxg)-7wQ)wd_ir%He$n2#W#+x zm@976h0YLOk38Q^pEhn79d!N@gxS;63p&wa>T|6i`OeE1dP@u#Y4`9v937>C*!eT3 ze^pI!I)J(%<>eXXrEul1>%Yg3SM#ZLh&tHyg6YU(Yo7M7;|tToZH3U<$r8H@Bwp=> zO%eo;NCTK9B7}$m@c5+sq@PxgtU6MoXv#a2=%mBx>G5nX$~m}+Uy7iyFRf-IBgn$c z&whRnR{NbJZ+eU^Is7#$Bw^B=LWky&dwULTFdHZ;(>0DcCW?bUIRWJLuVaxpeKn+eDEI%1(op{n`cB7x9c z@~hnN&t$uCpxA$v7wP}X3$2%$r2x;7c7cB4`?l3S#2ZSq?*G|}Kxmji3rJpp&F;&x zMlz<~L6}l^ZpODn>t{$(-#Mnm`#H#I>)^`ZP6E<`REKU7m2AGY-!VgP0)Uv-1pC*S zx?1E_4*R6z?28=3(X(rpI|RUs_bH({Cl%1eo{{}4{#Y-3D{eI=lmil9C0WUsxDXam z>~e-uiA0zP0e-BBJ7#?I$ft?h`kCRz)lc-Pna*Y1@Rq)OT$1am_QH;Bcxg@&3qdg-!79;AW2V`(Z%YK*nn)zHw}DGN3%?de0)<}8%nM%&DfVAw^(KT@-B!a z(u=MbySgu@$eo&x*3O5p@zr@<`B*G49I@LLiTQkbAhFH{n=V;7jaFPTEv@^-zIqlM zwxEN2T5kGf?i<$5@p3p%PoFRRcR}{>fBnY~Czw1#yJHP2$!vYMUVtRQ59h{yWTx4+ z0~1GX|GI(QDm_A5xBcH66 z9vDuv{5j^+C@t=-Or(cJu7yUCSXwNZHlGc49iY0!=%{#3nK0o<_^>5syl!_F_L7S7gd zj#uPEy<=fl-Ee57iI7(9%6y|B$$8#LryFBW37qb>$D5Ux3?st_@!->{iKL8YR$Cyq zup~B=z>^{(gtKs?Zk{=LEvs;y`)F4ohshx5$#4_OU|cQNNh<=+Ws$4DU__5QDg3^w zXgZYAXH1h}W4>B0PPzEHG?gfPY%U#^AQnpI7bExy=qO&0PcroOR#@T#Lb#tF86l(M zg8!)7Mi%)cDz1bzxl$gI*pC<_ixsY;!F}W*O|!M@ka`O+Zp5!=^4yz&gp^;6Nd8gN z->Lm#`uaPToXPU2%$69`UU`3cK?^|%(3Lf#%V2puND_p5w4*DeQgmLC<|xf=Re zVotDPTrH_`ZH%R(_Ypn@w>t>0pU6j=PkaR03y&Y_CSadQ+HwHEa- zA`d!i{VB4L1af(yaAwrtNFy=3U6C$C@j9-!WaHQp(DSqCUfB%%mr#eDX!I^g5|WPu zkGzSK@uWmiL%|h!=w51{u;+8k8jpXd6eWuO9*-=4IM8y_Ua456DyHSY=Lh0+&A90N zwLxCgt68Heq|5~~QM&wZqeFf7@44VkqxG}>c0YF$kk~fc9jGDjvV-~k_n(tVO$ayg zHn!!v;KNttMdylQ7^U>;237bp0hY_Zj3jitlR4T3!x`c{lP*GNe>})=wroef!kcuU z$A~Cn*Ai6cAr9qRb@~?b9-{lYit#}GT@Y}7>EK&3r6$V$7 z7%}xO%4a$ z^tBFod2eE$me7H5VD#NZ@1}m>UgL8azXW|sMq~AMcrT8(mrDcS^Vz1GFhIeRRi!Qz zuE~(Gbr0EJP-L!wK7yA3QHerY$S@A0Y+Y*kEESJtd_<`3V{e>W%SKBDXO&Wsf^exq z(5F;%j|((khpO>d2)B_#P&0VEuk&YbgfK79ZbVjg$IGQ63CjbHK!5#YVh?)pF$z`> zziPO&QQ`)3LaOcb1mN5Ngj*ZCaB}VW$Rx+He_@5ihzglelMz_{rejGfAFaGG;n55^ zrlnJGPc;dm#vC8n<$;@Z!#P;7XZUr~=iv7&gl3Rcc`S`*kWmLF*wQ(7CC|$?zCW?) zu)RE2Saj~hk2%)1D!f0Nq}X9&m8GWQFDWEfjXH7TCQ|2H)$-H;5SEa$J?awU$65XT z03QPR@?jM%l$_u*Wyi|2SX9g>QGrbo&Bf=g83FYwHj)eu3K|dtMkVLc>A)-->-4uw zL-lf`zq*A3|M)?vylOt8s!1UV+CF2>>xP&5rWLAIgO-K3V;dq{o?Ff>6IlM@u-Vn! 
z%U|=e!TM^PX)OeTbz9^sdOs>FoTZ4MCHr=gi#N=OwaJ=7lSo}8gdLXXM}E;t#x;8H zB!g+qhn2v9DMUbkB5Fd2ok+?|Auftb{H;jsYt_;U&1$s~1{IlM>Zf>CjPSEBT49az zVqqo&*5C!I&kDfLCXT5@&Fs7St7_QQBT_M_A&|7?heoCTLfn z9P+-x-CaCEVz=tDzZt;y9@KHkeen<(6_flp>(=emH5Jv&?*I{q`@?$=IO+|pr1+f` z{1QV3Dt!Pl4?Canhh}UfFF&?FK#cYK#+g69T>hG3(4wCSP)+@4&dC5kWqSHxu5l44 zBdN%P`+3&n;x93X;S}`nL+F8BOt4{yq|~dU%csm2Oyo;iZL{kiy7DmNk+UpW-UA|w zym4<&;pqII=YKDS!jUQFd=Pi~pfdbPNr=apDxw$^z-!a`>F4_L*_?_gpJ6<4*6?xV zM8#5h0V4xUnwDXjB4p>re-O0)n&dK0R|R*{zWvZ7S^WLyYE@z(B2c@b(a3ZN47SXC z>Za>^e3=>fUmEebuubHyhT0iI%R73Jy7eTP#YfNeBv=#D;Tn4;+Zc@3`vyaf5e|6$67@i#Li~y6T4p>@2Vtc!LOOlev&6Q7VzKB~oe^Vs88>!)~`3qGQe4O~0&>C^ZNRpJuq(JGNCrABbSQEIKfm94`m% zE#Odrr%8RP^RkD26^x&GqZjYRG+ENjU@wYrGmS(D6UU3Uv&M9D-XF)J9Lm9r__g<81O|>w z6+6PG7_%%?YCY-DSSq&AwlEUmr=*_HhXYPWae4xor)^ z7D{gKy{>CJ`n2d~$0R$Z6$Dv5paS@AmQsS4KprQ)e8K?9Ut8FD`(gdT17Z>T5=t*a zioGvai)N4V%7YEL+6k7Dc&U<6SP_iFc5xW&Fl}T=?#~&_ObS>yQIXT8QH1$q#?-=(G%AQ(#_0^P;;jn7b<=8D*^L$Q z@@3yqQ9SW)$jnB=#&wqYJ zc0@gdX>Y#DeBJ-g3Y}jrBG<(@5Te5u-6o&8CV_t!A=iCmhJP;=-q(y-#fqpW(b_Y? z=AY}iuMm6{gur2O+!~FObfLgHLs#e#Bi9WwbgrRZ_>X1`cE4*j_@JF%po)MtofJht zCots8TJ%(^D%e?euQ@g!lxJ<1n8KkX=i~;cns4Aeug}|a4pBx)Mu^AI))bRmBRyc9{&70Y{qs&>10&okflFgK zZH#WIJkQT{4GYN-h2Rsnujgg(Objdg-LE{I1bdaJpFPsejeV_QJwbd@UEUTYpX%*x zk2GvQcRe`gPGVF2%Y1<#)WS;s)JJpbqsIuc8zVIL)St*UtBdV&-Cl`-G zgT`h7Hk!>))VjGD&IHQ&X|W1jmb#5Fr`<8mmF>`)aI#K8omg$W-dkwPr$1OOnrXfu zJxXeb;CX1UlF^3zP7QLlq>4TkIf744%tJ>K_hJrrnZa`}DU@cC$d-;RE+Mg4cm~e` z{VWYRAtX&0r$@^HciO016Q*T1k~JgOyp4+UXE9S1}r(+43enW5-Tn_ z4s++EC!EvcPU3>$CYaPHinYkYNP&RTN)-jFmGW{ja&BByYgRr)QXK|5Z4ucD2K!9d zJZ-gfJS2TNiXRjcPDfB-1ttp}Mue^LP4Q_-cz)X6p506 zEoTq-zq<^q{9h2H<9@=i5)w50ABh&O%r;0ut}#*R^$z%-I14EGUvw@We|PJHk*7T4)9R^lU@FR#In{L6BiEwJwq z>!2tDw*4Pgy1Tn`=>?XQZs`uCyE{dsW9ja0kZz=x?(SGxK*9nQf0y??@B4gjW_EC# zVf+KH^E%@=KF4u%i4xaXq6%xGPv)VN7dy(8LISCAQ}0|P2NaKbQz<`n8M9f~vhG^Op)s)TKjRzzzJ7cMD%s}rY%+Bi z2VS1=Q)}wR#Fc|!4R-FN6$`%#Y+>7N_xi=y%@5wDTn>-lWri#s4TtrJgEDQ4)d~}E@-S30 zNS3lte;H{Dj?AS|;$0IU{*T6$ZJKW!|Y! 
ziLbD-t{OC^3|FXy>99xzA|$Rol80)* z%@)&E8Sq@xWOMK>cd^-Bx}lgTipHR-?}T1~M8DH*AQh!1&3We2GknzQ_Rsgl_l zxn^WZGoxhbRdK+nwR9CUZr@D)%f9h#QNuW=*mgi)+xq#$PLNRxl=O@e03{6v3$QL5 zu5InE{9rd0?iUgf+dop5+FvFK)CF;i2*w`kEqhh1QsDGx?& zV8MUO`Armm#=`Qa929VD_@Uam2 z=Hkvw@o;e@dviz?NaL{ndDFJA(V_F!xMr5?LMGv~s#NGTT8JFlF@0Dv(l1%*qO#vY zvag@kvyoby`7IgkCv(Q?XPGf^r&1|Fu-8in_)#?W--ZdxKnHjD`qLb+KBKjuj@kLa)%jhJ zImvYlnp5Q9PyJ8;A!QP8~sky4ne7y#SC@SwK^v=I7GcpjZ}iZX*PY<2H** zs87~N%%>eNxV*xhv)$#y$dIkWV#;N7bB>`9{oY;Yt%D@QdCtHg-7E@EW031 zfBgjs?|vmRsFe%%hdfrQE~)h-p)>eov+-5q8}x&{0{Ro~)IUZwkj=}XlOc|kh02`< zCD?Od=C>xKjJ6cF*<=g15fxBlNIUmcqn9OEn0i|95wO@rw}z~eaVd?Y zaOKoRFJ(UoUq;Sst8>dJ7T_KBf7eX&(amibddF`{%{4SBgv#=ep6z+D=$u-to|ho| zO~gO2oQPCZ0y3z~OL!q%kf@+;eN)}yA-r0lP3f!4n>80qRc`0I_(*L-4p~c&W(`zr zE`pt#H&9n?<>%v3@hHh=>CH`6()VU|)+{rPnKtK4%?~>_Bd2i__OvW%mSmL9&;d)f zV+;BT66SkQ88ncu^SMXVI8iur`?f#4|L0mEPG3tuxdBRCbFVm|7mb7l%3ytyVNuqU z3mGS``TJ7?ty;X)YE87^59W9<8FGB2Xb8Hi5q}F3pSaMYK_T9Y7k|Gh%Sr2+hJo<8 zbF(&wAuH$BY}#m=b@*jqLF5c$^xS&K^jdlG){5LI&#EQUsZ}cjXo1}~=lS$y0cEy| zZqte^X;Rq`G+fz)B3jFE1p1`d4;&m+n@iS)NX@*XQ6;+Ua#_&R0d<@z@XN|BO>1WP z`}ScF;wBl@7cN*@S%$xjB4wv~kV|)Xngh>_pu6WCY(^zDw$2I^ZioMU{lt8yMBb?f zEu+>yot;oq)~gjJ6{5vbM#qdIqI_7s&PK#eSo3l6ylG-V*?ys-*fb-?(HIp9=5ajl zm7O^%LB9KGSDGy4TG8>?{CZ<*M^e~D<~DOS=KBxD1GovEbO;ppYjhlJKAQ?tI_8!3 z`Q~|pNlE#$fHs$;{g`8d(GxTB;am)nKJ=VoFv)8&FwozNF6f ziAOcVpHrsDVwHjku9wp*=+(>U3+8xq68v}$hhY;+XwjnLXugiU;Zx1b3WLh@eq@2pS%Q-{y*Vn8F zG4yuaSI?WGEoajc`M^{W4!u=Llt9yuZABdlpVOtW?FbnFxS};Yco=kv)HW#Te8M0G6n$ zfTv>jbok+67P4)YU{^`mq-i~Llk1#s?*|Y=%z!=i5oRm4$Z?6&(61+aYzd(+(t`Cgukzn9w;f| z7w+%P2?T!X2}&jm*U*w6MVY?4ynjIwU$5)NX)}xD%4~8X zCGt18cu0BOjLhSFQFUEx8Gtuk3={qt>*6y=Qk7-)4{XEYadR44wi5Jz*BBc*z^4mu2`7e_+QAN4#8}KkiBzo?6o2puYh)_> z*mWcS5uu>j2yIzV>zO)^Sd_{{klm!1>3Y4Pl^^D{OLJAzWC5#zlQ`-;f}#qD#Z>s4 zT4$mPD$0Lhi1k$GBU6-xT@fF(ai)FBYC1-|Ae&h(N%7tvPDQq7nuvi)2|#6y)yTv- z*z@D8z0WEwb}Ggr!}tM+3i1BlcC6obpTl;SS#hDA$x5mjqD##(WtdcACaeB=@jc2F z1=d-$Ln9fl@Gy zkqo(s`ZX_LKiPn4x>F$SWftE9Cda6DB`InY(3!N_qOPtA*f2<_Vs*UZSCwk2qIA2u z9OT?K?$)E3&V5=@LT;=-!h-HT_c*(HY@~847Fgdhwn)gaY^qm1xD9&r5O<}aeKQVm(=TAGHg-4O9vbu>xl*1%;NDzg?HM# zqR#xeU%)p2auEYcHlrbr3;kpMj>47$l+89>zMq8!fa?Coz!vPO5s-~t>n89SXcLat znLq6_0N?lir&+i?03hK%f9fdI{z0G(X#r-7U|e9w@Z)AwboewpdPqV03jgkqm2#rZ zq$^GHLA^eJb%F+EF9sadJK3h^zRSPEvP^cvw2Vf}$J-w?(&KTAJeJPn+(d8$ArObIk8@$@exeEmq6xgy5`?R(M$E zeVaQLTPl=BWc3(0{X|xFvFQCI>=?|8Y^kh6%y9~NuV0HM1KbR$P<0NN=>@n5 ztec54o5KONtJbnh_voCtUm;3g@Jg6zL5FKrtAqhKd72EQ#}>J+3gv2HCB6&gs(=&? 
z(dl8ClvtQ@l2AMq03x}moG`^=?0k~OBTrNc4UORSoOI(vpxOUGH^7DVupjsHfbKl) zD4Fwhaff{X%apMXgG%}xn^LR-X?sxRLPR*W2t?iM=3a>y0Hl{Z`yN0~GB$vILHMBN zP#ZWV#|Tg(gR6ES+d46SzHMgvR+IpgjaY`4o3Z#4G1%eE^i(H5M(@=oIOgKSQSU4U8d-p$Q*qO`voX2W6UG8i-cO8i$IxM zm9NER?FLVYWw^2>$D@!KCE4(5nzfYB(UGRjIk0W`u2M@da3fVSXV4`xzX@w_zyD|Y zHH-Ju6L<6h!ORF&X>mg2@Vrx`d6)gcWX$epBO-U}d9xs%218Iqz!{&#PR&>_IaK*m zR9e-p5P6gXPKSvNnY;Z$-n6L+^t_Y(f2||O+!mBe-}07lC4b_Dezyw^gO3*tE-Z+n z{+gLMYxbt~V>B5>M^Z9xqaW`GWH=J>`dZN~^od^C?#GD(73Z}pEK zaVLLTn*ggJgZBBJv*8bC7yrb4kv@o686hOR&$d^0O3Nt_1)5GjYRy)&AU1(34P?J zR>2+l%&}eyA2LYEn*6n^pfYc=lT?Ei`fFWYEWX30DyG3)g6N|aqBd(K5q$oerK8Cd zmA<}(2w@}M=P`x8Hq9hT9lez-OHh`bIap%^o=mJtps65gO=7n>sa3re-@VPixNfYb z{q?NeqDyLiSIUG-1zg$s(?t8S6@ez>{S#&E*HJPhS%l(5tdpB)s4enSOFLld4X0ie zF|3|}RWHBy#9mDy3ezU`lF%)c<_YIzLqoS1Li_O`TRlK)DHD#bQY4G;8~^kRu7fpG z>W9u!kY0rJDMDUDWrtHhek%(iW89cH%HBWHQ$o6z@SL69WzGDoxcV0K!q}AZ<6z_) z;lxf+dnzx;qi>TW<%2xR*DY%{ol%cE0a;zVHvBM6!gPywcu20&qo-VGTW#dhdBTj| z(Z$1z}W5zYSInpdD_-Yyd z=r53AA3H9(Sa~dt?m%;AvtH)D3Kua3H~{{qbNa1hBag;~ckQoKr5{ui9g^^6aRhf6 zhl+LLMf+d0N1Up$s}S>@PwJbSL}C**dM`Cw|L=D4CK*b(`06n&)XNDT$kPO>d%Qoh;Wlp<#`VkSp10w5Dbca~cH#QAGr(my`3u2k=t0E|UI zXH^A!>^LKgOF&`08yWTy_RR|l-GgYI{4WDqtS1ouEaX^?>ME1i#x-obfe-Gnkh1Ke zV2OTZOBu83*}+lt#0tDd0cF@(v1vp<_kltB8;ka-mfe*vMsOH&m&V8@)W?QcgcS;A zjOD}4zPTQ=D3o&OMXae|E0~h3RB>vRsF#R>vP%L@>T9Z%h?h`IlPEG?OlRO+HO>uP zs0%kH>K;XIqV%=*B7OwM0I^>|ci|)9KX1a`1=|4^E+gmR*0fb%GI*(Y@k}3$;#EPS zPLbHvu(7Tg)96inKRSsQi7cz(vZ-@6O31kC{gwqLL0-KQx0qcr>@Ml@`6=q=&Dnu!YGB>-o9BcQ^P?s-h%-ciN=7VpqW1MB^CkEBq48d-Ow^z z(BG!!Fw4bO5X{{hiLQtQ|d1gqvnMRCLKt$^>Wt9Fc0b&(Xik*q~bn1e- zDCOL$rD6&?>gTI};GDxPs(c5pqu?w`RxvZcyfNr|sme3GlPfDar#zCtiAV@WG0U0x zlkTl86T(%@H>+-;UE5z&cA;5gSXq#c(ep4z^_C`zLdAoaymY8b9@nIQkg}(h zR0U-mO51}~1BRoFI??691Cf9o;_H4Ko`Ufr9|zqF`giZd8Et`hYh*WhfmI1}9ZM#I z*FBo08Tcxj95bXB9&I^Bn3?b;DRyq`%>O8Oh0VrWv)@p2FouYEluss}#9@Ila=UsT z`o`afQ87~_$Q;L(mL#Evae(oL>0U7rAcd_YHss+0=ZFt9tP6b3)3!`czoZGyab2v@ zpgwGV=z_2)X;OJOzU;o$NH=F4BzsJv%=5!WPSCPR7l>c(gqWGs9cGIJ0T?jPz}A^AooJNk(U%t8stWZZAtc*1p2pzng{u z@xEIMP%aMscMhQg9%ssbDM-?W%qx(|*2MG2+dn@3^J0=uxrdQ*RzH*Bf5Z-jAZ zH68f*086mYBe(~_jcJR5CQhxl0xr12vUoEXjr9d?;CI;nE8N>ZUd;rV=Zn=P>+1Y? zK!&Cj_WnO}?ri@`S;^Xs=f~$q~-TOtzfBG`U(L zWHuH15$25yWLLB~bXXPBQM)ROt7tRZPcG02ND^FkfWe<_fTleO)2banT@4XY;^+i} zF^DYHSE`3)G;LPs6p5mjz~s(tHS@%BW!t4AiM0q{?Hm@0%HWN$K@?PwWuurZQj@z& z8O%@pb(jO8)?{CZby##jhW-f!-I>zXE}w{ zqv+O#2_S-!<;j@+aE6IaR}02}y2mEtX|vd|8H$)r(4mg( zT6r7Q@6&A$o~)*w1mr)rYUv4A=aY*^QX$;FBvlU}C~_*qb#+0FUY zJNk|=p<1V@k`p88AG$V&a(RS`XbGCNYvu0zEJZhVB#~C&Pa*omQ8fiK{4Sr^5SP`L z(7y$3xkwd=<`6m(P>>qQu~}OTX(~oalamksYU$@kTxT)N?=lt#(xOPuDBNwD(5kjQ zJ<2x||M&=9)pgwZTRq6zVl4;PELOV9aPbY&@*&CVvZY&CC|PjF@Rdy9kEb<`vFR}1 zZrIJ*id;gq)3vr$b>w#(TVi3M8ztjx?XBw^XPe53UV=Nj5LOY4mRmYUH?(hpL|nG$ znNw;T`qsl_67Wn`Y2|e$4+|wU^n+vL_v3V{I)7~wtF?NiVfCg23dKtGdT7@ z3pSKb27@OmhgbjYyP1u--;S`C89kjv+ipWH@y6P335aAW3py^5PLAeN?Vyu+VO{ca zK@Le;<4JpwFK*41PGWq=Fzo}PNC^uj?&+Gt;fk=3ujgf60&I~ZM&4D0$`lk-3nB3d zWN%nC1PYRQJhn8~MUOFSRIE2EM#ZTx){N%wYE!@tm8j(28m-_7mO^z|%6xLrGsIK>0g zb`3hu+&bVEW9I%wbXYUQ-#0ohc({2HxMS!zHrdFqHF0Kc;6tPE09`pFQ;*;RuAN9| z5-W2ja~RaC_{V3U0 zW6O~@>U2j=JVpBp@^Zw}IRCEINi&8b8usCKz&lUPJvK>#dTJ_)B?h*iPqyUMh*$1R zoU)bvi+$c!ty!Ub*gRdUM9XE1%;95sQ+S>^>XGWv455YxP<%B!2I4{hEnRpd0Yz3Y zmt*_n0qS?E@zqJ9hs%OW2Nau^|DG*m!z2MBXfpAxV={M3^cg zfDfg4ndN0^Zj!dXTGRZx`F70(^ie4D>beIxi=O$;j3Jqn%?HZx(WO)|;3Q4HBat;W zaoJMa&0G!vA$f$oREm#R$YjH3ryo)>Uo!;rv}l)#OpY9RWU#}v*r1VuLP)C)$~&U9 zVmb-zOL`hnWYoZMPHe^d4Mr7jey?_mzr*D@o;4=JqCK8vP=F{j3>$OhRc1l!dWo#? zMdub>lxi+klFCHupzoPIuk`hfwAZz!4mZ?{er{n$WcLKtP4lESPT24$Zgt77P}@#? 
zU|L0ahHhs}4OvTuo@g_&Jm>+W&qd{HCxLiPH2RjLc=KKczrmsotGx#)fu!7WCME8X0MXXe_=-)~N--yAjZM`*CG48{B3ROu zXMU7mcBmjj2t=IM+#2VtYf&L+H{!`Pt?o1=WUOcfWDZTI|E~}VJuCG2EVMFfKjWoc zb4;I$HZ78qM}nbIz1n{l5M=*dK>R0F=>O4I=V<=22cpCd=lcjmfsrERumIFcyEi}y zTsQ^&8CM}f1onGPiNN1U8Vw<)Ir!n*zi~%R>7bz;Kad85)<@8Nji(*}Nhgu7){9$m zI^TjIIoE>VdNlsxiKZohFcqg3p@fq*+awdqfI2p z%0yc)ez|)HwWKmEnybspR^oh5XPZek@ij zSFKVdnk4mToDo;*IqWRY5BC`r3VqGQSt7jyNUgRIoVY~wm`<;X-&=BK=14m zaRvQe`7y4OuYfrjHsoF>&-OZPIGqfUZx-D~4j>%JXsL(TFJLa%Z$aD=}_E+`ZMqtdSQypF_p-o1M%bCKrzvajOt z8h%}SiHJU%;%|0LEhwH^PtIr3i&sI?uZ{UtwA1WE-{DvS}Q2(=9{$YYU<1!vl!HB>$7HK@E-B#6BFX`WsIE7K*Iwff6v zV05ZKefqkBC1w6&N-#A{`1G*@^h-tgD4Yp{zP1uM^blR~iz(nY}`jny5e zn_czZgRL70C*bA++}Quam4eU2cUXo*_cZunwwtrQ){h7cy1^P|$@*m&tmLnJLZNCZ8-ref5? zhP{|0wS9Xv9WJ^Y7!&17XNDbh0vehWsDLmc`rcn-@#Q-_J_H{2D4{(g$jZcrd(uO5 zEAGS&l7lHFW;%0J!r**P@R+9fzlC z)PA$Q8#*y*4eG#I^_~eKT%CGKaPoY1-P|ODT1Va%OfhEPNvYYGL6^3b!b&*Iq9O&P zneG}ndY?HiI;a9B3=VIT)d_r71`#mQ@PQQANIQT5QQ`Xx$>rjgg%YKd_w=3UsMTiU zd0t&k$IwZBM=xY`zvDxx5FC`68EqScCAdttNueweIxa2_?IheahhX?T7=%%Nx8ssF zhT#=!xoDp1b?;1PAn&4GET_J%!vK&`&n9sxmdC_zaucU!i(4*VTPE~=&E^4HuFabA zeLFAv1UTf2Ul)Cd0>A9CF;oGgv=AF|SY#@08aB4U-Fq>!fHEpRecnCOlG||*`48L9 zHr@&lmjlyny1pLRd(k8@3Ylk=Nwdn7FyYWFNk@v33+578FH`Ze$a%IjO*bm&8Dn7R zO^Ua&`d!TEJdjx@6pO4J4Ti0mUNy8HYNk=!1@3eSnT>1%8x4Jc#}NlCH$1#LUJ76A z2(*6dQduu5bQshj%;-{oIND^-lx~mmHk0$JPCiXV{B?P~q3Crf1>AVxo=xmotPzmm zPk@3!qXN0NmP;IGC#8}q-`Vd#NfyU%c6S<7KSic&J*>^wIw9bxI*3n!nCV|CA^Rb4 zDI|p-r~8PkoHarQAOC%J1E;=&vkM=;OXTP~s<4PAt^}#=87}#7VHbrLZ$q6tRyQ7J zcc|0H3^1(}75ZFkxk~|I!)GM|nqhQh1-U%rW?p6mAQ1gX&aZ?&*5nVLOv55wsYu;H zzFvNtjGOCqg5KX=wC{WUI}?3}M0ZIX@uuEqz&Z&lY@-cg3Hr2)JAAO(!1 zG@*+X%cwbQt$t} z4gE{TmEil;i2lF7)d?LF)aoSuiXfmoQ__5X{95Am=%R*EQ0}(R-e;6= z((YFq$Zt@#@&5kV|BL&Zha$^w|4$8Y>bT{JDf)eE7tHnUOHk-iuKrs&++>hGsniD;~cL!44M4F zBf)!5W}8DoFxrY+3un-(OtbVWK-`{cAe($WR`^6ULwb3;VlN1+v*7NsH2*L!YE_sT zi$5Ny)q$<|sWru~;0`X@tUw9>cP%6z=Y-)ff9ygk&`?dLIF;?FGH~Qw-|p_+WB|7I z@sl!$PxX#irLL9zu+yzs$g&%BPDvqj)WGiY2fy6u5(Ra$R(p}prpa1!czrO}7zK=sQ0Pi-pIulZA3qgW06eS1(yf2lNgvS3Qs zva8tK@s39^s7iTBlSId@SQ9B79i+oNNJl&z=M5z>&E19)r<|!Aybh6|hht#`>V=ZB z@Iaq>0t1xkTO#NeXpU%K-^K^wOtAo=_4SLN_W;!4)&8i!r#CbWgBf=mrWJtNUJv@u zD(BURu92vJ*W#yZqZ4YRpu+{*4OCk`y9PvKgA?s2VywbO19sJBWdR&B#23wV!wDnM zO^Buwy!jJZ7&x~K7Q>ivT7~dOi2XwS1I=_?ogLvtk3tq!76`?NJ!L_znF+}RzdI7& z(b>i2G}B>+A^l@#J7a_F_nft(xC*vbwX$!WHkhU8%}AMl1QHYZ5)N48nvhUlnph+x zNd*Q&W~oDSWuKL*L~4_8!X)|864UKh7J|*?_N&8T0~;|e6)d0yk5E}0U%t}1l>~_l zp{v>!;6_Y+Qb%F~PKx%_tzLwcEvh}lB+7zD>U6^SqA>*IMX_v}k8v-&)4gO|TBK&p zWW;6|3KuL)lv&ma>*9OPb1E!pFSj=%L)-v-`iPpMhx6BcF$Za9qjXr+0fnui%-y`??m&NbjB>iax%3 z_)psr`Ol*9|0=B3fH1$leGE>`A}P!9ol|hq|iy&1l}ke7e5g>`XQSxX4kYu zpc-3(hlM8U;}ElkOsjm@-VT_CX&`P)JsvBXpCcbtVI7Y}T>nFCnuF&l2|3wdE2qLT z-^xC2$-8$uDqNz2MXW6Los~!Cb*Pv&`rf(LZG0LdH8jG7G*%=Eu7I}s2x-Fm6ONe5 zQh;e4ASU)t98)L@YE05Ukbn9;tV>;(SAA%`wJduA+09In{RToTi!?F2rn_qFu<+9( z`QCgLVIx#1m}-;l%&TINi19%j@*+0uS*fyPE7Ie9a?R68+(HxYo(bK);*;H|qBIpg zLRDa-dGCTr1f%XcTYDa|w}wWd+$V8-n@gmtlBfB$lS3Ir$vHo<=8t<(?45~(mQmH_ zOh}K6!=aq4B*ZDwP4T4xwo1`#QK&0+%Gg)KWE9b4x}W8$8HwVp4lTGt2uOlJ6k{!h zEA~d;HQ2ZKnZZIFSjQZEH%sOn;)wCY}A&sO+S3|x2i$^$PGT&{FgPOpO~kk=iQJCt3j~q z-Q7^I>8w!IWU}#MNib3IAeG59l;)A?G=s(>aD5lFhVx#0HgzLsRprqM0GKU@7*O~! 
zJfllU{X~8JXw#!MsD)&Hb_LFI`1q($Xz_J(Gf-T1od=o@_YZoR*e2?a)s|-FYFZiq z`1lC2P^SK+L#wW=mzZ)HS3d{JWUv6)tjb`0Ai-~blslL5ms4wV!^Y@j$=p{WtZm{Q z4CqBL@YB4+ILw;dpg}VmJIsmwnl_x3KBHF zhN1GohUBQn5h~iKnNhTfkq`c!*qL2}1R9?+Pt}4@ye?KPQ()FWph4nduE_f5q3#=* zM9ls%$h?$QaP3H>6<{<_`N8|SuYXk^WMJHE=sF}Wv7eDL41eHj@tDepLr$W>lt(Ev zeQ#4{1`;x_m4XI{&ezX%5N0~Oxn|>A%m-j$Gv0sW#LmUz?+lcFkWz>=Ul0}r6aUxb zsaOrXpt=4I=rsg~eyBx;()N2?;bG2L*zI!&kP{%3YQJ~8=n~9qc}-IN@r@EV z3U{q31+pEp3bFCCq)z!>0nn&cyH(0}(H$8NaZ0ZmJ&eYpez|5e8;=740;Ive$(yGJ zfdD1RUwE)CG(#l5CKmZ&DpK?+26Xuvgq8C#d+@YY#cSzqA##b$8ZwVOULDsF=WgO& z3aX9u^k}p8&5iBjQKhh0yFb>-s8iMmYDC#ub;LsoHt{luUGlGI^h3`hw3N^=4lD`a_fUF*1XeoRa+F&ggBB&ufYh-%iamTlS6#-q0ADb7 zb0PVzZ^U<@CTo-XqGquqc|3wqu`q(0Cm4jgS#kPlXqg-CXb6As zFTO9Bg0sW|QH znq8VzpH%o_Ft)i(?z++t`n1C%OaKz8)F_ z#00N(0kJ1bpZI=49-$AA@}gWl7!RsD!)jHl8`KGB?ppaWR8eASdR}GMn-4K8io8Ie z78N*Q(?jomZb>UW01P)n$sbB~bOC&XyYL<%a9OB)n){J)WEo06w{W&xz@7iu18zy{ zq)1;#!ErLWwal2&C2AZw4HV?mxq_PGzx}xLB7$HZVRcPqRc#sx^^WSZT~M<)zrPQHL%gi*hKVh>F^=lY?X~{be5cVF&tUjMUOwrr9yWo1@Rgq6I5ArWQ!E zDRj9-FLf30SbH)o!o(S?*$cN6ys9#vrc$M6dcQ&pv!Kh@ZEx)R3dtJ=1()FM&BUhE z9A~j&+_;%(pW-hhS=I4zgijC9yrOORPHS5M(lO!mOl|85kR1aC8RRsZ7S3_~{GRgs zL)iLbvb$amm`4Aq@*7NPOJWuDxjti@+M2`>Q=UAVXLhuFd(ccpu^=$v*x~=~p8xh4 z58c6yrJw(fTFw5&`Fq=%@$#>zspP=VqoTW_=jBk?-`_=F0(T)lfo5Vrwt`+Zcl-4$HmvK$s|R7!SvDh!7-L=B#$XsNWW^(A>V=>Q~Fqu4q;^cq?WdAbOF z7d|~Q7jS7Yp@OL7xtXQ^Yrxe5t*xmdB1`YSqKd^{vdQK37_7eCSjk(8`7UN*c zX-mBzKm$A)?BmrF&3O7YOt$&zOmFFrAuPesuwo?j($ zEB6+^vs_g6S{!(m5^ij6V(aUTlETu=!Z$G_I}pKVu!nXq3Cm?G@v=H&V1wsMEumIP zf+G1vqYxJ|w1)`DUR!WU1>T?|fRQFjMWMEtk7+rliz7fKgB5m(XV#mJ&oL@YcAp`zcNBgS`d$5XD4o5`V zX==t=v=9f8r9$K5)rkP(uqzE&M!Y3_j*%P_mu-FHj#*FfhX zN|yPPH@zlpp^&@u>1*)soab!j!v`Hfe~(HWo~1yGaZ5eD%d# zTV*oC;Yda>&Wxo8?JTjcwe?kFoEk-^GgalC#VAM>!=sHk>(w{`g!d@X2tos2_k?&% zgJemSX0g^~iN;s4)|QMB(MGn(9{4Dl zO7Q#a@9$2C`vI1ELEx0r3tQUY+e6`aMfAYuyi`HJ**&lSF8)(uIT9e&rhP|w%k~dc z=O4slob+E(T?*jYc&Bd2>?efh*m)Sq6h!9?vO|8O40gL!D0dlEZ5ZNA@umE4 zEhualiT-@Y9I+KL3I0IZFM+p~F$D9O#khlii=dcA6r$beGk>s}NQrwHjYFN|xdnt= zW#`3NM5Cw@3tiQ;dtqiEMo56_EEOeLow7~fb1DLz+@k9y5ttzWMoKT@A!N+ovI&XQ zu)dYVcIb5QrpNTkg?+FwN?eW9&az2L!^Z9|+X#N+ozOj-eV^T}x>xul4tPIG=4%I~ z3Q5d_$mh8?98*g*32?LMaQclLZ(96)su9)|>wrO@Vm?9lpr*uA9i_ku>8+}ydS|zL zOZSWMyaxxXqG+=dl9_HSGRv2n0yf#Dz95pk=I|$xx6X%m%f{FYSjW{O(lE;;y;^-! 
zqU{=q-OU;R-D>szhSJl%dk$u*pw`-vmCbv$W>qg0&f!A9PcBVB9rZQB9bBQx_gQ(o z9C-ebzS{wqnUB(pA#)mu?*;j&VAZ_mmcv~pz_f~dQfr-Fm!|cR5O9Rr9?P;aiu+gs z-nZgi&>_?8v#1#N*R0#07V9jI?jbES8Xt{4%m9kt*t4a&Hxef4Vn!-DGTmEE=2`P7${EFXx`;q&uQK?u=X)~YBLgyP?YnVGlez_Lk zJmgpP)YV~r(&N$#DfIf%-&gu({70KJT-UB7!2|!sOgAhjXv(JBk}9=%Pq!e382O5T zB4Mul#Z0DCHT#qfwRXnPvexBzW5G=}k5$K;j@ewZ2n&$zfOaEl^Y5Y}wm*fzOkX~0KTMK=%KpQF}P!S!pv9>`^*_g*gN3V3Ibxr4CyifP- zYvk+H&vve*v_0D4PcJA22D(Mb#V7^9F8AgpG&r-?#y0dg^BfE7sJ5q*0d!dD`*M15 zYiOpR^;eYIx1EX;1_xH&22ei_&1&Q0>xfe}+}DByJn-$p^Y~LT+&1QkyGo>q6^-#8 z1z3;J%4^u3#><1uT)AuOpijM~)cHyE_UR9sAE*1j`D>o;v!3P5ip@>7dj`7lCEj-~ z&!AwjZ|IZrtjFbtZ&_cZ!1>c1@^K$SnxhUa@WXcbkekOmcZoT(#Wsy12>HJxCm@Fg zMfBHicS6L}VB$nxHdnbe1DvuuL`qc?U&gQmT$%mTO!uBq?a(X?!n23;5lwi|M4_VTxz6hwN?ikpt?j1{)mWe3%7WFws)78d- zYAus>LiFUYPf$@(bs6=u_!s4BjKa8Yf;Ag-MGbW-@^%VralcJd&fp*J&2f(>5dDt6 zP8u`TGOQqS-oxFy1aa+pr#w;}KDY!t6>I#M>BzPAS-WyeL479ghHS`;tI_xh^N#T$ zQ%(`CimarOM^$ut6UbW?T&?%qGX?NGv@#H7`q)KUknF0nhh1_Q)BWnv{{-al86`?k z|FX5NA#}!lIhx>n{X+R{t_L6*TSreCK9vcrP`C3pa!wO8fnE{3#&2Z1%d2niIITdo z>`3slLefDD(Ih=eyq7YVN&Uh#mnh|Dj7Bmg*~lHcF7s$|ym2mLl0sXtJe#4+4-0@S z>&2n!3&ZboNyNKa(EYd?VRxIzo>!X?j^(t1F^FXJTqB@VpBZ!&VHK;@X;y`uOIeAk zPX>r`o!uno_agE~DVlfz@*=b!pz6W?md4hHX?JUV?(dU6?rN241AR^wBHYbuIiT@C z6K~J<@qk*BI&pW7Da3rO%P*j_DP%zlSNoV-nN})z-&eePj@YrdQV1xe`Asr0JDidM zeihvT30;W}nilO_z&l}l_}xq=i1axQer%}IBtJKQIcx|@=5vV8v>BElg6U3uHxy{3 zpVG;rnu)nv#mS&{7;3qB(Z>3-_>CmbkAhNaRy&wKrzW$U)f0nQtZR+(qLY=XP3hWZ zS*xR)6@@PWhr^{MmXPkKr7W|BiMWu{mMcTYvj`( zAJvB`Hif9)OKg@HYU9Obj3ITW7#5PUPmfMLGuNqJT?MbT*J=43!s|?Kf9C3ochnkD zDvR%IzmM(;jc~x^bWed&9rqQcT0wSHUKB)Kd+y|^ST%Kv>%Y@bz9h1yU<=hyv^_oS z`V9+s*6<@a%6I-OBkXjM;b6@Bw&*iQ+I-we?xaC9+jtffJzGXhV-7RSJ@PM@36i8Z zII49^E$~gNjb3qz-ET%&AM?q@ZMMy} zfI*`kJ12l2kOY3<%clwbPRj3V4y)h7a?gLDX#Ua;+p79WU9xpXlC`z^-k4Uc^)fs! z3;Vnqp>p#QBgXc9#lF!BY98>kn|u=lH=!;ymb@#r#XmFCp(Uq4T6A7rx0mvP^Il_; zhsw6NbutycYMgGN9tN+jh8Z^(5ju|S@(K&Sa5A(52_sM%J}A*tU#x8!G1jUMr5FLR zVn)T^>eOesVB`Vrl8bt!gHcizp_tM-EQ!;y-%~b`A-n>ltw$1zWRlbkS~uB6N6{`m zNW_yH?6~J9pO>3K!UUf=bynJ2YuO0A(hxPz;Sn+Hpe9KbO2nT@w3xnIUkf0{Iw~)b`s>%k4iOk7hJMb2vk_!)Rr1RuZT}vRO?1j{A5a+^+rgN832D**gR3G&bR86N#=cLnd3 zGBQ?c5Zbz-s!Is_OA`bkLbj=9AC_M=l|3b`nZ#oAPhN=RGxd3#sf`Oq|NjPYJ0!S7T5UDSy1WffCx>Xe7oaj4H} ziTeuMu~&;NzA1(W7K;$(aS#$EuS9h-tlRM~?Xpa)C@FJcdP>_=Q%)h65x;9^{#nM! 
zh4i2;%O}8x2IYHKT}nCcM}ZNXpK>87|7l(UE0OJ+-%x@*El3S_Z0Sk~WU1onSv)+> z@Rqe)!%r%8TJG9E_v|`?;BQSehL@~aIq79t-vZHRXb!q$HvU;;RLn&-{zz3l~(qP5i-Ai$3u;MPk-K{u< z7Kh^Q!6m^1MN4oEE-mg*3WXLZ?alvr-g)o)%$+;;e#n`8IWw6#XJzfZ*4q0A?4H>; zFrNh*Q)$8N1LRWLk0x`+g0sI40J{H}tX$B8b?(H*esyj`+J)P0jx%T+Y1T!|zRbe+ z&K!5sMTPD13}xD&CX5@OUY~i@z8`?nRv%^WIK53TJd-(hpBd!qRtX&Q1iht;^1}=m z@0Jp%M;YA@ol6cfhL^0I5q8hy#&u`#^%+Wv3t2TMjx6CW&}X^J&V-SY)XBo?bbG$Y zF|167L8n;npBC(E{IB+5l7lK3?vf#vI7>f5GS0L{l@4!wr6^2tu0jI=d=MKY?JAFN zmP6gjOXXY@d^9w`E7->;a6vMkQ2>zzSATu*HH~NbjqdqbgN0gkDu6IYaG2-s#gxF_ zVf4?JY&yb<9?^ekO2X~{;rEkPF0d?7KCAGB%i%g%ZxdH9I&-R(asL7vBMmv@(mZvH(so<)XRxH@_{`4}}%Gnh3b`y6RVi0cMKuKC;FUsk{1Vt|3=&aj+T z7LQ)+;AMr5rnvRITaR(PvwM+mp$NTR10iN0!IY_)(aS?W&DagY96pT^6%%nf&G!z6w(lD}9V_NF zy}l)xpSu7@mti7PYFDp%hdGp_k`>!-Sd=6*-<=)nu74f~{H?aA)CIB#X1bdQ!F~*B zWuBwIh`%$|zL4uG6CuRPUu!6RP6flSo&wB|6H=PXKN7%OSl+XT*hKB6i(|X-eE@dL<0`ICAh`{}54rjJm53&oAWSWi0UoPY(^iqa=70JZVQVn~@o?jlOOLiE~^woLH zt01Y;nJk?nAzucwj7)_&=r;!v&pVej{_PG*!DX^DMD7r|8bbR|f~ub-Qug z!~@exL=tBbdhF35#oouluWz%7DdV#Z_v8X7+ZSKFbyB9Ix7_-mx-7C2e=B^o2wn?v zFk>&?kTVh?n!Pw4Y846#ab+)FUx;t^bQ#^FaoCw}cMl4h#y7WIVmhkF`N`**=2|nO zhOp3KT`|PaBC(s{PXC%UU%5k0BRD1+I19zx?1usZCjU577Ey`5)VftR1x*vSE z?AI^uNV@5d)hde9F1#Pc>KtUoKn!YTI&A&P$d#TvZ&2R839lb@ocGUuU*R)-+RhE~ z?&9rf4yP*s6n+DfHBO;8$_lYT8?myN_Wo4e3W@o5sotF}3~?3ic}7|&I#L|pD$Dm) zahd1^y}c=)E22-z^U=aF$-6w~J=(HwIMET_TmCQT?@jXeFTS7s`8DE2N%%vpJ>S*C z*-GGhFM#V+k2%&qbpJ2p1OE&8$h-bG@(D0q{x-^A5dC5Bos5b3f=wUT4u zC1wn5s&^&U$#`nZviP84@@y4l+#7#kz}H6A++jcpzZ8Dsis91Vi6R zamMSQVa6n8#5ytT`4JrCQ47>r9oW5kEXY&HiybbnX!U7B+rydX4CZ(#*(uLR^Na@3 zZ~QWz=okC`6HWl}dDG`^3qhd)p#+sxtYw( zfSesvDfK`=+3IhfMq!mX_>UgMP0gD^Qo7vqdgsXV>n=-pBw^zovew?>og5P^%sjD? zyi{z3+FQhSb){+1&7+#IBQ_5ycQU8g-!T+g=Wj+)YQR&rJsju>I}cC4e`Cu1_?4k-m%pd<>RXxjEzNdZsCz1$mdbDTfJu#>?1yiY}w3*!LkcSF5L#GTj{%sgpl*568*1#V)#XMDO;cY?6ERd~v;f zI(~Z`bmjGphdAOcNw`yuXIkghmNqoQsbgsi-rDtFTA5Z8YqE|iehBQO9adiZ+j>nS z!PxH!HX}3k%yM-OlccfzJc-=&UfbaYoKL5%Y{$3>1v3pfqi*j1F0g?&qJN73mO8{? zZqbCIR2A$M1%3fsF-)2&dG_7&-CfGg|JMGh z|Ize|mV)`(U;d_#=*87}e40jS|ElxC+-e2Y#gG71*$~A~Pky?*Ao|}ZPg5EH+04CG zJ;-ozXhY39YMnK`N+7`=AmT+QCa+nxz7)0ovCs>H`)pz_iM%VH+^x}$N)7x1Z0-0- zH{;y;b@m56)O;S=H}>@z2gYydwE`(QS8dh-2!&aHqwUQh+(w#_e61tOOy{!S!@@U< zvF)v51_Q+TUFyxAHu^yR+R-Xye_X*C zA1j=yu851+RqoL$&f+3g3Eq;TbFSfZNsN5cPOuE+*+jWl4KJA^EWyiA>A51dw^v-! 
[base85-encoded GIT binary patch data, not human-readable]
z%nQ**lmGHS_`x`w3oh+7K4_qbxky3FZ4yYrdfFu*Qu+$K(Pv}7Wr`;^Rf+1Z&B(kY z88IchB0a6YVH)KcVnTSymS<6gkb-xBfZuILV*w%WW#I$^D`Ru*KPHovN&7yCAH%$a`2H6X5ivO#KV?x zNr8deNBJP3)43dNO+DXiA;#-GK)?&r0p~5WupcmhycFe07=zAWRVyT%Hahf^kXYS0 zvsQFap(;d$f*m6V)yay1Olr_Yx@#+-A5_)W+%1$sgYRC@kCdBMN;4qa8b03&5RRBt z(hT!AYg=lo1Mpz2bo_$6D5dQd)(5*Yd2pPJ{T97W3==Vs;j&B;rshIas2v0+N7Or$ zF&?&66-60*NVO0$s&wIx#i3}~eDrN^nVm*)(kYVwl98Q4h8GM=*_b3m3W6Es2z0Ug z`lF!8gtz^(X#F3`l&jz#Kj^?Oo%?5{bT2NtM>Eb^($L4M%=m* zS2^`jU^fJ|D2}z%Wfl>3=u)|)%fM}-MQ+oMtt(T5oAhxakQn!9P1!}aK&1-gLt~Fv zIV#Zw5_!NTQhsZ#0mLFx?gtQa7PCaW{n%qhlpx4&J+XCHC*YEs(Inn2-BxVvx5Xth zFbR&&&>?g}Fjf)LW$p!{uXKMzF2stll=YyEVOz$q>y3*(+uKl(ke*FgA}i{CGPAZO z6vm?=a;XwV+FTrE2O#2G4D-K8Q{!>_1$b&4IEI*QPkL5sNv8%vI)G_@l+jRQ78Ci^ zXJu5g2)swTq@-K)PK{W9qA+8~RUb;p7uT>3Zh&J{XbV+CvSs=dgnKY~@r>BW-ANoG z{dmzAU`8Jcm;n|@OQ7CBf8sljSn=z|+k!Om6$~^!(8DIVYvUC0@+l*5D6k>>(IuXX zw&BqPCvKEPvkn*1Vet7x{>BLSIVLRircF6}kCv89cB8AxjqINWa`VuTqpE-dxiu%7 zNCU|@KCKOyjbwK-vbt{8awY1`tzWsY9?%CPoqF^AVp-hsL5xs1QoTS?VUH77>0910-@L$-Qd~`)AW|oOJgjXE{%P4VlX+96Sb4RA_P=50DmfPDwV z?3i-oK1y)$Ehq#F*8>r{?TrbP-Vjes22r!s(+LMun=uR>#GaY-(<7=1l&-ov1tkE6 zyzAjWbj@=Zla7;{3yAKGr=3oB6{9m`ml;YMKM3_$3EAABz?jV;vZ{4e08}-Q(_P(+ z9L{&e8ijR}b$|6w|$ABv(pkTtt0jvNx7K zH|`0j$gwnDLi zAs@K?vEe5JN!S!-aIY|wD12#QK3@MRu@3cX2wcOGff3oDr(wiF}F&^%zY1WNl{Fk z7SN{5cFne#(p*u~3d6!&F-uW2!Ie@Imr~2}eCa;-^W5|N&N+H zPth3L`hh2mcr`hAvcd=w6vLZCNBg=;BU^S9&sgqE+}QB}Hm&6lhK-K0RgUI$$%;e<#IDEjqvvHJZorF^)%vWK-=$x?t<-K3liOE9CUJIxEn z^3e_|pnz0Q4$0TFL)elMH+y(oqE91aUel70d=)#1e=Ep?MAK-wH*oB_PPLRky;ycZ z0#*^*HjteV!fz>Fb!W<16ik$kGv-9jExGlc*f$Sj7}F8DxO` z@fCUOFf8pf98$&$KWNTs!ayZlp2-AD+HB2W3730Q;beR*0&KkLLuNx|4;)tmc=MLP z^Hu?=O{lCbqKQ|xq{$PKiQA&E`+`E+A<6=G zQWb0Lf%Qt_FVn8uWJ<7VDhj-KXnkC^wOspE3VgBAGdyoyf^bySN`+tFGB~#$zl)|PJKy05RHKJUZlh81z_Y)n~@ChEd`tM+!3s%#$J)5?D)OI=lak~3!=XG-)GCn@K&%KBSRkz$4nUoBf=v-m zLcTROQUkk1&~LU4qEP``0qMvD65j~i0^kj1;!%17!jBDq>JUPAt>4-6YH@fTwEGEk zh!b*@XIPKk1FHJOO+ZH-*~Di;0LcoQXnZbmSrIJX3GrSkwrdc$N4MXsOMlMR?Ujp; zTXRK*Q6$6`FOT(G*{j^;3>b(quBHZksht4QyfM4uV_-iLq>`s=5etQlomx0)oPiixSkh8}k`NklW? zu>ukCO1sdSPQoL$x!ALFBo&vSBH~TyV(}SW^9;?<-=>8lTQZtr7i@2R5$bjwo z8L`K^!l_#m<|~yjc7ZCkv({USj8qr^e10T?NvydVjI5x2Gft3ug4_n35Z(`xXl4DS zI)UH|dT9_<%L%Z6sl;+yI9Q7T#O!zFzD2-wRA|)gJsjQBg!9&QSBGO=rzOXG-mZfn zrML>nnfOZL{HnEOz!+w$eW%FzoWjR%UJB(IM)|5Fp9X+wK9eieNx9{Hm9WB7CnSWT zvXPO$jcUqs#`1mIn1$eYW9OSy3ZQO?#()Z zYA5@kqR-3CyNSA!45^Bs-$D{V_pK|I*U63G6_tWL(gWl?Y1Jm3uhuoo!(|gb{xSQ5 z`TS&*7sZ0#zbzft$px*6)p0guQ$=-Wd*D9OVgP!Gj_(PfA!wsT9vVJ^f;k-L_Y^$w zpj^mP$11^Q){saItRm+a`TapR=EH>bZyN9f&!n=cz*{l8sB5E>VdgFKha=QieDVdKYD zWffm(&oBWoB6OU(lS+*d0>zTTr=R3w5_o3Y{(cuE&JAHUs#{|OeAk;ICAncHO2)PsH@y*~Cg$^M2{`ylX0@r?ONcOmr_k2c~ z!qR#M2T3D`cC~@qusv??S&$8@*|+a#H%{Wvks@pQZ;ygy`Y8^9RMAi1;|^qIk?e$@;z9f?u3RMr)wW>CWCWB#m)p43#IZ_%*GZT z|Cu0ow2nuuONP^c$~tHOlXsXQsZYRz1OUIlF8oScvyCqrL+S8FK@F!%h;N9qhki^% z-FL=-!p1AX*ab8SN{Vx0n1X-O>U3a~;Q~{G%_1#;V=}K0T9WLQ_$rb*GwQGX*NKjT zuQR%%2t^arUsL=omsS6nrN4pm;K&ieHW_{S({EQ7;ZtyU+w1oK_p+}(O>o0lArtG4 z>P3Ez|1X8e>2m(h>J>T_-Hb%OVtI)N0z z2w{6agU@6gt{PmqTaBO{5OYRF7ypgamll?=V>xMP@P*baeU*kK3kX(|1$t;GO-}3U z(7m)tzGp#g^13jH#6c*}np3Y+4q3pJo-=h{j^-IsG{V9L<*=|na$n*Nrn7?(@OEY! 
z_DX|qPOPZ!)UqNZG9Ri`o5hLfNMnZQ)#p5cCW5_FW_T8a-FIL%3PK`QS}Rh@yeWhz z2~lzsV2Q%_@Ed!q4x;sy@k;~gXTbSF-~XaD&lx2}`M{(>{Pt%lb2VHhu&n`n7ut5Y zBmMOH0DASCTvUDu)JOIZjtj=qK(Rt(yIIWCBdm=B4}aIR1azXm$+OKHs1F8fT4^lM zrw-E%6q`NKv~`^ICVJxwj1os`EUA%c6~U_mPl)(~b`YHhTElccMk7yfA8o6M*iaHq ztb?GxmWdti1dEzHC(yQES!suht{y@=vsNWzzCGePqfYebP4V@G9nb%{mJzJxO~^k>RcC7DO}|Xqf7g+D z*vseas@^JwX(h-1b#jymonOuz{XTNfN8-g^#&dUkpLud-9$(BQ_viMX7{48qir?AK zY;zV=r=gx+I~Ff2T8*l1x=y7xv%qgA+|BVmOWDqbl2bP%X4~)zTYu9W!w&Czm7(#s zf%kk*s+oF)kO8Y02$l6(w#G|PZ4E(2N@&a^7=!2el>WG*EThyE`^PqpZ~9yv3Gx#c(RZ{#p{=19v#AqkcTff>ONZTU zM$J=>@_X<@&w_59fwB3V#X1Y*H-ZkW)WhS=Rfz<)Iv+pZdS}RQSPuw>90->ex4%Dr zUd;~RI-ry9ZT}l?k?BHy19l_DO0msxCxp^%n&fq`@;1%ahA2Z%MeJ@3Xb96NdsI0> zIo(Yad#OJe^fX2Nj!ZZn&U`+XWD;EgCvmLZ#YmLgdAEj5OA{Q{#uAPb*EfEM@Q+Oc zg@%m@#3tkbECpF!>^y)5%LLJ{oaioN1g*5?+DD4$@ZtzgknSUMsd#4r`N|T2K|egJ zdbS9ZXmZPFP$y0Qw>s%JP@@%v^?l#rad%6m;%Sc+%a~&$sDCJxZjl974H6yEc5i2% z5Ya@l@!QsN`D~^kYO7!Te&slIrHTydNw$vmGgJ6g2Ya=%cB&CHoirS=e>hXlEyl03 zPf&3*w%}H$RO&-_1HXEJop}TEXN80*#si8{Xqs8uqiC10rCV6Ni zLsLYCk1GPcMGR?83jW-${vP)k1kk4PpexaYzX2q;6qFA|!wR-ZeO#ARAbz!ih0zK1 z!)7}+@ktH)(d`wtYuM&;bO{L*9#FrYWwG-VvCU==0}4Tt1R1rVK}TDe9zan*Eu({w zJZCUMb6XMY3i4{$7oRHk^NcR_gU22QGfm^+7No<@9&W!+(-(yXy@50?pEf!Dz*bNw zqVTbNq>-d32Elp!2_u0hXLSI!<+zR()+N6jiWyxB>tCI;3l?;A90=vt(qX^l1SfMM zrPxSCk3dfQ$gS157;mt`w(w1aez~pJUcn=?yHqeKn^Rr>AsNt~Js60$Q5+HO4c_p~ zXq;j#)52YpkWVV>Ec!l-(KyG}#O zxbP+_YmFD`p^~-kEO_XJ-m7_9Ep{|v_*6Q+^zobUrsj`k6-phxkBw=PV64pzReS~m z&mHrgKq@Q98V~Ssp5acF=w)5-X*P)FfvL%I2@%jFYqW^QDY&JOP>3nvirp=RH9@tQ z5QrRhluJzWi$n4+XR+K4euth=wsKTJN_fT+q#7{43yGT@d!Pa+DVTgab4*k40_yt720@ zOcx*mIyGqEg_+OzjR_YcXKL%dP>YBAyQ!D)v@d^uVQrN$k_VRM_J4s&;`+ZTi7ws~ z{{ELOZ`;2=0U^padP_#WzYO9Ha?7dnm++_Ui-#HDhBr~h#~4QgL#?mKC`SW$AOkvi2uXxB)dp1e@(Iyd%U9UXS=qUN8=OK+xq_q5Z}P;0hvqKU9*18s2*Bx@fa ziwujt7ndc<19fUy@pAgM=j@BED_x3uGMmQb67yPsh^yLkb6wPkeT9>^I-#$i`g+_q zm#2%8`SpKx;^W7>VNB0TVF}oGdWsqf=$=0`VN}56Zj_9HrdJ(o86^GYBNR90fIlbnL+dmqR5Xq4{HQ)H1|1za;_x$&RAXa_i zcm36S*Xc|JbTQ*>OzG3`)1TBonzX;rmeWJy4>wM;Ja9tK!tx4+P+ z2GdkU&qCEQSlhb@d|h_<9w}0JR?6c)(6Mu&<8As$DjP{ zEG&Ue8GyROHZ9MYk{%Wd7NI?ysODA>$Y;@M9k10D5q^VUTdl zzZ@H5Uo?Or-*|6Je( zAQv{}V+jg1Yv+^ydF%W*{awx4z+j){KTNW95?qVvb`s_0ra@HwZ*sX@NdU4_-1t!$ z&Bni3>v^Vl&uqv?wZTWclo&|e8!%YCO5gg8roJMey@l5mi^4z!Y*bWc1F ziImk)Py}F*_26IZA1UM~c2r0M}!m$W3Fi^Hm^Qcz`fj$|6-3r8{ zrPlhZ<oP*q?%v)Bq=bnu;|UQXdxY<6&MNUJ0gA>#p|ezl*K7g3^nsr2I$$2!$RrLqgz4oGK z5E{iHXvPyBHt6c4Z~I#|J2Nz`FHX zo&hXEYIj$C%SV4bLO)DKOi?mMUPuhDs0MEHqCurgEP`7Dxu{=mEdV!UgtaobCpG06 zu>{Z-p)&G9gPvQEOz2+c(pah-H*&)Y+XRH|kR?q0@)TYS1?M`022F8V6beKJFk7aB z^5HTR*`NBt`Q*!Z2A)7N=JqHl4S;p?1%_1cw=lQ!I6z&3`P2(|mr!nTq)q@G9XV+p>Pl%H{on%wCgD#$SO%=b={Eo_hZlX(*hwBRd zUr*6mP@HMWTwjVJhOfn3sDcM=_zFI9Z%2i+<_`_5bB3yfzcrj13M-?q*rP&n9tVWrCB%sAv7)ya4#Zfafeh1n%=baNQj{tHGuY?b)@%1H$PSA%_&Oj;yFfC-+Qq%)e`Wdis2&c_FJiXhD)Xgc=5RIFo?(7lH#a}MNwpOyo#&&(9N1dtcm@Z)I0G&XyC+1!Q06Rke7%DJKc6*o}mr^CeYAM z+kQX-W4BZ2bYzMqqhBU{R~!W(TQU!QH(zwp-rcyO(71A2cF)aM|Et?!Vd~^(hkt&~ zuKJL$py{ZwNUHXr>`e_QAlc8-+@2mRsJU!T4K}Sjq%1xxHBZLqUH_;QA@}YgY#Jd; z`?<8b*k!625NNohX8XyE>`G=+EEoQ+p0=O8y|7!A`}h>Hx$+$ZCNL<=ICwy1lYG7) zqe4RBWiAIbJKvsyXjxMD6M<`k!mlULR@^ym#7$zR6xO;s{Nv7~pnhUv8h^MbZ-phc zor?X2fWkZ-zfe7W5>WpDfwSh6zIOr3KoBdXe~ay!g$k)Z8&@8rdE8%V zhqfQxjXjCk3R7J<2@6iVa*_(^trxFY1gWxdBhQ+&l#o7+o$X<+?A=EO%oF=A}Jv z&XK!QMH)G`Mo_>c3FjwFZdE@Je$V(aJg1r20u7# zFzr%R**@5Jq_Tw{b~sDCAs3LUb7!!dZ)KiYTD4^>xB;N%ISg)j=*$f${LXgpQPe zOYe$`Bihy(!uya|bux-@G-$mWCl{q6K`xV!UoixO(2<8%hY~z#l>AYRB|` z0888xdH>0W4}g8SD7n(0aVkR;OE?AeN@c(mt*~8g-wZ$eqcTHtkd&)p-?snkPIxuD zLrkE;<if$4UCbwdnkC? 
zFMLo6ZRr&SQmN?O0ctD1Y0COXXh>kTZg@Z@Ndv9mJsH>q%@vn^v$QS*2@uu4urw^JZ+6Detp=2YT~qyQ^zROk4)+8n!kP4||l+c(F2{p<+T z6Xg#tqw(_NdJ5({{~xiH_LlOSm(GoLd{S&>2L6w?0-bREE!ZKF!yNalUKh-PpUT1^ z6+E{G_E~Cnes@8Dad?*)Z;v6GNw2mtCkba%*(TOvfhvlAK#k&K! z0OJtY3|5z{g*t?Zx(T7)o!JNiIiZSw_c;cOUv43Q=jJ)az2yal;NDC*$+7~ri{a0JjGkX%k;ZyS}H zLe7p<2OWI!CLnu91y&f8WCfo@@o;3muCfk!-OWui42HM)ginZ^&g6X z7@hBrKlw21>dxS4iIgSyj6o`@c^oa&DlaYuF(rR%E`aHh!G=La`Td?k*i=PbRln{u zKEkz9;d%MNnUbb`zKT3Ap6C#}0Iu)$Kly0u<<4#kGU}jS@BH4mFAoz={31Ca?U>eo z@DYCLjr$h5yA>;y6Mpr>!{04ck|;0%zk{rmr_O+!V&JR`J&S8yUWaKY6l7;Yf(DED&XnD?>|di4wx{J5)$>h* zO&~k$eswB0uMZU#a8$^2#~)8*)uvG3L20Fy2{_Ae^aoZ#2;6a!r!yNg6rJI;e=haF zsDYfG9}**1EatVJV*+CaJltQ>Txc{{%=pK`_nJHiSm917=p|iHcbqAw@EgNC@KZ%h zzZhVL%F{taR#@#L_?E3kg3*~F&^)e`Tv)+iJ!cz24?6l%?D|6q@zYyS*JbjLD$~?k z^B*YFtYy5>MoH1GP8|<|*f3h=IEleG(EpQde!^sySqhsSY+bvZ+-zaPl)n6lJV?^< zN&nvWGqCsnJOfjcR!q#I1f$DqR`Wk)%;-To#fO6N7T~*<+Zvp~jaTJ$D+vu3@wDDnJG%f6al+O+fmJrEB#P*pxDTb&@)sEENuV!ohtow!OoX)>DBYbo zv0}GDWW$lXG0e`87#Y_^i5OVb|7OD}LJGj`;2RB!)l7gAIS&q0SMw>Mkq?ZsMD_2% zBu)Z(eMKi;1;4bG{dFWGk_8%R939{lMWR;B`rZz26qoqP3dXbE0>W~zUex z+HH}W7j-%nb$oE_&H&BnA!2vN6#eVt!N;3k<3K=zRL(FIm>^cY9+gBr$-1owM|m*O z?S%>sc|L;1tKeuM8O*5rr4Bn3?ImbXXK1$c~fhJ|M*uy+= z>!@7p{^s=^iegEo=ZJ+HM@TL&bA}Sf5#+5oruiUZOAN2XyB}^POOT4ug{shrf;-$S z^DZx4It@|afTakbYtpyQrb{BuBRPf)TokwLTpBFzVIqffzXD2X3~9N|3&n;*jpVz2 zJ>-xB4%Sc+83G)IeD;SKp96Xgf`{C{4h6{J4e!BoO0UC1}eaFZu{cq#?X!WrJOiSGD? z*>OC(5+o_%ZT!Q_z;DdDjy*7nHtd!dc`_}HKv3w!8L5y$H*B4v+s|)YWN#A5`}{sO z1oS66adD2z?G27=gdtxW%$n7C=Rx0t+>dMlR)&g0VUPDF2m{N>5H<;BA7Gv5QN&w2ohia^KE)wS*!%M0x7Z(aA= zuLY3uFnmFfG7t`jw(=R2lh7P9@RnKt?t%hYpe4`IArR~fr8UXNgXyK@hP4blLq8qI zT0-MpF6a7yW)40!53Trs9w_aJaBce_v>`k!mK`WBUM**I;*a}RHW|}+^K3uG^v-Z& zp7-FR{5QJ-i0gytI60_xorLC3uC1W7GKXk&5lN{W72>BUQ6{(p=P}#aB6&KpA*71> zu;(wSR04T9)+=F83XQ$c92ej^H<{c2R{G^jwz*7rqy+CIc)H6r`oa9a%)=oPo8l2S ziE8l!{X0!@MNU=;@2G>NRF{j$Bk_A}s7>4W3z~b^u^vxR2gg;HCeEZ4FXaAo;h=s@ zppEAwn{z>t6#cS+y&i>&SbW5x5=WPL)CcH)4gAX?qv~nG&>aib$WIqKt|o@q^qby2 ze~=L2SNUn*{NvgAkh>R<7`y!L&(GsDkWXi7j%Cdx2IN(KyA)XyKGyt=oDV+vAAG;e zch7%}9M9X0?#|n{4uxw^{Lt+(Cs?2b#EO%?T$~PRXuQ=soa|s4w6D5iK6oL? 
zd0)$~8-BHS*!a2bI#T(@pVBLvXYB^{9%1nPbG$DObt3Ztq`q8~OzEAxH>{5L%NNy9 zVoYyYkp{9=-i~A3%lBZ>hxp}J@|G~vQUvqp;4^A+zB0Pj0QG{0%6SnFXV>ycDAk*4 zg0Zz;tV6TYMX)nN9>Ua3+Zq=g9{#nURY(8zXRpzUZ=Y9^UaZ#OWE#^Q8;~5G=vhSY zEem_kw!G)SHi^GpO6v2(R;?V(D3#Rcw0BK@6t+%=g2iyVh^1 zChhW%rOPs)n1k$;Prz*!mTkj!fpm*3CPbvi0?jux)_@Lc^+pn86TE=j#-H2dXvqN& zLIfKMpw=uDG^AK8XhluwXj-ie*qs$(;{E(QE(wMQHB@zCbQ%n_mP69#dlT4;G4Ol% zux2k}wx{Cvlpu4N5^{M72#xo?`d}@}6|7(4h{}aaxT|^btIZ@|;(8{|QUymK-IKz; zu1m29)8Igc=xKbYYo!tlr_>{WtE{B3Dm**@a=lEW8)BuGMbuTK)zRufoaGHWmf)8< zVQjZN^;5C^FfJ_W{KvxLSY{a4YK9dsbr`C(o2NOpl~+GbB$=-K>D$Z=y$6V3dOxN9 z;>R&d1YYp1vEyR`|DwGo+Y&u7cq9Ipjl4K68D%4Fp!qX;zNm{ezFQl%t%?hLu`nROsH4?Q#sR0 zd@*Tg_e^EduA@O;6>6~m-k)r>f653gTU?^`1g^$N$BPYW z7{e&L#sq9=0&G7Zc2wp;P%)qd-9JS@5SAk_q~zzNq-hdpAbitp9;y;wpe`Zl z!giNelNmCDD6rmi5nfFfhHwl^;er%`oJrfRT;lYXhllDcVpO67%Z|;w?$9tm3Aht} zdg27wG9U0RXev?>RBV^qRY<+_fZHu7G7=q7EKxZsc*=VEYse8ztfRS5CK2FMCSzt4 ze*aoFzd_0X6!1MMl*^a#W>rYOyoTJcKH3q30k=iz?cY^_ua&DMQ6345?0v% zGFtct2pO5b0^yJfl}a$JHkAf;uM-9Y0J3S?-RlHKTPpkU`sh)oh%xZ9;krJEOao#X zYuG)s&J`t#bCGiD#mL^C-sjeI*Pls|rRL+}Tka&CknRp-J z`jp|5x4C(D?;eZoBsgq2CX_nNoZxy!b>jzTo=mq!PHI7x`N0#k3T{!t*FVe8FFPFX z)43p7)}VDa>E8Uen1QMBr{|B*1-A!}ElgwUZ~t`THw>fv$j3{OU-r6vy*^omor+ZyQuY5Xr0Rxa{_BY_ za1@|m#Uqb<4l+ic)pQXeQPWR*mN63JGQdDY%es8hYI9jh`hl_PLFu0HH&n5w^!+xB ztaJ>X-#@j|ZgY7}H3vsB6Ml9S?cgEHOZwD@SK9bFDe~sbqx|Bxsz-!itI|Yilq~59 z-~4>2)%92~<2xU>7o*gVpw9S7s{$ifY8}-m|zuq^|z!Nl6-I zcX!`nsgb$`MeYS6t9uiBu#3GYr=XW9wNkJ)UtlgShol%42TeOoTdzX^UnBz=P%cGf zi?yeDMtQ8X46tuLP;OGNqGvz*(d$~lCTR*K`fE*^t6WHfr70gk={OF4si?i4 z3HUhQL`qNnsn&i+i_-a{e(Tr2!0f~jQ2PP468-v2%Lhg^8U#eZuA|AUTrNKzZXAbF zDn9EmG*Ci781qJl^ay58jZ)xWIu!G~|L;R-ql<+R93MesjS*i%Z|cd~=hKVul&t*gSq?aNk3R zzphR%>D6r)j-tf}4Wkq81kXOzH>LnVVD8s>Lr)ePikQKp@$=nRoluDc(r>zWd8~XO zQbcsvZ~x_*#?h}vdJT{E^?G7#-q%kyEL3>Yzobo`oOxP;EBuTcoSOY6Pk;~I-Dh9I zO!S^iRSjM>+P98XE!B-%pgh@M;Bms!=4-!ty}WWpSbp)e!$gZUG2m-&V#R}@F;#D? zwxp->ipfV}d(ITOt8$n8n8zVZm8`t}AgeroL3{)M?TA5=%k1N%pxK4mzXtCt>>7Xi zp-ta3=38fsd?vTN>GO%Mg%7g}D#1&yPrZyBpZT!yDXsaB!6yBa3s38AI-$p3`>|9QOm4~2X++}=F}}L&_p80; z7U?=#c8#%11LGan`BqFZa2e^ifY7^nK7uo21qch-j?AE?#9pMuRidmg%0A6io>QrB zn#jxC4@1w((wdqe;pcl}>>tqvi&C;HVS){^*PJnLpH}E6w_$f~gMB8Ca|#FCTm!J< zKZ7)bKD89ewPB}W*f^bs^e<07tlyc?%BI4TlY$=t2j{u6*I=mwER^7aU%n}4O+k-0 zG)=9*z$=E(pWGPVQV{x)zSgQ z%pHVXo1yHUM60#`mKqU!p-Lc6d4XA62KgH<7niNDHO9bl4UEl9lxh)Dfi!wQ&!*0~ zmKUE=(B7+?b0C_XM29549GKk{c6|u6A!;zEolXx2e4y) zlWPkwy>hgumBDfZL{9gsdLM9pIx5rG$fPy!Tl-ss^SG-M56+%Y;{6`l5jqV8kw1g> zjFSw!2a44AMO(&m*arZS-3-jE0Pyht^K;aUZIht_R;-g8nV(FQPd=ndZNKpvT4WhX zHA{pM!j}gx?kY%_k)LeY#{X$fniM$yFk+&^O~yzvG--)2nqTi(@vSm)Fmw3AjsBz) zU*^PV?*p0A-Lc0{6^aG)IZM-mT&kpJ}%LH+_jbTY)nzu9n zT9mA|azY)6^wP? zfX! 
zKLy7C&`w0qeeKS^#Vtm3^$``ju1Z2k72nFiAQiugY7$xN3lK+UE9i;*L%0>E#O`j> zh|xlC0MTe6MF(=3MnP)W-pL%Lq0Ub(Xs{if(~)cW#l0TO_e!r+-?U{`+Sh6Xh|~FS zNyK6evy0mEj$q4#wzuh%%jW9t6cz+lNyuP?7VLaA%>yBLXu$FLiObOTPGCsvFkZjw z3q^FNS+o~B?iyzOMVcbXECnd?zn7PsLjw<&hQsjdb%=*gXk$$rs&AqOga^Oq5LmCW zTxec4-YAXr4}m%C=IvKXG)Xw4v7Uv97PeXekpU5@`09GDdo_S{M^>hRlsL_LRQr{e zAmhXg-xcZG-z9SOWj)(CJ2j>!~Zd>MWf8H|h)OP73YkL#6I#AM~ z|77Ut#oqa`-i=S^P)AG_rgJ*)EY`z!Wc8lQ-%oI3Ev%dAO&kk+aayVy>~UQF$KdbE z28Dz55&FhAE)MUPX}2#-7EKQE3yh!A!v6coG0~R4@JMUPm(P7{d9w8KvWY3LG*ooR z$Ceti@lRt~THUdevHEZKdTc9u+J5Zqjiwu6v&}~?5L@EoV?R!;x)2ps{rIYVf`x^i zZQt<1r>@WY)E92p%xVmtbJ)DF@BO=XZV{Iz=VBGc70tPM%+tFG)@w7cb34 z^6LyCu(_lN$%$DTiUvjLj^$(nf~BTaanQ0t@-MAa48-(ae|EX@H9xbz>$bm?b?#$ZSh3}fqK8r`-Hi`L|O&4jXA8Oco1$T`jH5ohF_(}rT z!gt=eNJ+o$?lk-u*M+Z}LFO?Z5=S$%WrZFdmZF*wHQV3~Pd%v~K$EHs$@gW<0eS+3 z4}u~lkhXaYm>ao5Z(hH%1{p;eRv7gg<^DxR5KBeXQwW!9VPERSN7f*7m=eG$h(8EF zE>~>FxqylYI|^$%qu_`)K9I!$Qu`VsyzhoF%-i<*kdTuAkioeZm~zbcc?w324Q;eIi`_IPL|WXEKJP)wYWj}*&_H*rfDAIJE}^Is zcJxe9a@~QBsY0+n+tAR_10tt7X|9nE>N)Jq@C7~QT7VLn^@C+@8qgk3ven@~OGQOM zCgFAholpbB^J|vz$bCGYU)%m4TZge;`#A4Thvw%zedlX#N&cHrwl9)Isv&3J3_j>R z->HjZt||<9Q#AfXPg%3^Zo_4xrl;*?LUR?rQ@F^l56^p;*1{fcI(R!*(9)!{d$v~p zL+_zP>6qZcmZa&>DSPhDqDrCDSuN*3k2<5~D-T|5OZr%)I+Rp90}NLBXjsFZ`z#k#j3X38S3D-&8Lte(3~nDQj7ITwbe?npbt3y8qhzwEIbS*RysfiGSII8a~KU8 ze^9T0^;HNt2QY1FzE?srp^2n}>H2DRuk!N|#Nb;!fMe7nsN>J8fa7Vo{oX|yu##C^ zYKyi*J4C0kRD@(*f;tS1)~Mjacz|P%^A#P?gQ<=`@&2RRzi;9z;)}cRf;Er^!Gu{n zMBNLW2A8&tTBr0C8i5=(L1^XjBUPBJHG1#BfKWseI4Oa&&E@jNgL1jhJIdfi3OqRF z7cPVxhhGrX3jo}_@ViA}HyJFR5;QabW)R1a?KHTSrXt^=c7Zsmy;>NNv=wtjl}A43 z{Gu9YnQ6UI0O#GE)8C50Tl^zLrlt5zV)iG|aQy{J{)YQuU+zzagdCp#z4CK~#R&GS zm5%L`BTpt;moz9nI!5voyFU54>!R6VL~80K9AE4Z)wJ{Ym%kDC*iGG&PiGxGp75GD@vN2`EL<#kZU7nnWRWnU`%C??xk{J<867QaJfhpI z^jD7+a|1Bl=aVF_BYTGL#X6Oc-$QlKLXv6QTaBYM7d$)7=I!+pqHeUdww0W-7@V5= zuCSh*!zb>WOAEaX&j<5VQ&Y=)BTE~EdBOJSh+jN_sJuE(j>9gq7BuR35hAF8nikRN zb7*?D;joGxU(2T#)vMOwG0WI06UujE^^~zrSf;+20MrNSFsLhtk~5$t-P!gTLgLI( z4$CRwLF;$cVp1@xR5Y3T-JcC48F|U){T88+evid-V03wstJ1Au| z*6gCnGz1z-ZDAt_^3=DN)y-DT8jNNb)7AfyO!)1&2K8{}7E*YcVrTZp7?i4JP^zLr zj>4t6HsLi5H9$vzCJG{ysmEz39;ys}pnbf^(~t%c;Z@1Uw87@bN#Y~|KNlUg*QWWp z;C2VA2tG1BAZB+B0CgXmZ< zEmUByMob$mxfl>LTDFSt6BSiT^z#ygR(&{3^C(-KYvczv%1&>iACuW%h|wm4|CKT! z$8~-*@fc?XkUxAq9Xa;$rwhpNC4c?dkj$Tu-T#)8a7aS6$U!E_rd!MSKh5^nD^afg za`;5;`$(So#S27-n50hmPvsYwqgKCP)!>iEFg!Uz^MSYf4trn%6NPUSM>(L=9*#0b zQ5AoiymY161Vx}l+9b?+eycayU|f#55%ezPiZXuO1 zLs(D4({H=P}>I3W2f8%;1DPjVomk%B`6();6# zv!@8fXD?i>>RyX@CXikxW8APEi`J{yzZZW!jyW9EhM-h`dOPzd>hhRVhpsFm&j5AT zdB@{4UWC{LTFJtonzN7EgTiZ_hE@PEayf+dbuXTnb($i1Wz2l9LP&YGJC?!|6F zQchwjLcturNlXPuFgQolh!Rh!LX=Wo6}Rd^*JJN->n4145{>8vf$SSakRfpIZ2uR zT4@MD0ELH@-801(W8fkOvS6VJkCT?&y}?Y7faEfj>kNTAS+YsuvA>CjOTftKkj!CfbTTVJ0Sk!+z3P6>_ z7d#}xNZec>7O+hNasd%^uqqq}HIN1@3&K6C_&F@~bvG6fsxI_E^*#DT&r$GLBf2(t z1N~NocV3;+gixu@8(8cZ{E~l8GgS{8aKaL9CiGwFT{}XcXD6}VVP`GnK~fvP1+#oV zK6B=L5|I6mUPB0eDaq|ddi>h+QmKSI7eSSy|Q&wG~&cxCxWzbeN!{(>H*? 
z8`znX)e`}w?(5-EBW^l=S}!kort1FV59a(nO5GQk@gA&oy|{QRUvrB=2>|aN-+8wE z1knp!qxMuIg4rCj4#YbMzq*Jui}egALedGvPd=^;joI(Z737y|25P2)jYHc4g*`rOZ~4vS(OY zZf>B~Juj0Q2Jyhb0xpXLkyjfZYeo^J>jcF5w~@f?^XL9qMwXxx zFqaJA1Dy?s<+=N`8f~^mJE?=*MLexJ4S(PcDv#s1+X03Vqe zk=D=!UfQY^DVUw7ac=7pert3Ej^MgT$04$S4-({miV#=MI*FKAhm?iSF`4@JYQ8)J zImwciOuHC`RTq0ZDRl3qyqDh#jO?&fKe9{bRF}kD7;kRqH!`I_OXRk$tohY4A+Y_zMDQU6&DLku=E&TDN^t0{t z_q6DvtX3_(Q%63H#5QgPxzcCrL+)GYbrpZ=k_saEFK~$_yCQCn+zO+_)EaZvI449?eoyhQWpSo6 zj=s|~f5rT=dJDUad+;^urAF_?^DamCV8^P=H`*pdL}d}W|CS5QSK`;%?@n~2xi-9F zZewvig@64e&Y+7=NywI?y=;~l;NNL|)*Z{NRr}@8$)gwW8MyXO&vs>)JmY({8jdAD z-K9mqO&qAlteP;q$=?>AE#E&^GkbGz_Y>Sv7WYed()jtV#imO#Jn4?%J3JnaWG=$~ z@F!uNeX%iCH6?F%#B(iux4`>r>|7JJzLPG#bbnOy(Yt(;vK60qMlOHGZ@`@WY}T4d ziT8hWe|LiGd6qB}zs~knSS$Y2wz$IS{nDD7d|}_<$>|*j2)_vO^ahnSKjXi|u;uBm zj8^HJ*t*gaD62`Q;0Hrm^T_CS*}C_1$H=Ykyo-^yKI2`7j5#LH`bXB;Joz#Ic2OT1 zyOE^XW0);x4D8S%9ATfVSAF+I#x^EuN+(o**dAMPk8MnPlJhKYr8M(yN&PkoRw_Lz z;NBjcU9*er{`38~b@x5#vog#^V&F%jk1u5{>9@#%kHp`i!#z%6f=vG=yPqE2;rg2( zzT_U}^{Cm_Xvf6)F!x8{2L_sUVE6y2!|&U3iv0$TOKi|hDq~gkC%?EtP8WTn+|aG+ z!|(1-d{!(h@!FlCq0D`LBD^qVZq}_SlZmE0rg`ZEx)Kqv_ z*$Kqei@n9N@N-myR;@&O=+A*EKGcX$ox?(#V>k93Gb%Q^-yt=hlWs+QPFsaHAx6H7 z7}80(sHaYeF}D42wNcH@Jmp%|@~&h57hT^T&UF9(PwJjjoAV(yhan_~(jD6zVr&j2 z${{%<6e&5(j1ZyCX*1`#b+A!#ibW2I9CGYn%Bd)G$kbwLNu+)+_xJnxUBB=3y?)o# z)gMk{@8|3Je4L)|cmJH3I@{NnV<@DFW(bb;`ED4$%(faCF2OmR3rKn466;<^t{-|8 zfYWy%)Jz!vHr7;(*is_$sG-#{a+vHK9$G>!_M0?vvyCqlN)4 zA**DZUGTH)q~oVjdH8JuBDyHgTwMN8x#BSWxF}7x*(#abM}2UX4YyI|6oXzsfl?Qb zsSnLW2(~G6B#>zJJeMQHH)0=OEcpG&3kM7xY!PZo#EAU6DQ(r{`Tc-ff^ z<=!26m&e;Xn9^_?Yf>RsbmLEw7VS!(HLm-kG~$vaq4GS#@6IVHxJb)X{h`-Mac{aW zC&K#Hc0E`w8T)iFIB59O!MgC_PpzjFFQl(ewO(59+pMgy4p3U4z*l4_>3uszrBh$|P3l)G2k!a_Zw zcJ41j$!Tb+7~D89t%luCj8 z$eF-rBU7o~1bz-EOoFE^$Ts(xdA(aV4XCpC{DXYiE zi#IB7LDo(ZAkLmAeO`MAmplU9p_b;PeHyxQG}6?8C~&wa^HvKDBz?NtNA4-R3Uy4$ z!9L&b7*;bvmcvPgNOdy=lr1Hcucv?d-mk(YzbS$IxG8w*ek4}Z^+RVe`Phh{mx(DU zjMg)AE`dm;knuE+67qH$R?n0e>V&W*ih7eMg4M*kkNQ88ei`&bIqLz*xN9~s1T_qn zRLG;y;Z}Da4wXTl4M1fPrlsUmss{n0hHl{xio)W}V6NVK#B{Ey{;?7q9DJ2EYa@}1d;9f7t-&I;6WlvH@kRB9&ie>5rc z3zoZrjtZG!Hfp}S#d?6c*=d75bO^MgcR7b&zUq83ydoSL(*1KcEy$dEZ7(X;2;6-X zcSOBqa+zUeP)qN2wyW?YzM(iFvJgi}>V2I2%iCed$_FD~2T{sncOO?DQnO0v+oiZS zg?!aaf3%dm$37yUC>smm+7NqVPRXYF4k5*RJ`p}gyPM0YdS4LDt99?iD3g_zhh`cb zi0*BA7IxzjNQ~vX`}0oi=}@27omwe4+aob=@GL8)=^BOR{=7v8^y)^lPA9~ZQi$=^ z69G6?Q(XpAy&oonKu94pO=GNM%2s=4aVmG~wp3`?SL z<<;u*Y)I}O2B>eA#Apt&Z%W7yKKl+_7Z`Zn*qg7MKi%I)WmVx}K7$x*QW!)cp>h9rUTj`>w{yc0At{KdHxa{40wJ9;M_{vnKkw}r}0(g&cuyY?8@RO?uMnq_tV*fGvYyd@FwnOUfT}KQNEh~Rj6!Pn9!t&J~lStkt!$?71xk8 z!X-oeaSp*QSkjv^3iK640w*TJHk_s)<)sj|vFh!dg3Eo>&RBE%OD+^xDH0Qdjlv)P zmLy-o>aVr@I4SLRa6ux4ETufG_A!OmtA?>N>PPx1tI!cQ18GklJ0Uu}Gq6#GtKw{7 z&a&b$JDQexb5K=@4>r1npLGLKD(Llxe2XYXqLQ`H`#HXp)n25Tq~TlSvAYdv^_T>k z=o&%j{VL-pE}CYZ$vrR82h>clgjFrt&BAKFyl%fpBqn|~tzf;*)tqt=lf0Cp@A>{d zhfc>u#g5w33Ww}IM@h4itZ}I{&;dXbG#;1u9Hr%RfU}%uwoqvlmuF3yWm)wG^b|Os zfXwX3+v`NoI!Yk;gKm;gfC$j3&_X4vqt3IeJ`r;lD))~UcUYBxu~qlv_{qGEPU|T5 z;&A2hVt)YHs6U>VZy8^Xm)35uoaHmPZ~92hUUmZ>P8*+m}4NRir&P=cpSl4 z#iX(-4LXP%Lgv76=ynbpTx>Syem~SI_;M~7yvho3NKF5nGeNo|DFA2Mf$XPBQmBLZ z45khmB=&UB&6|fux_4Lb5yFW8$Z2+>Yj8{r7ypKV8C(;E^@7f>X`0*0>SVHzIfza= z-29d8(9F4$=2FVnq?9EWIBzG9cY%rj*v+53CkGw?`h{~_()e}XaSj6$TSAu6E0~^( z9B<#}PnLsI1fuHWPFUhr=j5J$=y2PZSq78L8+;2FSBg$1TN6p+stkZ{N^{|wC{#UK zHzf~C>^k9^VISZ^hc|J{2B;3;x|8qLPP?YyG*M=+CE%sJ-t;^-28z{7ea~WMEK*oZ zarkqxc}ERzb&!0Lgk=|}^pp2)f?idWyxjP*_Z^1c&uI;XjglT4Da7>&zCaSrDZIfY z9seJ#0n7&*zXA5k7vNeiulWBrZcTWhiRz%esPYO=hOW6AY8#?y^n=K--JK$aExn!JSJ{I@sAs4jdd?oJVzqgA^ 
zA@1jsdtrMl7~tK+%Ez-d}HPDHLe2OM5uq6;Sn&^@*H%H2)m8rM)EK6*oSjTn+xJ^v#fH_KV zB%VQ>PyWnb&w}e!xddLmS3l%|Q2LUn1*_A30!}nOmn;UmD~(VyRd2RR&+h>}bP_-y z$Dn{sq-qbdKR1`s6V;~Zp_&7K3oe_o4oZw8!S#A9Vm#jdMh%@IJxOUI1kqBifjhB=yJ}u{rq9^}D#mGl zQ-b@T1=BxdcY-J=1z;oYe}Ik6N%BbP%fv*W(e6u*f0>xWez?Aaj1@7Mn$Smozce|g zo=IRNF~Kya%o^OMuQ^v(^;CsGXi?k9iOulZSNo9yr{_frY0xYzPSs4hq$c}&P)1KA z+!?G}Whqq8T?}XYT7-sPKCfq@7UCtX)co8}(4g(&tNYeJ~@W5 zqnWXi8Y=pz{bS4d@9pv-Qiy=h0pzSTO>;?a63Lyk8X$v^kbyrcvV}d{LG`w{U~!FL z4Ovq&F>sYaIKM;6fs>C73G^w2;G#o)0&s`-^aCJ|*n@7o1@VWh>6(hMIcpl?O5nD5 z7$sek_|M6ZHE*Jp8p^YToCZC0ZMJ5l3!;w85KvfF)?{SqBj@GXdp2>Tan8?3jki|8 z(f$Wbi?-57Y(c~Gd|s5u&1$u!X_+OY(PB!-S=KShe-HD;7gST8yTp|7EC@Z0`}&bK zF|!uo{y|kn(@6o49k7!6{R_2Bo67*6hLWD)BoW&ERt)q`eT<5UdWT3h zGef}N1RHpNTpC}?ANO>0k>3Hhix)?tl$`#F(t#XV1Ygi*`Db?kdH4%g5{W+6z;u2L zn&m5GGk7Ds7U-bl4Zc3 zP$O;U6$n}H6$5_n53se^8g}Nx#{9K&fe#Yczv=-l(xUQGaaf*Thy|~?Yo(mO0DLN zY1%>Q2u%|Qz(G_uXrkBotN3hqqHcMoKh8!E&ES(RDNJ&(IPL?_X5IZ+f-;+E-Eurk zO7Y;Q>`hj5<^MLS{Cz&wd)=z4s)LOH3(6Os|M8aeUyW<^WyaFW`p8?H_vhDzI%VOC zH@FmrNEINH43i7v&K|@3dNgT#E7>~kgccIzue(ptn^03zWB8?Kd3T6c26sx7R+ow2 z6?{3z7ABZ+KYH&jPWg-xxxQQbJ`Buut96uXKXvTfi4Z=j7aAOa4P0&2{Zam{T>>%j zk-&Iz;}b{#i1GYDtm~s=@$8@Z)r+ElguOP099Rq|m@lz|9!U zYjTKM<%9{!{VEl-1A(;i{-1X@1T`IVI~i!Z1rE}Tv2{FWUDwnhHu-NcnsaRO?+4(9 zov=h}O6JDb0*QRYby00~N&!OelL#H5{Q{6Gm1ZjqoVu8JCV%z5DDCzdo_19DTQxr` zUJ@ZuJqLmlfFS!$0{dE z0JJqKb>UithZ4v(q*?8%*8BD7R#gdvj%oI+xI5uQ3eJsiA~-5^JwV&_x5gWuI7gW6 zJH*sPv$)jC5P&2>>oa!#LiHrL=i zMK8NPHnyj`?I7Fm=??G92TX%~CJhGL9R97Uk0SrSs*;08ztz!4y4l?Ytbj2f5nchPrG;$fQAZk(_pF&Ma%c`=9yoGEB0j~3;XIbkR4F3qJt#BR8y z-N23e777OAwK16Nk2(b^*SVDN&V9ija3t^uFDuw^D`gj5aeGFD4-@SSa1D(j1>X%| zRlmqp=3YRTyIRFy*x1lIoV?WluR-~^4BV8$hHI>gdJc%xtZJ3?cvJYtI>K`iN5i`; zKWd`;%^0RY=wqmDWmx6k{Iy!zF`$N8AxLhWnfUWeU!?5}J2Bw)nvPBMFNQcmSPG$Q z>X{7T$Jt&?5rtu6wi-8gs+j_qw~NBd+VU}Qpc!FUC1Ve}SWk+!EG1i$ez}*DeZO`x zFh(6oOpI}}RTvwteKXG%W_vDAW*?b%3>1(kO?uMT37*|==RsW@1ll}mFZI*uanP!i zhv_A_Y@pWg8A$7>K>r@14n|37eEFHjBsA#2xUcbo1QRVb-d>O14}=_;PnnXtU4=Ir zZkfg_>!s4H>4bW?`cxf`|kpX&>8 z!q})vk5;vsBG-LV*Yd%)iu$vdI+*N2pZL%AMl5(zKZ!dKuuT#H4GITP`Tuwp0;7eLq!}|d!WoO2{6YgGYVcia-O)5|7!J_q|R2IX8xsyDmn7CbBgg%)BFU9RDMbE~t6_49|8ABFW%<+)A>O_catdtBqzm3TXWi2mMW+)pk zB#P2<`|Zw00SDyI;SXUQ$kKt?87!00$3~U>oInFl}%I^Fj3IQ?s$G8${Y3rCk9VF z`h{wd3om=OZ0Pn_1ZYXdEj)^L)&6l%;2Dck&A`qvJC@2j4XhMg`Mc%#6ziV#_x2U4 zJKc%$PfB~lD1YrSUE4u(ChAHia<<~g2!`KDDfp$e|A4s4TGB$ACK|xEnTuOqF4Zq@ znj#nC`EZ2~dBLC0@BEh;yaCKW{Lx>G39VbObSgcPt*Tx1lDdy3$LbWnpE$B+Vpx^e z;U})x;)#cT>6tsAqF=d0c~>@N@dR2m4w~HeC(?9L*vZ?+`Xu;YJG-;FgCm*OxyEod zBB9|p2XL}9C{7Qku4?_%gmy9e> z^7?LskA4?41nD=rbT%72eOnVEMseaWeHqIXbaWSvIt#spi!sOv`p*DnueDF z&Pb@2{Zc&Ld9MMOV|eipRkS(_umgE>_6>xE20l$7&Y_2Ph-~T$Q`2*X9ac=h=ZK=3 z81)8yLJJ}Zcy5Y5Io0yig~&*SK#7(mIlqy&9U*-*A6`da=Ql4iCI+ z6B&xCpnazRd#(7ZZaGs8)yxtJsx0E2W-pt0JPg;U^7aK}prpaBBdL@uhd7x_E`Xi4f2 z^^_rEKCQ#(RL8jx%8bT5DB&WxudGcP#Nj*D0X-9*(J@Qd4*lRZ?)ycB^1SAexh9G= zccAVSfJ>Ra*(KMO<|%ImThn&#_19w{w?iHG4Rumjou8r$C(5J^AwaIN-Ss8d6oyZ$ zJKD!U|0Sg&JFdFnd0t@2z+_Kx#hIugu4w2dordRrrl74JWOU-gJHONF4WFr*s_kDy znya8$(r&I#Cp`2qnCq)rYGzdNpMZ_E1Cx|0=EYX^AF>L-A%;~8f1M_Irr;g`^sLh_XdwnV;UK1mIpHn=Y z1cYtyoSH*_O#|tRvTCqUyTd~YP)iY@e`}%(;Zl@EAtjD#SFS545W1{h1DA^FNU{bc z@75^-lt+^KxGx)CHo1Ip52lU?UMTlvznsn}!BrxiXHgrT0|YRjcy%xp$LXn18)uJz zj?PsE1)Li!o1To*J@LEg70!Y1Ml5d+XZbo_8gcv6$PzxN5*veMFN4Aj+L!5qBf6g; zrvWq3c@txbj~BC;vXc@_l-+o{Gz%DTp^<1TkVt?66zp2XIJh^{dx+_n3+l{?2s)*% z?s*n4f_%=3v1?0GsfRBAha#+qM3aw@#{58$Z8j~V-B*qlPRW;O;`;ubp_-`V;;q2h z1d5_2#;#AbsyEX!R6a|t_4Z7+QI!n%r93JgRwhJS$Er+sTF1$2qAFiaUtRaQ&W6zy z0y90sbFuVd1~biuNO^;w@g~Rv+geKlC%-8q zfum;pK4)N;dP#8hsZe&cqnS0YDM%_3R) 
z`|<$^WgTdl~g`06Cr8>)#{S>YfKTWQj0D z?t>n}-G_?l1v-~#bTTgOlaMAV_hJ!$wUI8TqaOeiA)%uLX;x=CHPIgsk9_!bt_7;% z2m7r{MV`yim`Ww*7!rqb+~X&g8lczAd;p%df|7!ly#kp2RMUSj{U(0ey#ATpcYXhD zD_IQWg(T(V2T!)MoutGr;r^0PvcVC6U(;(1X*e1xP;>OlONZL~Y>P!zJiLt0+znjM z8HB8g4UEls>q$yT<$r;=C zh)mw?!a{!3_eSGL#*8Ur=*3#46r8F#xEKp;t5}P`+-=X%5{NrAt^o_gC0NY8XdB}N zryTfofmRCHmo;M`gBSsCgfu*HWQ)y4LSe-NeI9zNa!F4EVb5hwtbt67& z^jo+X6*SNR)@uU!PJ&aeXX7d{zpYJ^Ai`Qt|)>iuAudM#y+o4x@!AnoQ*GR^iy}3S+ z_-71kSFirD9bMb$^4lbXv_~5zYr({@2F`!6@BLFF27azpzANS4e zm;sLj*!$ytWegx_;7AjHJ-%}gNC`meEb2c`0evTPc$6Egmp9xN|Gna>80`jNgdlma z0~XZqSsUG2=uaLx;ybr#&6o1T*@?Z>8(gk7x`nG1067lakWIPM%oVUk)E2XHkUcP? zsLgGp;BK{k5>Z2Sd~F^MF02>-%R2^`I$EaJR53vNOkWhgZ&=0^UBAHa8~cxRe)hkL z1=P%#(SPBaK!)Gsh&HO<50MsuyS0ZQq6Y9FZ86JoZa&#iMo9;SG3yYq1Qx{L8e=XF zoDKKE2?pkqr3hO&OW}K@u#~0_b`4~AcxArd(ek?ga8E}kx|WhNzo!cD+SS=(T5|ws zUX#^dLZXGXUaQ~vW0w^C##^1rBp&6Rj??*~%6&OI#P3SOQHTP#IDF`q3SB9ME94YH zV*6q_5`$Og#Fg%oOaPozW)<1fvb|^EeZF6uaC6!~JWviGkb9B=lF>5L$rjm?Yz@3= z`T*$h%v>dngA2a1*)w;>Ml6cIYMB|818rOy9b9P)aCIegvZ&gT_>Bsm18u*a=`R$R|Iyf(KG$~V_Lt=YdCHdqXL}rj z4&`k+342e6I3dvY>RfeA8GQWx%GeEPK1f1giGg3BDFHYYo2X5Tuadzx7*`eK!;g|e z2|b6<;KZS_`3CIvi?GcJbCS^G2B@PPyq*h{Lg=7#wNPeD&hFL@1a%I*gxpJ22X!O8 z$qSeJ91Zik#jc7x>ucJai&~fyp+NrfB8WN>%RmE@LwFNoSCM&YbIR0%=&gB5%Aj3H z0#yz*lDvcy-}ZD<#jA+HM$dM1j`H1w*RWAFE!6Lk;^WV8u zjD^JSOyiO+in5*XiI!R|lTVio=fYrzCE@ek6yV(gJ2l4Qy!*Rn1DU(C;3H|hW%e{o zGqYg!>EAksjkj!lr||RdAg@b(PRR7H30#Yct4PK{bfXK0GkOltG!OXDk8q~4rdf$k zfs%&vL#BKMC3Cmag;3=!0QZTh?5{j>+9Wge4Hv$NWZXc&DD+R`jQ`J7brU-VPY7jl zLF!pcWlmSvw0lAHwEMH}+Cu-4`0WFLJvFUb5XeVRQ9~Zv)SElk>M2yr(2QvC5mGKV z8Cb%bUiF@0FLqe(VZa#U$)yiJ^;31rMQWxjDv}Lz5M^`i;ouPyzHzc}O^nzw9Y1Zw zkdMK`<{H|LTupRRIAhu6u}Hqy^vc=NV7dK_)v0`y^?p`qhh(v^z>HJtel~ejh;B%} zHmbBF^*)%gLsTp_#N_QliPXimDztDQ1D4QA+=Y=+b-XC4S%d%G8p=74qu(Rr#)Y`P;bIm;9ek4QDpcDlPh? z%;gr9!z;hO-%{GOMfj?ycuBiuaReQ4IvfF{<&&%(pf z7XhOMu6Y?ynOh&Q0G+$}7Vi2mDFx^K5Lk*bpdPb2`gefF2#}XPs!$8n!dtbw5J$jC z!Yw7?YUoD?jGC>$fZgB1!*^I=LAnY^{#cO7bAOl0)AEPdL!S+~M~?&f|5zF|#J{KK zGDwJpV8@qLgDMzINjBg(u$Dhbx+w2mA7~lzo7oFx4*Ni}9{^1zc4<*<3*pO;|9cy>egRt<>5BijB5eaxRZ=21zabF2|kcr%G zv$sjL7gY|`yy%k@mPBG2Y3ryyiSP6?~_&VN(jBvi^oq! 
z>wcOU^D7;0-IE5DRhkQ@ic$yFQu6wNd8~sjx#R>Ny20;NGt~t=5mmX5&k#9?Uh9Qo zBk`YyPHG$8lo89T-~#^v4INgSgB-~3*U3?qVYf@b-&zLV(PvpbRc}*aSgFZp^Na^~ zz;fX0ORwTWB2Biim*1(Q47%eKtrSQ9xH_Xu`VzxJBGI1*){JDAn3h}P=&F?1ULfmV zNDjlxW87L?$>blxO9!zNeS2=o|Mqs=KA(4>qiOj9>Q76=NoUX4fXBK08Y}Q71{vq4yWc5_ z`W;Pp*&D&Fd}y6E<$1O3ok?{1dPu-4{O(f&4cZ5f4_8suB7>Ch2oET)ZJz+5o# zg48=LwAspXO9)OC4H61qAf$f+8Mr=Z=caae-}oow{g19jhR}0}|pOZayHWM+hwKoSOPM%v${tT>#!_B?BOu;GNu&iqJS#Rr`X5eq`- zJ9}cInSPWtBw>saN7)s;SEZxV+2gwu;+7Vs@Oa}~nC$Km&D1a*f3h7cb!BkbGQKxv z@@1w~NN!w?qEk$lDD-f?qra^)_=Z+G;+hm39vic_7n(qD?LjEM!&}G77ipGNerab} zaaw_ARgSFFF+Nj)WlPjhsIfb<{kERO0)pasXTl%n$YIOCDPOrRl}Z+Y6h5B91-7Nd zC!+g@?EuN=iGq}mX>PS(%>0oq+VVp>RmA;FNiQ^CdELIvsuAXqFCVMi)HbEQ{Xx_Q z@Z938eEcC;GWL>amLeqC4{h86B~32>wtB1T0)J^wscW2_PG9&saK+KhCsM;JYVshV zbJytW7l%IdI!~{lLzs`3nsOGO{6RGTX%ZS<#AS`dcd#{zKmMcb9w2 zbYGZY_O;{S@mCmI$E0OPja$)aJ;INK9We)~3O{CNo{!bQcuVYB+91;R1B~1wzrjxu zJms!0v_ck3A1^&gUu^sgZT`M;YT@pw5QbvNZtKy^=KSi`k2=lM4?cEsUa_h- zquQehD<v-d zJ9_5!97P{EnT!kc+XW&=V&4IVcHc{fefaW;q({IN3NEW?ik zXfQx#NZqvYm`O5O3NE=Re|PsmRX5pl?Dq;?VEU1T*s4`Dp4MykZ6p(GA~-~bj7~om z(iUV1Y;xgd>Pb>}AgJ6xPBciv zNuElbDhy<7&qc<}EB@cyO)utu@wVGJ+$rVxxiHr$Wx-mDojajy_Vjg5@-;kEb!Em~ z+jvOcekYuEx>6`7X*i6sBTw#Tt%c_Zg{Flb{87j|cD+4!Ky3D!oSH6pN)h8~k_JTm z+IN%s;f0`-U8?G(O48*mfTL_5C!^R> zw3UpF;5Gv-oO)ZGuA+PJL_A5DF>nxs8ZLz9_J~g>F)gyD`<*@R^U841Tz74Zg?vM6 zY3y0+mtr4g~><6Bo+3Q;fNh9};lk)L#ai;?)~)(}XEwh8jlY z@4-V0oVjav{FFFhl#*)H*mhcydZvq>8JOvT_;)f0SK5|(Rl7c4?JEC?ZWZH77k2x= zh?|POWw06$&2rW9F2cr?*(w(6VV<1zf%r4$b@y$JA3O}XHoAOF)_u<{(w6jHB?Zmk z=y22a>fH-|@c3Ft%kp2;r;VSh(F?8Xiy${)j`!=vIM6cVGP+F-skdT2YR8=II3VDC zlUn#``-+{O<8!C)xF$QdV9)D;*Y}3%6!c#U6kZ2*KVLA47}H4OQui93Xj>N`q$-J_n5Oe<5 zaihiiJtu1{C>il2qk*i;i%q6auk)U0^g}bZ^Ss ztJ7hijXIb){dP>Wah}ZTBmEO0Z1@MpCB-Zyyzg896uZ$j)NM*<`ry0E8D;#eQOX3HGjCodUysN4_CdL?nV z6Ul-Q2>i>VL@niY12vQi2(W<^s_^D$%Vo@n)cI~y)TzeK`*MUkelUJ_0CRj_vtJ=g zL=AiuG+!zIk)1Ve?1LCjk@gzfWAXndt|x#nzx$E8h&?pUOk*V$Edi&~i~4m|WF&Ss zaZlH|n_wvvT5s&vq^dKB<0Xy4_nhfGeH~Bs8DgBx%APjiHuI8NKo_+MpQv&(q0_?4>XgAmw>|+KD zm<|Mur_I&HK4bOn7at|BCj{6BE(~e3v^}$zWlulb5yW9^t2@n!jpsFen;$G{dU&$X zqRax+zQe7rl242mQbKvJhLX?qJS+s0$T)_;V{(3)Vt<&E%qJt4yp;07!L%ES9QEn> zI+H!1SNcuAB>`!E6un`DS}u`VDD$I!sKV0M?=LSMY5P6=fIDx#ZZf(u_GQmw zcJ*Uke+LrSd!iuo3J(I$Ub_R2IyNpbW#npI`_;XTi9eSDe0h9R0UQ`vfHwe1*2#t# zx_fU9*q?AGXnmOCQq{_5d`d$gYm>cDDZoeU`hb!(DN551*ksHcYX}gnoF0I3_AdGL zP%y50)8z;OWJ)RDQEFb}wfp3B{v=bVH2cJ_h5etH%IR#l(r|GuIAz+IE>Hb&8K1n? 
zsyAuMskG}+x9rz_RvaQH2X*hI!pv>-_f=GFy8Ts2{{}|j>}7}y{8CE9YV~ZwgyYx0LO>MwhfvLiS?8IZycm8k)uz(D`z$KK5ZQr>8|& z9r@^vq=K9YzI!Ijr<0j{{N(pk9^WD)^zz%Fu%=25d+tPtb?BWVtj?`}b*mgCHFcHX z#*XISoYg82QQ{OkAzCW^oI?5G*>K87i+8})J0$dA)GU41)T~XWLwMtEgc4igtrjK* zpx0gb@QD#?NDT_1iOF8o!s05&*_n$5Kf#HT1TJdJtO1*|u0Z}rfM+0OwIDK^i#o8b zYxPcqt>8R1mO-(xn~7B*^z+>%5eg8B&|wgPYb8ZHTr35PM?NbZYMJu`2;S>tu^)zr zQZeeBimVmsfeM#@=nC6fZpg*W%l6QF5^^$n6zpYiA?`vdYSZI#nnPeNl-xgXcj&p> z*`W*h3g3d-)$ke5>Ifz!UEvs!&M6`O*;{zR$ zqVtR2BS)%W?}Vam*MA<0JoDlPUp{fTqSLS1+q%NFswA(%HQ7(Xwo`-eOi$Bi={YpN^?YnpD^!;xJvppg|*f*m-^wE_^ zKZKgL*_0g)fB$nnq6_j8rqxP)^-Yx%k!<9K7k)JV;L4G<)t~x62w!Y}_I%$H84wE> z(ylVT=>ppoc z_>*a$R!1xG(YN|_9*l!DjWpcnmviN$uOO3`09FT<$L;)k&V4=M+Qb!&?LSW}1vYa^46ovQQxInfTS25GuC!G###70t=&mg|Q*37_(Jji7kpn zJfW2*qGLZaPk(s$KAGoU%<_xI5dr=VN49@8lUF>7r;{yGcv8_H*;UV{i1o^cBZFuBj(A}Xgaubd@ z<(~oZ_p@K#dXw|=McdetJBllK7dUO%Fx2txd+}+_RvQYywbQ_eYBlJyn)W4^ORYzRp4DfD+L3Au+v%+F6@0&(oYNFV2LEEM2skm zu9zZ_FRseQlCQQ(!4(2FpnK5Sd_D*|DQmRGL*%sO970Jk1JK}`fY38po!`*ui)9p= zWR7z2ZQHE={WvmL{ztT;*HSlsRo69C`LBfrDD**}#IM%r1H0Jk>mPi&t)DEld?r@k zGh9jSF`j$9Bgf}-tzqy>-5ZZX5&PU*vY6V8=k5=0j7(uNBbH-Kf1J50GkRmxqH!^K zrGC`VL>uo@edJ(cZ|LDiyEHYv-}w08V&T@w8bo7+c{O-~?o-3*8(UNv`~BzaJQ929 z{8(|BEyDY+v}0}@h=RVjbuj&|<87Pc_l_3Z=6IKYh1&dE3V)0;FHeWX%(4xORz6i< zUSymKd6KfD*e7gGhOn$5_qsO%7k(4=X4Uu;mkDmw5sjXTf(*XDY#XyTT`=~2&*N|} z7!@pyg3X6UF4eu#Gm&TUg8IejA#g+AvjI^6OAR~ndU~O9!@zmgLM31cU^h<`h#{6~ zpSUvPs=eVEeC{5!E(@f$w4Gn3^t%wToaLrh>6>d>PY0k??}`~oNHmHub3g|bgC$C{ z{J31O{vehP#d#65fZodObdW|I1wlm%oeB$XI(M)w)d66Q<@5W1E{3a|lsJC!$;okH z^$w)o!`Z<=X!Wbw4A+ow_VS+MRBF|q|Kc5oOvK^H5N1$wOUUv8!lEy3sE-LLY`g)U z1Ub9-?w^KQuDf~u(aCk^-N#S;Th2Qv)Z+^`t~a;DYiB;;V%LThXoG1lA8qS2e(>ZV z``@`^{%^&j7|7G+Ywx!+n3!rw{X;^3NTc&-$0Gi^&Mhk`FJC@u4FP}X9STqUJP0|U zV!~bBLP@A>k(yo${)60&3|Sk&-;gT+}BmHZf;VtJzgb&b>WpG@Jk ztDM)$wM-LIX=%J(EKYgc_k;#S{u)CB}%XP%$Hg> zJ@551{57_pc-q(uyITq2-Qd@;9{)Dz+~7|+PYXBiNSVpl&-(~Vpym3_bPpxZcfYnj z?!#Dkc<*)5wum6kkaZp5*?y0vvl>+=$z>U#?+dh; z$=u?rX#N{wpAm>REz8Sr8k}#yb=QW#iX?~DzlHB$7-0e87Tie>fjpwBeR|Q#Gw3gF9q3@C` ziDld@BM|#v-dMto)2*Xo=g~*O#{h2HtW)N#?sBjS*#9pWiuK3o9J@tf_<;yn8cz#} zW`O5ugU{UlPTzEOJWk+n0956&fCVFbyIZFK9zek9q0E+RJ&7c?#A`Q~0Gu|e(i?0A z-5)PzbjFQrlY*zk`D|I$+tv|Pt(!OePyQwl3nX!T{dTJ3gnm3Ti%ErU+&cGR&imb+ zD+8FS{$d&P_0e8#A=2^A2Fx_chI$?M z*}&{)JZc_bM>$B5%{}i>6EBmH+6!&GfaY(k2<{y#$2&>l;5NCg6E}INao`QuD9W(r|bXc)bSUe^2Y*dEBRMjEYq9 zi@syp6}+*`@0`e*`2h1YQ_^ou6bMiqJug(5G^W)9B3}_h;oKid);1TPXQpor3!hOM zg;|^pP0w{{J@~SGJ2h4*9C^y@b!lC=bEj4?kw2zzE)ZR~s-*!rA46i(hgX5|<4F(# zjc()kaS#qx@D6{|ZB5jgAh&3(IdH$N*oMkCP4G!SRT)XAzU(vDoceTbKo{OB6oj>XwN<&p+5p!?BNssxIlDvCrGQd)=!*MV5e8B9L}{d zK`1BLF4To$jL(WfrSF*U`!(qm%C;uFh#X~qiF$t+X8Kj!)bw{yOjv)qQQV3H^{snL ztoA-10QL@FP>GaQJ0PCAY}1&(RYgBTOLspOjK)#5Cz_GlM=NS}9ryYm@)%}*)b>G` zs;y+6%JS~}qkX#`)ONKW^q*{Y>#PriS9w3Y=34FXEU4+2n?Gkdg>wTmKFz`5&Xpw&}k)X>>nK5OPONK-e{;jVW)twF?C0s)L?jxQZ7 z!j;qEM+u>Sf=|-FVwz_v9P~`WK`7oD#MH96cotZcw$MhU0k;q(=}ny7*8c>=`M`2$ zHZF43y8i&Xn1#Gs`;|3=tain9Sx0Vd;Zo{R(tt62w;NAfveZEpg5bl-#@^V)QJeAF z^Pyl54ISR(%l&)z3yIgR#r$Y#I4A5oTIEXH>m>r|=p{RX^wrL%QS+LpN38$Axzp1; zieliej%P;Z2GP8OgfU^{0`(^=elk63uvbxY(4q2nj2!&QQYvAaX8lO@ou5Cv<(D1@ z4#&h?+`yv#JVh?l>y{K?L;mqCkcGR3h2jG}=csLo(05Bl&E9*2(0|lSat_5IE^nOo5b$;p2H(pKd68NI zY;SEI?EGzv8R=%;cxcG)g&vb|X###Y)ZO0|%o^yU1M$Mcq1X6^C1_csD! 
zDtFv_{k?}A-2UPrDR%MZx!1RFA1oa8+;PFO4tLUV+(`Ay8JkxB{{OQ2-9h3kvpBfYV-dQ{6@%d%ic${X&-^9x8Ob zcwahH(LUSCgCh3j9m?xcxK(#}ExrAn{fG3OfBDsi_0+sRdDuVnpY&bcM7pw5XsCIC z(qBVv&&>4a&sddZ`mM)0V6Ua_!g)f*h5fGw{N<0!-F-CNB0;|SIiqtH?A8K18SVFM^bt_!!3O+}V7R8f1LcIqUyJ9s zaf3^uvQpsrrwH$LKW;8x!AA87wpQu;Wj!{ z@gU_J_cJG^_{_{fPy6?)B2Rvk2e$}hWEBGKrz7!emqI6kb#g8&q`vq5$H#1-7J=O=y4s8t6l~si=k3NUg zrHjGrgHi~tk{n@9t?B3z*X_Qu#b9QD(D~d|+NWM5`UEl1 zA4K?y?}MdWI*tgo)}=v$8|22PUc2RO>knpFiT43Xeb457D$00cHg6q>>TGr@PZX?( zS>te|SiY4LDV8fj`6|x#1#fr9%nM*C!bSS10Y`}^3=}%pN*k|T$A6fvdy|*Kju4F4 zhJ%~p_j>Kh@UFTGKg&ZcePorHCB(L!_{2Ec-4D|~Kd&ay?ru=)@for2A}eHHf4K)g zy663apsKXMESVp-Kcqq>zV;dX`uv`K?s1vK%rnKnKXLk{hO2IUbF0E`L&k@G|H|9b z)z?^Tjl11*D_Rl0KjV!{{b(Zqs@Lql{K^(#hpB&wthx0M%v4YC>Q1_D`OAm#2Vb=9 zExO`(*?q_(Z@%7pe#9dBx96dcXXFl^nZm>v-6YNLt-E{R{SkYuGc!O(CpFu70!%yu|sb#|EuZGmpc*#17H^m0YY+p}z)(tDFhBJu{S^%CQJT zp2=I<8OH`2oMpELgDI`}m%sH$xOu^DO(V7DOzSWM-TnQw4`)#C-*;!68Q(!uM~i_) z9D+2Eoarc(QnJxRd48Y`V{y6g z5m(@@2Y|(}H_q{+K!ZVDWpW)8ib9}3;BPn3V3zy%aBj}WGKoQ+N z<<{`NrP;_8k?UMhi{0-32y-Mn(u)i@zSF8U(S=-jsLlan4cV2{xMh~WNgjSi!#bPm zckQtwKAUh&|gXp$k#)N~0iV3&~VIJkfpRpl@Z)&u5{vW#D zJRa)y{rgWUTLvLwhU_U>iZHgyzLv_GeJOi`F~ry^#yYY^))=DfvaeY(mMlYMj|mz3 zzHj&G`rOy|@%#Ml`~KIXN7v)Z`99C%cpb0T^N3}pXQXF-QAdVwUUO#o{d>pA=_15I zTfJV;0#9#oZ;fIfS8>HsF6v-ug7?KCmpa`lT(yKUSL7Ax-BywF2Ls8w==`ZTv%I=p z!?L%3zb7l+e%6~MIPkG=BQ`DPIlpXoP&X%9ZC1TvOw}03&WA_zQJUH$4(K*gPb4Ks+BzMqR@?xzO*P|vWne6W}af? zg$IvJ)fPYnZE7bpYrIi4OO&+|%$NEMY@NRrRx_<@MV^=9_~Gl~`Y!|B-a+MSKz|6r zzfCOHJQ-jSl)wo2K?TU|0@B>TrW*nwKlKYbQv){xB&&E`0@zCNm_$0vF-Cru*yksT zUU_|SwUnH97?|-tw_{i3wE7JTbcajeK*xmZ_g{~P3EcqpRU$D z>N@8sBTEF2IBzPQE zs7{^Hl`4`RKWn~4^?#x;6n~>h;jy?EqtVynGxM@wuv((;8AD>Eqjss6Qg13Q7Am`=Bt(FT^*-pT!pqsuut9+ zdEI~#=7^@j%&UoP@ulc4v9R|AxnE8z=gu5vt@$*rRg_IS~S6?;S-#x!(jwm&w3l~MNia(ql)WuF;ho_?*A^Oj zJ`LJspK4&+Kqj+}H#u`j8(Jl6@9r8@t-Tuy3VaT(uLmA*`F^_nVbW+!S%s`PaN-QQW-;h>FXV6sSY zso8txjo7pk{&=*$`3dx!CdLhJS040PH*+xVCXPilf8b&NnjRe3nBM5`T!&8C9C`(P zOnb~SCiix0+&BA9&RC^@O3qNCPH_X-UdEt7ZQ-AqH;%)f_44e7!)(jEI{qMhRg&_I z4o4bA=g$8#dLs>~f_d`_?&(TZd|~yman1BjLjN%F5PL0(r)AuZpOoNg+_{~tq-bfd5Krv+krUfSI?_HV>F5h?{ z#9$xnC&85KfhePnLcfx))&}a2l4|tnF9lxOl2a0v*@cy7BRRe(xi2(SnZ0tkaQAhccYee|I}o}7M@V}Vdh5g4o{&#x%y2>^*hI< z)^h>j{H@+>?|H8+%WV7|=x@B%!}<^@yXLe1lS{I1!!I8%rSNOm+Il{oAAVYEo&{;X z-KV%mxR&(ZwEB13a*Nl!o9FF!H_Tp|EqJ)lUc3&!n3YzuNLqcpeJ88$dT-Ge(mqbL zkKpOxee0*N;5DgD3Iy-*2Rf3wWY1CvgyAx4_!RsZH~DC2Rr^fvM31OCR|JzQ5g+ahP&l`r z?TWd?wpu(%8iG^O*mFsrYxL!pM0@8Mf4;%8rb~@YyNj@!(1e6UIvG9I|0e(VG;nn_ zfo){gHS#!Q;nG{iu{PVe+5VP&`-qv~1yvvf2;DA=>`KysZ%FA%Q)J^ssJP z%0Vdxc0tu$iNN(}O3hjGDm=@hlzbfo1wtb)fy}HnYLgN67Sb?5BO>B7-1euY6Q!Q3 zmOfy=R&+%nUTXL6gelYm^{n=cgT2xM(g0eLSJLB&hA5vTLi_9B^H8DN4 zlV{-)DLGhbe`|{19cF-d+5VC~P^d5*i5h;%GV&6Wid&sbf~i?&my&zP(05zf=RX3w zovrzz!E8&*3ys0S#Up=KJ}jCVRR=tfC!j@)oS*cDiPi8#ErO7Pi4A_RLB;$rfvZ`O zSM~ZMVf%bT6k>rU&gXP42JmCld1t|ANFB$L*%B6~O3Yw@T_&zwiG^|qfaI&8_bKj# z0u&rjyXZEJ0fUm&T`#*>kmXuq2(kqhRb{q3I`u%eSPpKu{B*+(z)vZDEGb>Fecg`r zYsR%|0&R(3`t5a^^Mt`pu|)7)wQW%LhC434X{_dlA76a&x&Z*oK;lrt8K(|;sN+yz zuF-pV{AB-U2VLLUo-RMBRJh83;Z2`FVGbfM8GkRfD(K^o?@PC(Cp8X9E{%R1-67xF zDd=H0(m&jWNWo>(E?@QcmF`Yt-Zwdz8bbKP%-OM1#XNJO>6keNQ+?MN^QE~?O|Q@bZVvQdV?v9kOcc+A47c@T^bnId*QN;{heYRx;syM?kK z-ucgW8p#hon{AjI8lv5S$dvY0hwL$kWO44FH48V(vpw7n%V3q>tfx zg$q`rT}rom#RK~BETH-EW`g%@KTO#Jp#5HTY))w*4Dm2Y}sR&9(w9pF3&=aEPQ9yWqrG<d@3uIfq`tO7;d?n(wi0`;xf4E(&4r zlwI_aA%DKUoaqGnGDck*Q#t&Q7q!vHMoAJr{5Oqi-u%jc+Ep%T)^hHM(>3+$R&y4) zu3=mS-HXkyMfl_25BrZ{=v0kI{@2QfGG&%Q5++{2w<${^1!uvL$;2T+YS(byH;A(Ga%qcuq*}U zSPj#7Rb*=}Xx$#w16m?TabEt(HJ4YKi~YO^?BnaX*r>myQ!Ov4^P>EATeM7(0gq6V 
zwf${i{vEG(wG->-)j?DSmcpSO`QtQ}uO;j5Uw5P@c_~PT+AM<5G1IX>b7{%jC z7WsP%Z#8C)HSJH1c(ps^Dc*=U>{yJFDXUt{xl8uzQbfG<^hT0P^6=wH zoNc4!35EF`dF`a2Db|;((PguNM0tvIWrHyv80;2^IxG|^7_a!enu$9N-!(Pu)!#Y( z6+lAFKFjilpGvgS@yuP#-f}0){-i5N!p*_Hd&_kOX@lOWPbn_J!qY!$w$QECHy>{+ zYz`Gih0povXwUQ(g^aD@X~aL<99^v0dQyAhTGksl+t>ZYW8%~n9QxJs?>z_9fAp0& zps$EK-nV*8c!|a+=vd%;6#}-Tx8k7t@PK?3OgRH(cwbUoyONsDC1%SjRxMF^q)FTO zOKK086Q)oZ{uKoeM!hq*d@CW%irK2uPO-3tbQ*|R&%S=yn6u1PT|{voi5i%4cV}+* z$|@r6PtU<8bi2) zn3ocLV00HX)~8f#(BdEKf2?Ge|S|@OrE>pSirr6z{4-w68HUE zr=K-a=@{(@NCwM`U=B_Mmy56+iPvmCXNBtjrBy7FA;P=+$r(R+)0(jqS~^yJ*wZks zf$DP5%*9t1A;#lo&CzLTlq=&TjOu{1=a`Oxxeu)=f*6F~PT5qHZml&1Kpx?)$!eiM#*|m)ZRdQg5nJAi9YENN`7>?&K{oGaMvP*= zVJK)CNtNDB}~yEaCr0if|7II zLOduwmIF3SM#zW(Y?e3PHW+23;W(Bf6SU^QAHpNE3JW)qL=Q87yhk&RU#{7Zu0zbm z7joqHd}8i!!fd~G=?A|3gm^ai^cNjB3ZBB^MZBJkR4U!`$kb-bo3#w zwk)88a7E6%YT???lITWPA#wa`?mCKtrkmy?Ow_eD>&j&arlm{X(A(30`26cTTWhPi*yzl+A zGYqW1ydA6=n9MMfeG4xxaZoith$C73z2SU=BtAK-J zmLi(~WBbXF4NT==KH!4Hs4A)QOS1|{%Ufo)fDu7EfK!|bhbZ(kE+Ptm4&nFHZSfcd z-t;fP6@F`&EbxQZFbKz9h=D26LNt5=Q^dhJ{+cgyx4#sV_W?y~NIbpgS7Q#fLdQ^Ov(} zP1RRedQn0ejFq(!DUPL?!cq~~;#><|F0)6JtP5f;J0gS3)^p+lrl!%!DU}SO=xx@ul zhb_1g5XGYzvhjVdT4I$wszR_&vg&TgvlIg`&vErAdHHu{UZ5MdU3LQC=qnF08@DfHN*&)N&fh)ar4TJ4 z2C1m%SzOeql09EHxtgWkk!Ep=u~iN7cKTa7fTXY}zH;{9te8+UT*(cM?2!t`S_yFv za`J~oA${A*!xvuWKV^4(`Q2PJ>RC_Hy;%MSnK^g`Z7^CB9Y@VBA!mLX5TuE0D3je$ zxoc>Czb~_9n_uMobQFed4MyxSsdm7rYy%E6=i#+81j}z7+9-5Xcr1QXPyVywW5W`X z#V|;7%N*C=0KlbUW7vH`8LN80;l2uQ_^SkvUTmkzwKnjPgFlzuj?(o`Bmvoa1gb*6N4-@%V;5M-|T^G=}0RKNu?U3p}$gt!5 z_dYC3YrX#J{(I@iq4Ikj{+TD62`ib6hqDuep>C$^pFO9TL*3`gsR}s%9T<&#BI_6aIx@drGZyW(@NQF+`YU{!zIs!OTB&179a^*j^ zV7t9;#DFjbZU0BEYvqi8k>I0FnD`(RlE z@(~1om5_#wOfY^)lSUrt;7c%%VO?feAnP~XFf2zi&N%TjATeF&(R>8epR=})-bk6c~kitmnl4Xy(yw};9bb~E$L z(3`7p2UkNTiu1>DSt{q&xytlD>9YIKtXhkiAz1fXZQ>D-UOsT6vh5wkIhmXa-4{4o zDh%e=Gc6txbXS8fizhf5gsmHvdo$;hCp9AEyHn(1!G#u@h6bMSRU!de?LN06f*WX- zUT^1#$~@YZ3GmwsH+L;rPeqLx4HzC7Dgi+^;Ba3gKsgjiU1_ zNxMU;S!NhzPXHANrX)M5ZQ`i3)A;@+Bms6vcm)TdLy)vKOVJiBS^1zUVMvhkO*dEZ!+ST3Ca;NRhX_oqx4 zE;CnNx_xVrOtigEF-jD8B_SBglkn7He;yjb~Q+k33l`b zlW#qoVJ+B?)6}rFJ=@Y|()dNY84%PnUnj&@6^1LLrjdLYU=e7C{MZ1v7^IK}mP_f> zeDs#%^eev4{@Bp9v2rmt6#L^82=Q5QUk7>D=|f};Id47H$|^0BY|m|@EXUNm4lDUNqAnT75+{r>!+i^!2aN!km;{mlMf~zY+?|wxsdQz zl3^&dtD@eK>=n=v1@FvB)`xBZ#XOBEt!?R#*ei(gBiAdOb+%cF-fJI~RGm{?2QxTY z7q-|F5zIs|O)D>Ri2SjZSc#C=PM1hEOWZsg>Ei|+cb}+7c5e+5+BbS|Q_U0GD-hWb zkT;f_%c)%wE}g>5II!i#HB)cF{Q`_(VZeOz6Igo;0h^GX{wI+Kpx2O(4SjDbBkv-W zhC%yyx6(}YeRF3Qwg{y}F}T300c_I1_?%O$5!B(^4Vl{bXLuM3OW#LId?p9}d0-uV{B5ZZoGM10*@`~L0{kGBGc2+=veT>jUvn?LYC(Pe+X z#_&E+pAQ3WQ@8iu@J7$6b(|0r+%gxj^QU3wA1u)stLt;fHv6}%HPA}Um=>P(vmn6B zXP$2DhP&-&Y>MLXX=-hi-cVbNy(Pe4)1oX+jQPRTe60>z&agq)rrlnAa` zavpm3w1H=e>sv?aqD3_zYP=_Xz16R;Ot1uv>d_;FC`P@@s%7NTJLq*OsJa!>QjlRr zdDvLt!>CmR&%LlCD0NBmC<(NiLzb9+f@;)c?__p@nTF&9nsIO{QIugz>RJDv>~OoK znsOp~7e2^S$~|F{Kz{395V`Skqk6M&u&=+*_lVVWxV>9{Y!{_;(U-SgEngc~AyZWs z0tWAT-Q>1BTCkL{*rD2aaQ2+<(>C}^C?l9gzQI37j(On>IoRs&T#?%DYXdk}j-rAb zkA=bhPKg`#9qWAJFNm^9cdXe?H5>0;f~r)_KW6{f7HQyAtN!g+Y0h&iw8OsS|};NDpfsiW*!ahnB9^sufv_Xbs3J6 z)cp;`prq+DyUSxrczqQ6=J}%f_Z*A3JHLThn6~$*Y~kcc*n1*mF5ZJZ9p&NzS}L;Y z7Y6%{6I=3ZV)dmijH3VYtAFvs|IqyznUboTKBMcFzak*rPiK)vx;+*(eod5GFxp0G z_&!^E>6Y`;ADe1c&3(>dE<D!-tO?&aL*Q(^52j>(4>YX@9)o}GCRn(55fz>OX zZNf;+7i}kt)L5{cC33O7Ly1Bpc~PyEMEms>_lv1Jx@LCl30&9vS$Ddey~fmOLrz7d zCzx`NtI}0V3#AR1L99uXU@@DHTtYVRmBAnJSsuzS%*~yl3LLkeY zkMxUcr;*KFt&(jpCe&fV5y#kCU49|AeQC+v@?|L{66Sgu3fn&vr=_?DK{Dh9Vdpzx{pJ$N?tAQ7Am1Y6)~Zn z+49Ix&(Q<0UBNk*5z16lo6A!=RUZmTYFlss3BUZ4!Iq-Z38Hc0)3gfMA(Ge$Zt?&Yn3^T?NR(gI4R~;T^XE9gj>tD(vB5mB 
zE${KIq?yeSYJRQW-_k{U8G3hRfe(Y8$Ww9;Sd|pZ--mwS{(oG^JZmVe$o|vl{7d9- zmJ^Zv@CWJIG^8c2oPEQC7x#|@Bhm-}O;rBunUO65J2O!q)Huw$&^w!W)^(vi( zuirL$Pqnz=LZaHbf|u%NQoz;K{=lGnh7}&HnhUo1?g|q%cH!to`~2H3))%5Q6(%nG znxdw-#Sc;TvQhJWV*H^IkG#A}81Q3o``90=hEKrt`j)kNwRou?E+X*+Ri%biD>02& zmO9!j);X|Dr3z=8sbT%uv}ErZ!517x`wrl_z3PUpbxdD)M=Ky{h{y#&gDIvM60K42 zkM=j1X5|iR&ijf74R=&kkzf*JVB`xiowdbN;W_aIp%6-t-3CfpTk&@{MXDp~o1ei# zB|wJ-QTY*^v^Oq}Q4wkO^=5!%aKrl`{BYjV zv%R#h6#0+ExWXOJ7GB9SDEi2JL%**yyUTtpldsI8i91R1xqSCaTjB2aasr|W2`t^_ zN?n&laLio~1uQ(O`0F*;&Aw#`^FI^=(c&iSk=9Rgg+il_wXVvTj3`_ISf0fAi(i($ z`it97YU5%%GVOHf)HVf~Kn^d`y&9Jv*My(Qb02Ci)%P}(9X#-Fi-yS@@8V59$TGqv zPRNc{eUdsmZSXr_Fy$G$2)xOUz;x=Q5F{5*^Nt~ZL+pXF4Y9w`3|<1OZb(Ag{0%wk z*r)z7lyrF@IW%9)q|Yr!|C6gg8Fklb8qf=iZV|Yg!~O!RGCLrYBb6~qsH>k_LP1G# z7m2>F44w+uuq7}k=Z0aUm`;iOtK;fTJGr~5)DuMj>S2QqPAes^TJhG>_C}tkXy;W8 z;M9x?Y@D6*tlx$8`u2A9FrViZP9}M&{{Jc##rHmfzB39Z=be3wJEUONi(OrplE{WqMIMbKimmvWr}O(RYVTy3_n)%a_ml#Wlv9XHtw7BN1>80uE*dG z$cMtQSYFXkdfWSUgy*7Zzd~(D_r{`DWIi!eDsx)zq2xk$N)80HoD3>aXf6t6orC z{h0SCgDa%&8XmV8Vz+r=U&pPOWD&|0vRhp77FtmtEHG?2&D}ET-22v#iG3t$$c^q3 zmqju%ErKZL8}%i6dBI~ z^Q-0tViPGO{1}-jDHUrZle9r9Y(5cQ|-dP~8yZ@TxkBa*t#v}(Kc;Dh-eymKwJ&>fAd@cTKMem(Dg=n|{f z_F#iz)hNqS|DRy8iky37T!rB3P6f`}?HB`=OY`=IJ>d@@P~S9+zg!rEG0FIu+G0#W)`NT+|L)v*v*(S6`f7uP zZ*=D8w`d1tOvuemPp%gj*C&BE<@ax$={`hh@7-uIBmh$MSI?%a@-$EUl=nG#w>@<9 z96H=&pYOLjg6?I@;_~4|zak>DMIF+VQ+ypKQ(+52 zz9KT-VUUk%)?3j}z9=f;Rd-)x^ZJ_KT7_ve?Fb&3>2+cU_d6jLnJu|!ibdt2itEy6 zxut8A+cEoTvvUIfuh&3yM|fmQ&i~%YUS7h-=IP|X|I|6h-|O8CynD?LpYL~OCqKCV z`rmCocalk>n|OO|mw-hIq1+`L-LYW+j0XVd%Fk3Etc7>TpS)%{@s;f88tJq(WSu)qQ~){RZ^@%vvr# z+Ke0!=1+xg@WXpjRgFzPbFK4 zW29|8)S323EmX?l2$X?M2RX!Q16b!u6ucA<34y<2yCA@<1xm+w=vZY4*)P)2 zF61qWR%aA!Ziqi7$Gv*9(_*E#<8kerDhY#9lJZ0M2-j{ts@Zz?RM|}n{Z3$$MdV)1 zb%WT~y)WHybVVQpGr-Tw8OL*R!i7$_e!RRz7m z?Q!=NIVi)%=D>r2tOT>n_g|j5o@-yXySjJd*!IzVj(k zb=q}G;``S%QGVHJZ>;0=yhNUnS9jX&WYDd0%dHRLkOMa*2VTya-&KYA3N6(FP+S2N+^2|X`9P1O*%-C4NOmx)_NY`x+r{g@1?qpLzC|M$>~`Sv?y9ah%GvgaKkGj_i=-pRL;@ck zqZ)yeh~G+%`JXQ#nDjNO>l%yq&pDFv9(hGYw`d4HL#cyMc?Y8b*_mRARX)ZG^Y^df zwxO}->0v?nxytrWZ12Z0)QfTv5B$C}YzMvi`~J+_my=sZdb7J@-j`YJrrd{<6bLLPlHWoH^q z(P23YPYVzU(PdMqDs;LPc7(wciN=w>ZmbR7(o$+@W6a2XT!QSl5~{V)0|E7Crd^2hY}@Z;~-K9|NSMugFZis!*>w*&8zVFL>y`JwoXpCNL`3Z zB%AWixF@IFS}UU^r$~@*cZRKI0%4?lVRxVV{CJBPLTo3XTq6!f&raNpoh<%Uyp_>A zT3hkDAt7T+F#n;V(sa1l)Z!K;zS^(F+qfPjanrOR?(g?hMUe*aP!9G3)~O;Bd56kHVB-qq6DLvLnBk;oho;Ci{GE;x7m~b=NrJ1Sf4$?^EY#nhv4xFX!j1_;~d_AUQZ%daOjoxv5A55 zT_-oBRucPgmJ6G$-=_L()Ct6p;;pCUu@i0d)lH;bdfrdEWn%`R!~;m^@Aic1nK->) zD_m0fGa=XvK45`o7#Z^9Rs>Z!!?z2S+0!HqIzYHDqJ%74rz#_Cq&gT_ulq97&`$L7 zg8BCZU7Ri~jF_p3t>a3;QYNTeJGkCsrqn+D1ngK! 
zut0(yBx}40(NHQtT5?mgJ-?$FEk=1=ybtrE_~;5?xVfcxQ=6g*-;f*Hk^_o#q0@J3 zJ}YZpcGad%p);>U>iNY$W9KD~JWgjvFJ@{P;v zxU`e+q^9}(y;5_K=0obAI7!1Qta2Y1G1>@fx+{o)=M=Kz}J6E_>sK*I%Z=1swkk^a~AY^lSQbBqkbndy2VgkpR_cNdu3i-xQ+N#nK z0`Gh*ykwOM{uym;fQbmi(WOr#OZs^@0~=MjAi#77k~=Gbs}#ow@{B?8$h?9<&wnVDP?xX*2Xc}-c*Ycm?GG%ea{a4$le`fiT*?LIUFX5@UD3i}$dJfmm`|+B7~ivWBE~ zAIBo-*0LW2q6g8J`_cDc4VYW?b$>_^*_|zkoE>e z3j3vT0s|ClJo1^*TM2=v`3%-0V0k6aHkgN#EUE!WOstuiYsXHU0?c>?C2u)A!hK-u zASCu9CHfewT3ENJqRL$PIWKzTz9bEnP9<%RMvU!HH!k+H_#L4=D%?Br&9NxVO z$DaH6lRX7e`pM2o)=tBUIcX|=m(|y`R-|voZ&PmMd86;#kJ~ypUCGGIlBu@fqk1hG zmYW}c=IQR(yl$vCHokP%hCMEWkY#l!pI;X&S{cg~>P*Jxq(Ujs=Vj&^g@ z?bzzgKbj)_xG1?(4V4Decl(J`Ql(SReGXGlzwMvzuSJ!asYZ}ifekXo2P{|f0BDiR z{GOSw9~)MS>t}<3WrkqBw=$9vM7#kf8!R4n18zrkB)TIML}_W&)_~*oriMgclL=P-us%W72@aDf8BRBoC;nKi+R|}D;-mwhx6`_c zGo|lJRc@01w;=?qbNy%k!vDD{?;|~IHvuKVP$JmDp)eWMjb#~fFP@F_YnLHp6e+eI zPy z6CLpgxyq^;F8&TASOlxO+nzWL-HHnRKIsa05#O(jSdA7=i)vX-cR@JZtDM1cb%pJ- zF7UzUJ>at3aJLQu1MJQ(1{gIBNFH95c+?240k=4*4gy%T0>-ZZ+up)U;a+wLtlpH4 zGs~RH2vIKh&!z^>yx}MgjGu!Y8c}7ybhjE$ru3G>?ZjK^L$r&v* zByWgd1?wY0x*?I&TFn`o{ z(t~so=P_}7v(o>-oZMg0f2Y!4)BnWNU-e`+Y9(W*{-l1hTEy=iYQIA#<AG#9277Ud;e}Csdj(s;*3P5J7&~$y@annKS z?@z};1Y4fL!!J|cSnR&@MD@{1j;ZnV%0#KWX{_|!9!@}=%q^DS$ zF3%8ouHi8Q%%n&$JQk<`G308N>rcM$fQJkV4%;x{DUuS*iT?l|KI~^MFwukYtuFw1 z?EcUr2irSDt7A`xngEQD2_%;F%qiU37p_UP?uUsCH!Y!at8$vrE2i9>zv~uBfeFyS zDu16tgbBCw*U3zPEu)-$=l6fD9{w+D+q`aMTDR=;%4iTC#xf*_t(V->Mh>mf>Ae{{ z8L}OV8u=nd@c%U&k)9>;jpEmERwHMxG`@iD;O4#%Yv-2bcbYIKqrrr!3g?l+ojX_7 zO2++;60LeA*xjebcBa18ql83yw2m%;gjLj%Bf#Tx<4g*tlrcBMzZcuabF|rOke=(Z zm%Ip>6a|}n8igejA7>RPyL&02Qu2>tWt0d_N3m5T;3#5%W$2+hKVOq9wdL$4E74Vh z@P$E^1%{(rRkV?;Iw-s34%)$4TV@zOmlV)iTyf4y@eQzEzLDp}D0u)#{OS+MjKGpd z2t#(Tf)gaRWS?klUWQ56X%D?q4(86}DKm~pade+7X)gKDLv`Fx*Z`WfgXLOjW6Sk8 z!Ba+nF;xdc4+i~P(bc6z`>^AsLKEW*eD3(d$bDqu841b_Pj+>*EVp3s z;5r5CUR6DKi9q@8gse(1!;)$~KP>`7H`C$hOnBX<#S<4LuzcYD>7S#bD;SJE-N677 zR`&Vw)?h{9{nG(A99%1a@024|9SX$SOVx`*&u}4-Gu0o!mjr6mhp3CZveX?6IO((7 zLsI;phR{Z~7ldNT%m9(#pP~CxHZ-d%*({yg?%NSs-v~?7Mmi@@7dv&=nf7)kzeXJc z-|CC!|1Kr`TJ7?XK(`BiG#N+^D-dVes7Wt}bgD;U16H*XzDh4eR9{Ip#jb+gk(B=(5{ zEa7CatxM|k!&`*Fk}4-_G6CR&8bcTsKrXWhfF+4!1g@nHSQEnm{~f9L_-z=po2(4#!Fpp!jiK5icX`HMU@A^GYFv&n?SXLqnXB>(HD)8G#b`_ ze0#>K0fo#>Ub3NQ!e4BdrDtV1<@w}65vCV&`5t|bKRxU{+AB8B3Zzcc6;wL7CI=v+}TaH;ptl{(i?LGgJnreu(iEwWaQqPAXD6}7&^I4#Qv7ZPFDKjA>)XpkGADEU0C6@ml5;3W?}OI zq&_Bn7=if6P`w@?;~U7-@C13;xA-_ZI4j5pJJ9~<67pv5#m zw`k~wI528c^#D%vh~-^m3=B+834~9BAW`gm2?n;V*nkBe4yR0y+(FBU$irEl7T>7O zCQl)X2KPD7BFURF4$X>#2M4Xbe_lj2E|5vt~f>egqe^MF$tdXx|y3-Uy z_xGWs(`)C^HwIu&Z_XM@M)Cz@WUmo?5w!m*-vRDp#mcH5m|(la_f!-DJa_npt*@^o zo(0d3yuvYAe(!t#!}$ezszGuS#;-<+DYH~N!o$3B4@fN59HzaoCpAtn9~1>y1D+~8 z3c66y#TWc~FuC>GFTvx68NXdG(}|++g>lzC=a{}#-=`JwW{Qy`lm+U%j4ZmS)vBk< zWc~WCqc<{J&Z*wJ1idbi3C?~r>rZ)_iwiOxO^{*L(Lob5A&xxPtK9?h03$zi^yQeN0V%BYa0FZb0%k1~RX9pZxs$+PD|456ZEU=nF z2G7c{@Hb1GnN=$Lb@I4M{>{cDCl|EYBfFJX`GFC`Drh0f4~Swdinsd{+}*;ktFz82 zcn|K_-i5^JII0Jiw~Aao7!2m)e%>$`SkJy;M>zZ@{`?m-3BG6RjRM@eUF)a;@*M>= z&oPw1xL3&QGW z?pwp;+&KH&XD;kU(Z%%g0g@*LhtfZ6vsswuGhou@JPhzVN6^hG+W+mTg+wO|7-W0H zf=B>T8iFk~1Iyre;g1Tii4E5^PPZ#umsaw=coSZl3ju0?l&_uwp($h<>>s6ud^?qE zdCGK==o273-Yi}^Z2{Gh5+L?-+2~qd3%)Q@K-B&&3=#Tvz#0hJVhJQkVT6EjG z>=TzJ$wTPM$A9IUvNTXe;HIJ!l2H@B<`=-zkvfh@zcc*PPW-oeU24z2kH=G*f9I={ zhj)PH#hZ8(B4N}AY!Hv6ezdl>Wuav!ang4MrFSWc zv2war^r z&btkb-apxlOVE5bO-ti0e>Ybv`ktmZ@KGlxfxlj~-z@At>%L#)v$CdG>uvFkzE-7wCK8U(31FFuDu^ z3!WdA{8xGQxgHrF-Q|IcC%p!e%f!vchPJ?2fGq`8(1|eE6$ECe#yN_h_Xe6chkPb3 zXYm>!>JzvKP-P{G$`}CHc?+%H-MycGx(+G5MuP_q|ECN6%#$}5ZG+FbcQYtn*~5va z9G 
zq~z`@(aO3K!@tbh*MY)LqMC?-od%b{Ff8A$C|j`%Z7SIA3V}F2ka*8!ZFCOP z?E8-05S6J|g46>6xrF*37CNOM(B)!B(~)tLkq=WL!2AjXaoo6PzTs<|UpuA$NbKdJZ^O@1)eTJfJfzdS*42~N*O`o~?6?YuOd9av zoLRT4jAO*Z!mbys9bL%6=U&hSy|~TC&^a{zHrS>1s->N;?dluHvU1?veKob}OCe8w z`6uzWPu-oO;Mp(F#zzvH9eynuLHWcleCv2o7kX;(he`jrw%`3?`{TG@Df`mbpbmgw z%`OTn1*_hGeoYZWt_ur1K}5sA>18+#BxVQY4Y0}Z!b^eA8;J;XD;U&`2RjGFGr>YIvSAQt6^@KgfTbG4 zpahWAvZ~6lZ$|Z zvB)Qmt)k~Q4$64tv^7?b%%DK`~8|+^UJj;rk*Gwocx}pvxxV{kDnP({BQ8r zi0-$IoA~){O$5YTCh6r>WARDxWMz2<7$6aj zCzWr`k9a^CUQy|5<&R{2R^#=Ror&@igsP( zrRJjfwiV})LGq#lK&Hw(-5;xywwpZxiAsrE|J&|(rpG?{5z=YV81`BNVu7bztXg`x z3R+zk@=(aP2Y!ku3+Wqn&c1K8+q+6RyQFS(K$3@cVo&76hQD?d>XGTjGu2=TB~G

z*E5tfJVKt+DpW6U3uE$VaSHdL3^dv+N?7UyT|*3JZ{GPy5MLV(6?fkjGOls5mnv#x z>v<_PXnFQ&iMtt8b5`*ktj_oTa@0yaRV@F-bP;9QNuOH{&OV>lt>~Lmt=YFnGo(bq zCs$`(Qd?Ws?8PxZNyQxoT4n27WP{-d+6a%2&7QsuJ?DNke!fpmn*OZ)v7vz|;mxy% z5zq~6;Lj$3y^vViqoh{WFoZKl7?va&X5EQc5onZmOPwh8BX8sgp}s#J&8PXUFGbqj zZhgALBzYPuvpj9J2pIggV2H0Pc2VR0Dl$t|j7Phe(eTCHv{3A=OvOirfh%BmFbs~C zpogA@IiGSz?CM>xK~g#fXpELY9jT;tUW`Qyu!!r_Lqg|6K*Ay{QYfC88=f*v|LAn= zAIJ6B5Uh4&`d@s#X*|?_|Nfm2k}X2aAY@CHLSwAi#x7gQnh>%x_AMg&*ve9t8bo%I zkbN6c7-R`CmW+MuJJ;Ly{GI3TI{(+>f8EjJ;YQu)^ZtBZ@8fkG&tu@xHC<@l+!OFi zA<4~1lvAxaSRjB<>j@Y9SQ1UyXe&u*h&lUnC4gOj+l9gr*j8Jov_NS7zu(0%J2+0D znd;8Is$c#RReZlZV|A2i_pyKRIYiBPbYS7{&-+qmz;RxZ_@B+rM|^qk_3`^RJ52Jj z=PsNY%|*BfN$z9HWNA(}iuy_zplKq&yeQ*_W=)=QzBpl(ykK}dSHF9`NF0Z`$Vpt@ ztJLD!m8TH=>T^kC#BZOt%~54*n9o?&=eYcl=E$;4)~#h`z`~3lEl4DZ_=4gL%{fn4 z!J!#%zt5?Rd2Q(BC~-i!VgrcVdC@*HXRyAU*BK_0)iGDlvy3CIsa@|SPjzX>wFM($~N zP$kZ$H!D(Q`l4P%*%?Mng9#lW4VF@U?Aa%?*RQW5qb98L?mK@`f;Ru~JUKAXX`foU z3tjC^+()tYrNTchXfQ%8e`G7tErxh}%wShm2s`r*Bg1#aejxW=BB%c7Z;nb%lGr8V zpih^sy^earyK&%adB=c98cj#S9k!{?0XJpW;#kcK;|rlv%LF*?cNBUl()}@r^o%SSmAexC%#eJU z<-aD$a`!J?-A3m-{KS^f`y3Sf)$Sh$_TNq;*#p$$CJoeD7J16uxC3;QHTD48MT3;>ih@zY1K3ASOn8zK=<1{Emht0q7i;iq{+{Cy?3% zTX`8Eaw~a&z{7Zu-jO(Hu|)<*GeOyvO_-o#Q$g1>l_j2D~u5ep)2j zsr6*$bj9reMz|L}>w<2ZdW&bC{*P@uo+{)e;w14D;}UghIrX(Q8$apapyDkT({mHs zRfI9*t+-@4R50$#uAkpaZb92eX(1$>tFO~g0JiMa%=wvKbd$knIls2>4;;rlw*J1) zJYxm?7%Tnke`Uvpl*o9ZRwCYO#g5TDKB#!BL`Dexz{5chwsx3#3L`XCmaLjA8iqke zMbC9dwhV+!QqVrjDOuTqSZTw9=H%&vTp0MiyqfuOL(e>&B?tjp?;AlKA1ofSIiu_stl@b4vYceLxg>NTr3J- z(65Lr|3SVCnw-HZNm5!!&jr`eG?#R)k+|Sq8w$IBFO&pQVYI8ZDSNNC!~tL0#I#LT z0nLJ9Bvmr&2dxFIZJRM3YTaex{VjeC+6+av-YX+i8C<>>6Ml7?BtR`^vy!Y}pfZeA zgBo%xE46$7i-Ub;#WSiBvzXTguVOv?3SC#}a5s`am6hDXTIQ+^`P^PK)lrg88y&Sq zzbqR^^%}XE`ZyVYoyE9q_|!VV5BDV$590R)ze4($FDJ{LY!4!*0{5_hLr-5CVmoky zjQu-*vDcVA6WA&OA1nCQvpaYe;N^8UwGRs!eu2v`c!M8&551A0dUEAId zq&VN(##P1tJgSsTLLbK0nNL3Q4F36EdvNw`+(i060^xm$ode&03QvKDdRpr{wb~86 zF^dqnOVjbfCtWG*4Y9JqZD;#7D_{6vt3!jt^SRZR0spzYjwa=A^ihYkzct?&4ogle z%%GnBz+w+%L%XAX*WGV%I|?ET)TU*Wcy^I%TpKgskSc6j<~Am5erEpVp7gwgp|FD6WVUNaMZ_!g1dQx}|V2sr8BE zhua#l+9VMuDb43f6(C7u8R34CU==Tw(hHSLD*d)r=Nea=-KoQ@Gicj89fcQn7xZqoYFLmjEZ!nZGp{=PqWL$6cgD~ z>s3Bbje7Q5ENq<4{Qi!%0-GJsmyoaYuIY*FPx9<93l=_EZ2l{vbvwi3i|80(s^Deb z{-C2(9siX^vel|KRMiQM15hnA&O?>nm$5|MK#ORLqE3N=(g%Q=#X^!A)uR*rPk-fA z6h?xMMXvXuFLC5FQET84zXomO1F7*-0J?`}>l*mH2B`_qL?4k1p&MFBC_w2r(}P!+ zJC3O6UW5V4PB@<-^^v&TP4be zx?wIHZ;XlNv(FO+ntx$$qTBOhK4=>F8vG?b$|-$YJ9&1JVk-%Du*UNze678)Wx0XY z(tf&~$eL)cjd@tqS+L4nc1U%vWf%T2&|IZxd<-5O%r?7$gkK zWQBk`uF$}4`x=y%4#g*w(H;rOIIItRece*igmr6r0|kFhlIeA5k33ip*72SQB9l4Y zH}bRJ)DUwyH8rliiiH&Xw4@Kf9Wy3UG;vEJlM)9jmul&qrhoM#esc~T5EhJO5l5np zI-_L06=g38%xUrwu7G%ZmeWsGX`LMAnU1wGn-9q7SJTM&>ZVPD3F#j4EGx}1EP7Zu zWK<5G(hSje=2`GK9-X(E$>wQF!m@HvJfu82eo-H_v()c??0f&{S>MM4j|J?x{?U!@ zs0vsF9QXjz7#dYjAwXV6h42nrh{Lp6{hsv)+0ST{Yv502wPTs|C z#4LytoQ*TfB!FSi9bQzeoE;aC(sU1q$&su9^;zeM%!g$leOzuwod`Wq1_Uq8$T|5V z!^7u1=a)J?rHH(eXe`h;I3!_p;8s6n4E(S#z<&1vhTBiSuy-qSxFlnp7Lx^~zT>I0 z8`Tfnn4qu`hUw#%9txKlV z5Q=fsJJU3i@VUNhaFO9@E|-AGF%cOU^LLt@nqRs%kof5=Gdb0H#WKeo zcd#Pj+J5ZJJ=Rh_T0X4o9!m?l5v&AYfPSIpUSo%PJ6-S;WbFaLK#(J&06@(@oC}Iw z^D4j1_b6LuZ`k|-p(6y7xew7~ulb4 z#{0gb7=PlP(3g%ocF}8#0eOorM8fEbGxyl_wxdvymIDcC#@-?x>5Er!mUHys&=1%f znZZ==!ZfYm_MZ|VWlfj5D^gO*OnXCT=Tq|s!YF1BcIu8wzu{BfIe%nDOsd{94u z_&x4i`(~|n%h?ZB9QcR`ILF|y1dHW}h#7tLvUYqbuw=l%1y zct)MN62{%Q%ExUVDh6^J;8k7`FQ@H2;mc&^H8zjbgOQ!Ad6NzF1@C$YY4$3215p}1 zHpf8LfL<*-l2}aJ{4WuRqPLf{H8HEoBd@SntG*KBl73{gMBa47rZ;$K7;Wd zEm`j@`)b+q+k*owv<;3ZoHTuy2HMJ_JgOdl&;1@2kP+)mzb~G_fLG&}yIA8Q`D_Nw 
zI1-Mj6(mFuEKJ{MvS8T5(VUuZ-cxHd_y}tJq692XJH7imbGGclHKs)#OYp8+5B|?& zrD*lWfR4HUUKacL_R%b(%d5C&Ptb#B6NGB}Q(HwZ$-VyX?KRg!2_Nk9BHu5J4J4m< zp>d%`wJ6iW_2CBE`m#&)hwSej)jdhdrxf<*y&@X^+3?a6`0M7%>!-l~{BxN|HrAMI zqjjXif_UzThklmC&p{Eb*FuJ1Pn4v-u6Fd+lv2K;Gsk@7pSw~d9mh>H2L7H?5{h5>%G6;#6NsE@Jvd{ z6|~}&oz;HM4?i8b`ySZSv_REpRz~@Lof>=gwLOVfbg?SLM-NIlKK~w^@Yh z9?`RI$zrD`_+iH5t_JMS1!erqRryz%H-$QU(%_abjzPy{G&^P*R_2d~3Lq*Fy53n| zbk}Yd^Fkouu4OV%F?P-4Qvkfg^7&@TU<+iohWEFSk?nOgaGG>MImD0l$_mIPV|gLC zOX5=3xclEe^<5b;1k|45`Ohssye3&4X}o^1zfTc{eWV(x`=V%F>4&1QO^D!+y4wN> zKoR5brD@$3DY{yH~AkPIbHA08R0mv_4P6Pg7BK(A!daHm^4_oJ9_bv z{<-EJ*RFfvyrq@hRy6%zywJ|^?04aJ`vnDFldbSiuPYkILRzJ&+@2BFKk01 z;b30TVUYT>rxBcDg>4>s!t|<1TY95JR+)DIYyO)t0jQAfEA=tY2cZJ#PGqdp>)9l|N2O>h23n4pSZ%*Ym#{S8;yjM4PLEGr% zOF6^aK@+&m%zMeY*~U_aFB>!qdHhx%S!zGj`1gser09!-dyHCi8gYCNahmHEU$*_o zO{#M`&eFGeHOIPaa-}Zs295Im=_N$aWHR%eyp1# z0vi>HhI-?>I^>_ zE7a2iRumd`zWo~~4_-@@|9bs)s^L_-IVJJLrK^w3IL_trnS2UMyp{oWmn9`BJw_U_ za=*t<5C%OOvMdYgFWA)({1#G=HNpoW{Y`^x{b8qQK)Us+;JOIm%P;2m+h5&tufMsA2YcbW(Gl9`SvZRjD6ZwF6^UgoV~UF^cT3= zDJMxxsRCXeWI%3JdpLaYQGj4Jg)Z9%2FPxGcf&l%Tq*IYtEiH1{Ak}LZGTGt(kM;s zp|5QK*Tjx)>4RCiIBGT(bCX^4@S}}b?`WfXZoK|3L{v@G%YGbn{nrFYSX8q@`y4m(`2ELi?Ke+k$k-u~){63) zNNK(y16kj}DWlZ&--A{E5}&wICMM$IKqiy-ab29ymF>7gVR=Rui3uf2DbJYBXA*C+ zdJiaa$A9+k1n?T;vBi{jIGl1z26nVUv~P_O70EY%>>&yfg`KQh{p&m5Z&1&4(`F#v zjzTL(Rh*e*?W-exEsfwbd>K-K4=x1^TdQH12x2CAr7pkEG;%+!zx(OXMW>c!?%WpB z_xeQAM{cj9&8AjA>R%E$<&heKArW4oXmz55WNN|P&c*9WPtCe(_04jOrX8o^#lyEB z&Ggv^K0nwd>CpK{tHV0g7u==o-tj%xEN{$b;!pOLE@Ya_G4@%?&wZ?5XOhv_;aBup z4(Qf^@37P1>wWp5hlwI%JH3jh0u48Q&OMJ1c1o@B zERXwLt(ny)nc9YH6w>wJJAoUJAQnxHd<&&$!gdztv5g~CX5qXKk5TNVi?5sFS8T*f z(@-4VG$RT&=t?Uys!elrYZ+-UGb70+ntaRv+Lpe-*@!Q)lWXfj63_K5jHaEP{&2W^ zB@-{vwj{x!Cg|RR}{=w{M~SHq~&+bHT!VjwF+%#=DdV0sF1*IR*+beyRF_=!!a zcrjO~Q1SV%YgZ-0!(zgflbKJ^N}Y%(ea%V1Mdda)lE~5fW44P9pE1XKs(D!#gz!$z zA`ytVG3}KU)N*q3Zw`|%f)$=5HpM@L>UgDcQx&<(?D8Xd1DX{rNcna+xrii>v29 zUDu8^??o^md!;^_5WOKKA`TWZdj=wqcpwcE$B5*7wH91I-bnfw-<%XF`nJAO<5&yS z;A*KvR+Te-%?bke6TQ1Y4Fp4J68J-HmZ#q-6tKBRe9tj|&Ng@; zgx~f%E#Q0Z@5myr#|iKQ`UgkpYjJ;0T?$3<{NE!DtZ)PV|7%`&%6?-mQa(jb$G!U0 z&D7^IFA0RnJAP+(#-toF1g5jUXs~^rxi|B#=j47nmV{tFb!kU*a6)RS?#~lFEFXpX zQZSXBh_`%_z~N8< ztgDVB9H*}VegsS@He;KW$EKj9-ii2YS6L6>Oc3@j{YA3mzQ$oo@Y>X$!c%O4K_l)T>IN&Sf8HB3I&E2el5fL)tb&;V zZ)v;Tk=!vOQ{16VL@f)=^$7RH+q!sb?JO9T7}v3g%j5>jS81;CZiXZ@iU2P$`EBH_f6FvPAp?& zRUu+!_#miR83K0eEyRb-bIIs@!`I(8En<%JR$kTrOZS*Y)LJX2Ra}|}{^Pt^agvYk z9n5Ku`;(yvNc7Ta2E+9OYD<>&Vud~LW@8yi^n#NNf`}Cfuv$%Dr$|(4YBHjA0gWr- zdpyes>Gj}_P$8?oK9)oKjfIjZyj7a z@!~Q9?e+JaN&fSHt6=|U7QOP5wjn%y%i&az%G zn@(Z+Z!%@KE@=9eY(ej244wf&CS_ZNUrPtJH;P2hm;V8Z|y}^ zmU4C@91DIiUrS<0Hb#BqO3i(2e3nSJ5N9^F!H?0FlAdah6?z{PFPDyi6hnJw9*MRF09EEsfW2aio6aUZ$B@xr%AOB0END*-l~4(!&1=kf%kMn)il8Ytn$7&n|QhY+;fMJe{wqgX~LjC&{{h%GGk&r^;>&%L? 
zJW^khntsBPZ*}5(>Q^Ejj0HmpU%W@}`{oUcm+E5?8XEq8K4}ZwrkQH{;-?u_#S!ma zVzb`rb&x^O5uO6zoa2lA@B3pq6>KHA}9y zTaQ;b^>x~n1@?JvRW00E%V>#Ea?R^{8?qGeO2tRW0a;nU^NZJjR;uenvcIQ!GWh!U zaLx}XdC{j1BNt~r`3T5n_GQXQq#Z4`leg8eV(B}YT8RsFWokKP^h(gr=Z)P$dIHmd zGNnX!m(^QPfF$ZNo4&7bME2Cm+Je*-5N?nY>`-wEcC0!$-;~uUw+bnlZc1@_u{Pn0 zA(Hz8YI~b_QK#P3IsKVj6d;nhSiC9Uj!l{Zzj}NLSPrQ~0Ll8WD1JZhDLlWRvUIy& z5ol2HLKt^eE}tP8)>h(%%;1H_>Rt+MT-A37eyY@j`xMYIA;EVJIs4?O~*F1RA=@*AqSaMcB~ zl!0=vq4O>ORm~~afdYY4RSI=M-4}dg;1sB-$hn4tpLT@y4_(<|VKK7@x)(_}ZHkLVd0jS) zf2cpvH@j13?Z3BViaR}li8;;p=d!GE%^3!^aP=ebej+ZNPf~GVxtpg);a}J7<}vVb z`JSELbdBv*b$tb!6tRt&|I95~dC7!~=G>q$-1(~Gl@lw^As&YUt|X}?_uJ|)DB89}VtO{-n&&N_g>H=NMF`sK(R*rcnROms;!xNoyO!FW zE+RFYtE^t+_%eP0YlFYbCT(7z6g>B?J#U=j`CLl=S=XDRJ;5jWq-mjP%_%uxBS)SBGJrqF6s(4XJ4E1M$-+MTZY zI?gMV7Gb-bMXyV5ci9ir1tLBAOA|gYD<6qh@%TBftmHL*y#6G<=sKOZ8GE%@;|i{r z!z1(N&z)XZ11DxkH6QNIN_8^68Ozt3MWdi9kn_SiR+y@?3^SnNACR{?~v2>^F)RE?_sav*(5zO zm#2fv!$N%O`Ja(5HXg)nh;Fgko~leWiE~-cy0|zFc_W0mQCeeJy-{pwJU>!*T7Ye3 z$5+52&U4rwMH9ip8lBr1P0LzD5mR?t=1SlX$~uW7qBvELaS;|0GR9#VR`rek5cJrH z>__$JjiY@m$&a=TW3u1O(^J7xxuNVU)!f!tp{sS(KKauN%7V+sJODBv9SPW0ol`As zvWq;7F((p6vl7|Js|)sNj)VaxEk)vo@#C2j$zsM%18TaO3`a{u+=E%(9T9>R^X!2q9yfW9UB((wD2zDq-fl9VrzZR$SHnK*bUM$BkM&{;(lr*UA$BR>_XLE=>A)>+SZ za(jGh0oArXh)jGXAMYXu`}6*G4~RuplkI2&X*c(CJFiV;OA~rxRlEPmP1DW31o1Wi zhU-9mvuj|9J(%B-guz~d_$JS{Laz*A+F!_CwzPY|^{uFf_(I;^h+|kqF1aB-{LMVT z!FedWO`KQmyD?jm{CsEM#pl$r7nAw=(|0d!3ICEJBgjX)e>3%m2B3-Ph3{VTqD+ar zPb@4Eee?ZGZa#}!#bhd}Qu6oJtYcrDC$$Zj8D5KZnTt$KW_xbmFVT;Ny7m_JfAA9x zKg?9?3`LXQjMPkOnmB^d$c%Zz$a5^}pr0>-in)1tMH7Xx# zXRR+^DmlSaz)KfV4&dwaZB%@<@=LuqPT8%`n`eY&ttV^?^=2#B;#jF@Yw~8@)#^eu zKFMPL*I$0|7icoh&HImh+|67XUzy5%x=UR?9ffNBlx|uYRc8LZBlMYhr7tA=q~R+p zC|YyIs8R0%GjGQ&FN=Il7^v_t{2OUEZTPy>TFLD~kxfMw_tL=EK|74uN~%_VgN6=% z@uP;V?u8Y0Id_>Q6vJFlrzX7#;Y{xBFxF+B(x zV6joAd}hFP^YySePabpFbrg}yMQHLdtbkM5^aPJY0IZ+Q1ynIJbHLdzwyhGpL*Uk2 z8G0KN-pz_Ssf*~FvzpUIxK%ZhlL{qF4*{H9;@elJ-I#If<1L@~eY3Jlz*77{(5n?Y zPS#t;7;yxXzjOGLhRJQl_8&aIy%GuDRIJ_;9?g1}eaY;T+2u2{M2>LX^ROGGfL7## zdOtf?1lU4|I%8h)YS^VC&$8Zks*yr=U+d5p|Jk_nY5Zmx58zPcZw2DOay{!1PopB* z(WUxcaGH(0OdJMn+ZE2A{q_A}0)^kpqP!JV;jxBUp@d5Y)Yi|%M%iD92RSdxbE($CE9x0C+y_Lf6_Gj-o9#y0QG z?t0})mZGF=Pmk?9n)WgYz&>R2MT&fJjR7<_tsH}w!P`#+HeDxD>R2{=U|YJL4tE?F zIHeADeC8cCrbm=eSUBpRH8q0T(RXQ1+G(^pLQUXvr|-GFkk zJgAV_XPzF47THEOK7jc1@*lO_is~I>Tr+1^7sOk&IW4zOYnRLjb{eZUFi($dAsUSF z-kY0q1x((A^v)EF7j#{v$mVLyk%ZTo-xo^Eep=e67nmRS;Ge&bpO;30@Q`$heoLu7 z1cwg-iGgdH7D1XUx{1yPFuIee{Tktt4u6OsJ>8Us*lHQ(g^PawF?PbW2oNnp=%Gu} zr;C1ow&w^~tg?Fh9>nrRq6c(R--qjxynNM8*nuh|NEpltG3SsVyb&a?K5|~b>$$e8 zRdqcEE?3{$hcYlUI~XD|R{A*!ZT}&-|DPe}nX)_ zgHRqEE}#QsT(I>+DOas@ya6^XEpcU|ALe@*=#57-S7wQ?B?`I}o%Wc7)Evc>U5B0> z5W4Ds&`jGnyZ_VGMvpTd=JlpHgct&jWI;<_As?GMk$)}!9oW6Gyc#oIG#p}mo}>S3 z`uT2UpXXuR(4#%KuL#haUUj~sP%+pqRx};GfoUeaOoFvVnAwp@e>c46WOg6{XEl}>Li;}NiuG=#M zz(wY-9&6|x4(|h?mYM&7S}@(M|1+VxiFu2;a8^2)RKKyUFo3_-N_KZQZcy9c!)(Nb zAEqBe6IdrDDJ9w(Q9Z>>9y%zSe|3n5}s1Y*d_} zE{A}uHutO*HFW;@_itmmwDp@X4IUHQ`78N87P!4Cyvk8p83I_*)aqJWyyJ0ZH6GFrt_` z0i(5b5~`DXgyiYqEPCI|+wTjgtocql)ICm%>iam!dP(A2FE`(bQK|EvwlXBEj2%E} zzAS%;@^OD&ALGBxqoKz`v>)qC`R3NQzKcX_og;LaJv$jrs(CLqSA)5G8=(R zUWQ~5tvuY;GN<*(a(ASe1-O@9$mUlK7DsUjnbcL^0}04~FDHlWf_heUz^h z1O{68=`Ekkt4b%{g4Zw+jMXmE6C%(e1Q5*{56-PYsJD+_GnVu+jEq=r&=I(kCB=oE z%qEmJ>7|@J4X!W%Ywnr-f6m?>eNTv`&PFbPmW3I6UeNDu!sq{st}_pZ`fuOdN;PwRKG$-duTwT_I&q{tCrd0O1YNQ}boz2D`DVY6K5hrryV0O@DeX)PR&(3S-7&0P9h49E4`iOFJ8!PO>x+0r^9UkA^wK?!@p_sZw0(3FII# zJf&*AC(pcUQg`u3=yV)NHAew9Q%Y#xRc?$?lFa8~#H*UNAG&y((dc$8C_`~k{%Z(dY*59zB1 z%z=MK@Oo?k2+t?` 
zFfR>_Fq%~JeE7+1^q%-5;{D9E_Z7;E-(Fln?>{oMe-{v6ma4XPY92krE#O{3P^VN$ zwT=u7^;d+Ari;O&Y{^I#Ux{z#+n^$ngdE2qYl~s4wlxziXS2k27$p4gdc8rveQ?q2 zE+K4XOuQR}c%&?Bb*C}!w}{;!o!bV>i5p-^pXXu_jo3(`hT3&VM|4 z>?=B&;ROFVBDFhol(i+8&3`=}uGf|E#PTw91uz*egB4@0zJHZk{{2@Oz7sN@y{j-a zGi?LJT#TLADW(pn&wt=93WBtGqCrJ)fleW`cRL7;Nn}VW2*S{b zxD39D!V(!8bb|sAjokPn|3PgS%NI&{YW&}PsVu2r6(6emQ2N@R-Ms(Z54r+TCU+sX z;zp(!#OHO=@2hS`2c<6g1-N3*uclpiDfglUR)>Yir8AhOe<*_NWx}p8*NrFA*>t0G z#RWbc0r1!B``i?HqCQmjLJH^Ek4}w+zDce3diCj{sDXQ5amM@F;oYN^^9-s|1j(@S z0B{sLM%ArzV?K1IYx|xsCfD$Cs)If1UkE)W=dDeh^}7MI#s@cQeB{>QC=_h(?(WnQ z!3x7nrfWrkV7Epd+Npp!>pIU*6Dj_aS1oY)m+V<-2|p*%AA2Tapky73 zrW4|JD0+>*(39-0tS_|Qk@th1Bb+vGaI)WC)eT5&S%364mVOpG7T98fZ80j&fuf0wQ?PHjy=9(5 zR|d<@KrskprY;F_hKZh8IK+vGuK{!8;?8^I*H6`v{!-SD$}I^}IDaS2S(U=E4q}GO zb`8(Mor{nCx`8QttvQHmK(hPbwO0SgX&LJAvHN}w6CI(`TYt1Wg(G?hCsy$FX(~T& z%%M{m;`d(n{G1V158%G|Qse%x8m{Mfz?LwBFOh3G#sMHZ!7S` zNC9Kgdh++y;ppJ1li4OObjChCOizzwzpQ{IUn$Dm(#Xza zCDZ*`XjHxL^s{OCXZHFde?O@*Ulx?S77Yj4%45$OGPf4D4`^ zJhZfd55b^{r&Q`{0o)703n10P0RtG6EJ%NrawvgQKr&srNy{=4b)JE<%Yu+uKD1hu z{gPv%4~MtB4^)yf+<^iNmCpaSERB8sxv81RFiTCF<)B%6JsR>aaRV<|73vQlg z?O&nteGdiM)BpoCR$8hxi4W#P-wd;JbJ?HF6UsBNNpVlt>9kFPBIz}5piDyR-K03r z!SNm-RnFbjUyd2WnF#b-M{&)qItVQ9;*#llG#g!t!B0Y&#n9D(Dwo0&Ylpf#lYdpo zFPa@S_NjXDkqGMB^rP^)N9|{KL*y(?&79bCiW8MA4%*4kWjF(v=w##!Qzj_3XNFO3 z&6<^G48mfD7wyL?g49mfR6U>kmU7AO)`b~40Vyk5x))FMp~X8#A*9TAWs~4uR`?W8xFt-09b26`x>T7H(BO{aFpP3)d!U}9DQs!0X*lDsG&^T}bLcTI za@Imv#1=Gm_d-hQvYkQJrIOU13V->0+gcTN`eN<6L(MOM7FM>h*LAEPH>&|QALd`b z_6)xcwP%Y5(i)hn`0qEKbta8eCxVcHW1I8d;TL%|H&pMI`5ry*+*_yC%m(53o615BC4I1ms@ShU&hY!rQ%xFIByb z_H3PUxi{M6N6wXhZXm~ z`ptmrX!5!p06_}}uT$8w$J!AY@ilGgn66B4dTU0jqvMkN{e@d4S=;QzDUm6t`YQ%H zGYc_hR)?%emmCGntheXAf6j31p{HTIMV7EeLijG8xP!4@`&0_Eq)PO~m1%Iy5=Ap}1b}#;zxOX+l zYuri6W^> zG9L;+O?S4WzE-gJ&C4vrWU2aIS zTi5G{^n22uE4x>ed`<`X$t~)Obq7eI@e#aH;%Bl~1IN}Z&+PyB8HyR`g~g z!%>_s;wRa`F3;m=Xs?LQGGdx_M> z&A0@2V&w17Pqz*uxgO50s7XM&3pqSWA&A6KxPUM8UK@eG7P&sJvDhPH9QJYKvZ zS(T#|7aJnyUVR9RF1^&J_GeqlU#;(Sa`U#^_m!*=M_LW?fV44%znN4U?5A*hq?kuY zz+n6S*E$;kE6_WR∈!^3(>czVm2s*OFz>ECMA(lusN^+nW zxgMZ`#hrsGVf4uA0k20>sjHqWN_dPcs|Z0@@7t|Pb_j`t;?}JyiG}mNYBRD&a-O{- zLTOgtyG37OW4*DX`Y%!mE3pNCww}=1ilpSFbrJN* zEw%m+dpO1Tf2)rR6Pr}g)=b%4^#~ z!4X2WJnj>J`NUIrxk%4DsKY+`Z9Z#w^%cpgFP>{+EbbpiB`Gd5<$2}6g*Le7LwLpi z5G(v4E@U;sqd$MiM(TGi+X2sPRIi` z{e{zWF^qGWO0)!(>@RY2=%oIr9!sy`qir-+z?=>bV}#k%+>t$D^XAFqH%IK-TOFfM z%umrqf03g^2iLm~M!2RR&O0kV?zY&oz%QR9Z<~o|-hHTUSKJ1H?5>?Zr)`nBIl3Am!L!ED(T>*a7aW*Ie~FBH z&eaU(gbJQF9NltGJtXciN%A@4IPrrE&^&=^SSza6`lXWF{$>P{cg~NB0F>+i2}6Z` zk%&LDL5^-qTG~^jT>30v~+g*$4sHyJ& zG){za0E=wzD6MBN;)hh{6`Re`slNmg0bvZ%tsBo z@Uz2?Agbo>TL;bC!*7n}B*2mhQq4QX14Wp-?ZDeGDi6*-3X_j&Uaa z>WnX>v^-??#O+VA1tO-ys+o{K?j`jietvZp)o*<87T2ha6u4UT0=ZO= z(CKUbAGWnM{b!EnJ2o8E+;ly9a8tNhBXm^r(A+;I$o)?V`eIvJz(DnP*j#{#+b#R+ zO+vIy$NkL24}68pB<2qsszR=P0KW7dGd{37cFS1V3E{N#~IvKb_Xc z4$>OlK<&4B)Y$_-z{ZkA?a7WJ$0_R09AeGw9I4rv`#3QBjk>%LE%MkwDGcDYHSRmBG!1A@#U5#tD!UbYW4ap+)xxp z0Qu0FXpp#~1C+oyw^HwtPwxIsvPhqswpi&gv%nca8;O#!Gyc{`q3oHf5cKO)L?j3) z%<`Qu$r0aX=+DfOS5Frky!q;x#=aNH3yv@TeprY|H>)d!m2N6K&PQ{$LmkB9HuLEb zJzFxHffWpe&<2 zv_&Pr1A><;#W|17lc zLgY&ey`_S%#CleNeUiVPlt;4KRk^J-|1Ce2`upc}BQ)S4QVmGKk#8aD^mlLWT3WEP zcDa*F)GIsQiN^);U4w1I-~!wqh*$!f$@%A0GB1{mSKNB=mf|S%9&x|gmznYx3Vc#9 zR6uHBzrwBlj8Fw1pN++_hBORDV1cPc-M&BTt2j=W^~h51e_FK~to1FRzp+R5vD;T> zr&4Q*w0GdNah#}fX1=Av>xRF7pqqa>ywc@MHW2oI|2&v3?Czj+v1War_()sr*^kdR zzrX)zE3UX^si%kRi1< z{Upz;Sk7*GASWSGP)+rSrr;6TddI z5elp*vw~)MEZGpKxQgxTlo0BNX_E2q&;YyuCnj@1C3QUd6&xBatFM)G%tQc~y#^Ek zqEi(hYm>!7n9emTAoKS-F#8<6@Z`05m=)eV(?$__@J%4}LFv*WFrQ%!y>@?W`q@ 
zOhk-BLF1~0(hTT1(U5ed8gK?7eP!XhP=}}HR@GYVs>v?as+1Ob^{VT}(&z%Rf7L%8 zfCz~~>5CmVSPyZlXxwpH#-Hqg#4rv@NI_z+pWRPj<$NgJqUROl@GP|C;peSrnqpu* zs8&%`^07-VzP50CNzM7qxo2LD$cSqE#3o)980r?PIC5cB|MY;k__`?-j)-O*w2EN} zlK32iD~-q#Z|lfI6HU*F;$gLG?>j5>hLBrf`=-NUC0gXziqJV_k49e~TKo;TGJo&l zXZ5|6z4dz?L!-L)4TlXWOoLTRwt~Z7R?O_$nLLlYE{pwnlN$i;B;ME_0ZP(ix(na_ z{Z{u4$DPymTj9G?Kf*<~Cw8_D!FmAY$QeF^$IlMN-Tlp!RaPngr3mjn=w;1ru+I1a zVJt21EHW6c$Luj@#+h@H2DKHQd4O7IC!Ptqd+<`;T+>g%*b(YBnqV0t}BL0U8T45N5oV4bXZ*apz!k z+7)fEc)uH)&Q=S?yQC;2U>3R&qNzj`C9nA|!#N@R1#vYaVDOO51A{k^SoL_&&_ z#a7zDZl(P15`Xid{d3>nli&l~7}Jbq0)pp=4Y19E888ck`H*j19wr|d=L>4(f7}E{ zLODe18RpWqj&_R%Vn(X<5oLER@JBi@H_<-aKOl^!1i*f4!eZh?R5M$|!z02ryVE+Y z>R0?RvxR=S_|K^64SUprF5a zbi4dc=joTKK92kmaJ-w`<(FX*L-QP~jeSlR{^ZhTPcr%x z!YNx*RJXLab|GpgIegc7^#nOcSI03*r`qaP@r~e6Vw5hBe--gqs+Z)Q^ zG@pX;)AuJbJXhkSJi<5E9~{}bw|H>@IRP7A*wn#j(BU_~CbyqFnaMlbT`Tm?bNJ=$ z{`~WoG=H&d|5j~CpDn3Pt^b>KVLJJ3dqDr~7mRVkf`0R#J?44%yhQxupnbayxu2)xz7RdsR3kQYY8|1c#NEZa=?9(6vp#U?RF0`erykKuw_2IQ3B8hviaWYv6SNi_8$o2j#>AW zyaFjGU>Xanl8QPL*f9Iu3WR-g!PoXlO};fn3KFNZ{We|-f$n<3rT%rwmgdZv7!H9X z5Xn9N3t^cx6zu>3GLrOrWU=5!-o|$(#>NeeQc%|HeMybv|3Mvf-#7b@h$~kSFnKu+ za)Mn85+pjdCa{2dmf&c-AjP6kl(^~_<$t6v=y2BTrR_PXOb}Nd0S=E4;jpm6lJ0%` z#vubsrBcS?;`k5Xa<;x6%8CSRemG{onQKBaIh7E&EkJ9WE)AKH>(Rv$g4c_4`Kr;F zakrUw?hp0>RT_}039uvo5XfjSpGE&>#l>ihV?7a7YLo5Gt;8Oa?rf8+&ySCFZY+;g zq1Ql)*|65Ufo3p12Z0*A+|0(4ckQFm+N^rG?w$(Le(n1M=GX4_@rVz1+G;`Dn)tSX zLV%~LKtuw;{psR)lR)!gkD!{LwpO%)5{sO`n_nr)Ug9V1W2L_dbw>nox&5B2R*J0b zs6l6wM3CKeiKTF(CUDW|`361L^Z9Qd+$-&tr$q~OLv!^l$Jw}GYZ`Tm z*ht_TZWH&lR^)#IJ0t9y8G&dm;rfzC$FHeH-uc=@E*twq%lw@CeHkGxo;jmqQzH=vB=wf~yPgTP;*_N=a1wR>`n+=iIb)E5@u#r;nmsV%OWD}?raEjq@^@2} zd1fj+y2KB=_jsFS;<_QIqZj_AOLeTR+Vgc#W@)m9w#0@rhP?@$w5Xq#@lHdkTbxpvS(@oXic!> zYee!GoNVazM|iF@jh8kGRGq$iZzDril=vU^;N(OnDOs-n%55rd`j;?eGmyE>tA!6g z9!{M)IpdW{d>E3ctX_Ju@XY+%e-n#Wf48E$7s6LLeO7Z zQ~z>YI&D;|`xii)V1dc&r_Q*HQVKv0mRGZ)_+W2ULM>Uq;{&I?0<60Qy`b^B+6twP z4&9lNgQ8v^8Mmf3ZUwC#y&(uo;RcaiW714%W8n_%4t^;ETz+wV=w0)5FZ(?Ep}w`7 z?>7M;Zfd&NaYV0wXJmaZ4B^~VN+=@b@{STBBK1nWMv|;$Tb@#q+|@*o--Q!-wKCD> zb82}iCKAtwiQioIo+&je~#O4y?4(uN_ys6zGXajAf&R|Z?bmYcS!Y1|K#&S z)o2!6?hl_Ka5Iei;Zqa^u(9eC0h+V30XJU4Kk49+G;t!Cbs$`g;^Ic{93`u(V^of5 zx0npR4ZOsSubVAaeIE|NJB18K`+JEOQj!X+s#o1bQ99JwVJWX4l7qE@Y3eO1RX14h z9s{MHLtCbkH}_F=+efuSb`ZW(w=r~_S=H!fvQg{}B3$AgVIRQE8n+yfaABIdx`urH zLN05n(HmW*T~q(7^nta_0FiUb#o%o-hiosdaPr%$jw$uuwF`b!y}p_%VZyz>W?n9+ z00LM|3PuwMoCndRDg$m&k`B;-0iW7Bt5ocDxYn2Zm3oyO@;o@1rOSDbU4DrKUh!YZVO84VrPAm7mEAMSdZpGWDA6^$&I(N zqN>=FLqA=eV^(eE7X)c?V^V{np}GeM(so80K+ zQp7=Ba(me0+b4bt!`Yiqg};^I|7cBE|S*dL{jV6bV6 zd?d_>A@ZC#_{p52Z#Kt_F|+>YL`o8U0OoVwbK3yqgN7|BQkn&%ZhjY4~8&i4jOR$CHHP|hW3IaWL{oDGWbOTGOCve{%Co6AXj(*>@E54T0cFwW*iEQ`%uHB3a@?nM< zLc5V2!+q~>X6ZZ(>1o)-Ikdb~7R_~TuIQ=qVZYs#>GB^N>j7oY!Guqb!li#L(#QT+ zE_N6T+!KJw0Y$03W0x>$?EDFij;+r4(h$}WfB%R7oqDs!_nmv;eaiKPPBJZ*Y zk%Xwa`|(b8jC)_7-MH>RIQVHfVRn!&YMIC(a^cJD8pUPhiEO#o@ZG_8eyV#8X%+9m zuh4l2Mk)6>3z+GA+fXbYSb-B)*B;8PR#yweKu;AcQt|#03@>}&oHrd9ja;q2@vqSr zxVdpZpIAOlcys%(P^6gh?aV4Q`ualWdr|*6TIx$>Po2F?pM}4G=@3^Ep3<&11(8wl-Zr$SUi+14AjL8{Qg$PO_w z)zDnYdm-ed>X2tQca`eR&b1eBU7xyVz{X)}VANF9MWVWednoOYM;TX0CH}uGjjzym#*}&H)W{jh_+>HP=;&)!ot%BZ;0DA~kU@ zoOu#MxW~CfjH++LBIaj(>Ab+?_hfdGfUm`Gft;yR$t1M+=;@cIZ=4p_?KwsqV~^T` z^7#~{^VPA6CDckGFt0_#eoQyU>q3YidGRErIp@9^&1n$*B<9n2=%CQ_Y0=@}=3tf2 zZ=6LIn`?r$SE_}1hZtF=@WjQ)`C;maZG&id_SIeS;9c$hGowqEUAYX12Gr} z+c)AM@^ldyfkumkpW6$*0*L-~R$uVk(DF=@@x>23v7B}**f9<#W;A2T;j&D_DB(YO zhP2V0ALW#>Z6s~<2@w(Y4@IezT)zC8rPaskZG-uDl4`eX`1j|am^;(@yJgo;

d9 zM4L}1P3G}Qi38DOME~+~%Pub5&NC}zL1W~7aQHvP40?`d(T}jPuYNX@|MS{4EyK33 zw?I(~XP=*MT^{*4Zu$|+?iA(z;2!Gyujy}2x{~abY+f;ktDqAY^-S?Vzcm*=U<++I zzQIRGH!_hH)aE`rFZit+$j5fJhcUxz>fJeK=wK})1Tq=ohLJMD+%%$-6t+bqleA3K zdsS~_Zk>+v-yZ;^u`EnyqJhWV^W$^Y+%!pQZv;rIT)@B`l=>7WPij@MwtM7m@xODgT*J>sA zO^=t#ek$MYy^2h4;eM&*f?3>xs4{*yvA2}R*BD_*52ti z3!;twRwo+((QCs~6%RWFic0|jfqgJs3kBjJevpv;2)PV2G(R-!`TqD0mbytr@(3Jm z(L6>V`*YNJU#FGb?iB%dh&Ra@pE{w21i!0J#K1c&G)V%yh!D*YAn?@J2phEsx8`0g zg^T=zMyIhoVf4vA+zQ}O;QjM!tK%lLZt3^TFX?`a&BF{PW(IIB(4D9)w~)evUmd9} zkp9P2>HwB%JU7F^00VjsWS@X9(%_)tcq^PI0sDYn=4wY_wNdr_Q8HFIek+%LI{D^> z$~0nhId_1aBN(fSIY-X2snK_I+VZPcC#z;>Q1z2O^!U_bz15rC5I)77@0YyNy&ZQv zH|bGn8JrVxrg{61vfhOGCdb+!bE;<5eetdjZnP!IUYJS?*0K-yegi-MuD@%(ayN13 zFV^B=-POn0Fbu|ja_d3gKuqt@Qer*3C);&}AngJ+qDh^~_D+LeH>X zUIqcCmJ$X@au%%qYIU!4!Jvtgx{}kZjyb*tAoHV9##>LPf}cBQ5u^+R_1JK^4|Jgh z`|%&L1@U)STStdQU0syHVon1!%PvIvon}HHOF&VE=7NE~&!1R|dC35l8L3K*1s5ai zAzanyMP@K#z*a!#q!$8<@c9?_ZA<~CRyXk3nU2i;20r5M?e&qVg@UTeyAxEN-qCQd zg?C2PXbmIIpOB+8@1u-apVDq=nI`Bav8hS?olho`FMXFE*$bd4`geg#x4cshG`-)j z3GvT1mevV(Fv$7qR(94;R?tbnU=y6sTF9De+W$$X~@|7X#P+_LTD5 z6LK~)?Qth;D^K%v&id{W;mMmoG&iPS7+A1H7gP#sVi6&%KG9}}d~(Vm0!PNDOv`w_ z>k^SYeSFXKSX2+2ri-ZHt$V~(jvE7xe3`)K;NjD{ho7^*bdDL4O`#5g-z}5e$K-GzIw|~4in&aBpKQHZee2b z+3~B1CTHiBHs_+!vb;mKMb{!1!_yY(R;|`(BRg$aC^cPgb+V`4KA26AaL~*#;{wh@ zrYGKqC}LmpVU&$7nqvB(8H+Hn`e;I~%ne$-*^}wxa`$amX#SQyD*xB!EdTf&*$5;= z1XLUy3B^Nh=W5Wg?op?%u&8;wCm#l3naRN?p<2Pa_Rqf;MHqjdoL_x`VL zNs=-9-U))41Ac~WR=gCI%;5_@Y7U&@xFN`9RPX5=)i$7iOfLn`NVHnWmw50v5%2}8 zsAe)dQ(>KJ+|-~2XiD-JuQ%R+8wp}vosV|;WEKl)SZ43k)g>pc0mj5i zt^}>R7H^&8SdsDs>fd&8Xl(4bwNR2;+(AB$fBKY;iu#QWC&rU=|d^nnSop^x3 z8p#deL8Hg_c?SeMK__u?UPqQj+Jg;<*^90lYB-w;4>e3tliS*#j?h$x&|iWtPkCZa zq&7UCY=3F6db9n2Dm#O8$^IxAk>kdB=7@c7d0uwM*v!IneY1xe2{pH~sboCu6bXfz zn=FlQjzIY1H^xBZ#507D6gv~w;)o@o6$U!X{G{h6dw?vdO24IcaYea>GWI^hZuQ2W zm81phlLu%-LVu@=*NcquT|({1!mzGrpDgzKOog*-+#&V8v`k)QEWeM93;EFZi z70A*@k&1f?Zt(`ruTM-E#39cEDGsCOI{KN99iw>EWpo(Dtx5N9?awrKg=%SVm@*nE z5C~MFu}zni4ROjz5|+zRdRXO>`Q@N$ADOTHVa_YF{cgi-wHIf`X-byQNvjHw)OWtT zO`7cS@O8G#A~>!27jauPWJB>siT68Klip$f4Tf%bM%wmT4*rj7gPu^>% znVy6i{J{Yzp%E+9S=r64Mi0gWFbz{EEaR?2**x#?v*cu?ckEtMeb@h_R=YAdUbem= z^F)bWfe0D)3q>La4Q8|Wbhj!lo|MYVx7gR}Y_lf`yGDTjRI#VInKT9=<=foBJTu6hTSR!H6!5Pk(m5D?OA$RFSS zy~^so;?|ab_U&U`XiM5+gCiZeKD_vZmx=!CO++59^%_o9qAfcBbm@OrXjX9>-s}U# z<;Ok~`(I^S8NvQ+nE!01wA0#pEEmq+-At<=iy+_I^A;u&7!7)%!uYkDSSZbTG&=kB zegc1-G`RBqRnq10Ou>JxVqkyJ9)zoPcmrx$4nei~D94 z*?3twd()S4PvkplW?~EIQ+0asR>%L95I6V2&$SN^LVFQBQNsIZ=@F4qXaFa!D*zTr zb};5j(V$cpZD84#K5u(8uK-uaFHBa)upD#Vy>M!!wNp|VQ<&lMcI#}93KS*j58?rk znoZjPL8v(XTs;ul_>OpkT(gC+6Ng~}-e$lNuYv8n+SA4`LqBe1N~YbJc!*n-Zk_87 zt@cyhrzVW&jxNw4M#}VbL71hqCJ2{2OWP=fv+23u3U+OGcgHN?d zk831#xNuZH?V}J$Dr*izs!+ffeg8o-oUiqGi(bdKH>2-w9!fsDautU5sF8z;cJay< zl+K53EvNKttl4v3^$~9jYkbEW-{M77_|Rt@Bb^i5M~-tGPT;V^$;v#7XqiBrtR#xNj2wScYY;V32T*y z6dZl$J(BuJUY8s4kX#&P9=R1@M~%*f`!6{G=)1>7^07G4k0~Jj%~Erm)-r^?5egtwGC>jf&47>(9A{1HN$JwsdaG=_){p6;Rb#Xwu$)-MhN#kNIktf#((!~Bu^AF zx!5QLX~M1E(>vZuiDY29a9yZrVmAG}1~GZ#Y^Qe&=-7hEaIuzB|qK(OY`A0+%IZEa_L zE$_I#gD{gC&e%_pdQri-Tg^>r-oABaA4dc|T>nwk?vqsXG=}T)po(?Yd+F?rsq|yI zjy>N^1NC999COs?#x4#J(Rf%K?b$NNo!sjpjYcT0csMYv(;rEYl30SF%C!C~*l59K z?)eP=FVlEyecQ_8B}4m$6t6S+Y|3%`^FOLzMRvWa^Z>CzUW7g4Uno1mr$i8H-Vk+Hoch@bYW}4#yPenp=H)b@tih*OYI>Ff2=E*<}!R zQ653QdGRHVI-6h6pexvxWa-ziJSaSMANOKr|?!C!@6>306W?Y9MP|7)x?I3A&)G6SIDbOg@0IhUe8L z|D;IecRB-R!Zl(1Mvo;Lv183j8zn18FgZ*}J(j0vWshb(!6j}% z=5^@pO+Vbw!h#C?ljKRr60R8Khe;;9A&lmx=6Dsp1Pzz=3k8z3tr>joKOJOt#)$bE2~AlT~J`rv)73- zSTVEi`+lHHi9+E#Qo;tOlPhN29k(#{;luu3p_M+=L)w2tN_r{!Jfbx&-xJNPuHK3^ 
zGQXS$ITJw-hNxKNwsiJ+26OvvyhQrX3b$S^dRf~bVc&b@z_Jbtk%5rCcKBS`IYK}e7aEf$*Zl-jr!x`5E>tqzYzgf6ZVREAjO8U;8d;C z>GX+4!c=_)E?6Foo$(Q-@?+WUke?czvvAF^DBw>z3~ri8K%Q~NlKQ4N;&q>lF>&GR z<1?jO!L48R1VLPKoR|0GjtD;ei1FX9xyZJ6KmjD-U;o8n+b;OHn=m5T=UV3!Xc8&> zhhy<0*K8A}Y@xj4>;LFyZ^)7V(IXthGs8DPPB{d8FM)-g70=Q-DgfUn$IG*H{r7!~ z{pXTpvO*8`RC+fESD8|sfnAGBVW<+Ucg37o82s5%vdoxTun*1v@Nt6ZzUAj60X@Q?~-y z!u{=gGoKbvXAqEcb4%0tb&r9TF|*jF6}(m#FGogjCHl?Cp)uLCa{^N0a2gbtD~okf zw_3!59@hVkD>3_SmX(LU50i@;MBZsqzx*tc7;O?<1@$qKk7*g+GGa_GC0f*295T7# z-{*FVrrFRyislwEc_4H$z54A`*YTbyTw)CphB@A2c%W7*x!i$05|LIDt4(_5?}y~Y zGi2{@VepPF4P$5k_%dUcR1CGFjTTo*ag?Lfjo5j`U(efCR?kNT-?%vrgplIRzQODB zS@yL7Wf|~~ORApqrs=w9Zq!VBaA2w&QAjM_vUIOtJo zC%z!=Iv&HXbogRTVQl}=rcEf{jo{ZH!>MGA*%^rmTsSvBSdm_7Q_ff|1^4BiQf>Hc zdKG_v5LE-Mh4VHGWKZTq7w75{UG9xV&R*=kn$-v#7Ww|L19bvQh9k|SBM$*Bms9mH zfw-Jo0`N}5TV=`hq^z`Nv-r_Uz|I&kNfLdwI4a4W3 zFYD&-%KD3{KXCA2|EUr+98Z;L&K1wwgH`)L5LB$W)jp~sf+y)!^jIWOKe(gM}A6699*P4|v+Nclnjrs5yuxVfb@|S|-JmcZWh%Raw&0L5Jkt79rk7E(iK7Jew z(BX5dS%`U+W;DMmdtwQ$v*R4o`8y!t)|w56bC&?yMaX#7I!h#i>vU`)RD^Wk&FbrQ z<}IcQ6pzjV^4Iy7MDoUefi%`i`hpk3v9;@S{^9o&vcs!H4>&xQ?l5FMq_=Oek!zM( zQ)*JEK7ZyPjzDb2Kbv6?e^{txP!Hs~+T?K3=I?1~KEM{NcOY4DUmxgej%{sBU+fR+ zV#|sC?Xp7o*ONR<1Vmfti;S0!Xb-(Zd7UR>Ez8}f zyoz%O5-O2^A*d#CKWCobLjXwR01)I#(Ml=&1i%}k51*J?vwYcP1mHCpu!)Z>CBB~b?na+(lB4Gnb zlA&~E8Tpy)cDGqk79E2%mAy9)o^Y&5oc#9UOEHkro4$fWq@XIk;VQtGO9%w~gn%!^6@sT;b(SPOoM=MWJAUmQt>76VxeHUu_Z;+?!E(2YAn$Y{ zXKl78@8Y}LeVK#rZZ97IY-taNt*`1I>BfRm&h^ZBi zz~V8CDMbzBiw1hnUohPFNv}06Gs0QyYX+AN-1_j}@$g@mS7dXhobML6aZ#hE;Wrg&*rBJr zea&$kIO;@qB2j4JGA_1f@6_5;{7qRczhP6(vcM(@KnKiN(5b4_2;w~w9|l}mKa{@a z%R0deg8WbwX)K~XtW{u5g1eph8GPjA3>LYrqlC#8>5(iF=mc zlj-{Gk>u$`&}aw(b0WqfS!k4o8<2&(Fn(VD=dt1>z5KYI5DW zvn@&-H1BTz5=oBcfO_m%p~f)WaiS`jeJ5;vG4fUFrSmd{k9uytz4dLPbj%YCmE1SdzYDC&irjye>R)O=Oi4zgr^wQ9`nlHUBK-oV`=R?%!@1g5ZyIH@f z`;T3sYuV!8++GmsekdM(#>a=_85mIMK%k`xsC6zXv-uLGBKQZ_ zH1BPMc2vbd>ng2|TOYOcmAqQ6hX#mU3XVx%h0z&JBQVv7zj%*&c0Q=ra}@&TSgR?4 z|HOf~2}AQMf7Zv|2G=fPZp$$oKEBC6zVr@L_bSqsfPky>(H}y|AI;2l9wa;fU0mx_cOAfVe0_=$ z2659_=N}*Isw#8&U9uo5?wz-@7Vn90f&|Ad0lJYzBFii*DbRZSyx0KY`4X}WiX#ZD?hK9+Szd> zyrnM_3L5*SO@+a^6Ybe+eBK@`&xP<`RlE=YF=67>02Fq41&Neqlq%AZ4Ye@9#T*d6 zU@r(qE$m(;jc9)L$57sH{qFqaFM7z1+F9nZb@Cc*bV1F~?;T(P4_3QNvsb8k|CsyT z$tj}hRi|#1Fmt~B2Ac@CGK;U`5^l;D=Z7wHkLb1v8X)S7i`@m?Z!;pZ;F=;3ZE2Iv z+us03YAJ(*C$YR^^lVs_`A+A?iN>XO^lx9sF>-zYhzabq@W ze)Sid@5euhcr~UpHd(cKd-L-*)$YZxEm4F+#!3C6e)l)CFaiu@{6!NU8hReOS?;rr zbh9;gFS+ugvGdJ@ZHShV``|ruOJ&!kW4-?Caf8b4W^w%vk6(`|Snm7y35pbuqf`ZQ zO9)f=y7Z#lSouxvG*}S?=&EC`j?OTBC4 z#QsGW=#(I$uNe&7wf^WD)$*tZVBn1ga>9+DV=xtsaon{k(74yCS`d&gv)gLWg^z)d zu(ffS;_q9vnl$TU-P#zsIF2i-K|_Wluw-E{Wv&Wvs)6KR0Yjt#+qH2Wgk`z$NhYDx z^AJ;}{l7cfMZRRAEnehx=}#8xY0$qug$j@ddFHpO&kET7AF|Fo916E@`?Fvq24y#6 zNgJ|dAKM^N3faq^WGRe&*C3{KP@%;#wrp7{vSp9)t1xz1#}Z>33XLUtukPo$@8@~n z_b-3QanLc>b$yrf{G2Q`G1?sS=sf%PY03Hiw{Jeav;$!V-dlj3CM&8N72kpNM4Y zdP*dM=OzQiTB%6I?YA2NpcMI73LaV33$DoxcAC6tgz3w3AcQFQO73~Tbz$-w5Oq1Q z-IahM)y+JRUVNk2{rcUBFY>h0S99(@Ra43-XjG7aQUd>pWI-2*$(TS>&Rl&N9ooiO z2p}GnfxYI})NHWi%|%5`8a@|zr7cq-#3B~|(^TLiP^8Xd#r-ckxGe=7$0C*jNIn+; zs_Zb!o+*}mEVw5x2Fa8zLE$d%g;IdAB!)E*rG9BmBqf+JIZ+urD6};Bo-o$<&F5Pb zyKQ1k1TuB1;THeucvT#H9D;txFPGh$OI{EAyik9uy^EVy!d;c%`?NxT&ca6h5C9M% z>eVer8^a+E8$`GIZPz;?4l^&8hZ=Fkg28&^)YHb#Vn0*MBIu^cq~clp4R;@dMEo>>;rt z&!^{ovKoyz$M4APZ_mG4Tz1+Q$^d#p%h!2%rBc`>sb=Jz{gdK|yU&8%vw~C{B756c z=C=>FrV9QlnCjLh|MW~o;Ek`i-THG;OH{}NL`Oyoy=aNoe zUW~7{ekp>eqXPfiv9v>yl;#v~y+<)TQR^ixIS>kYedrYO9k=3Ayg;ll) z!d|6RV*U{_MC= z3OAO>w6dPmCV5x*Opp(5S=9OD_(1TM1yVe89=FPcFDAi-6kyI!F+Bj9VP|63mv5oO4jYn= 
z-Q`oER~vJ~Dp^!R)t|vwbh^i+NM=85ufN#C^8k19))Drhw_G@RrryQ}3&CTLg7yBy zakuRZ_X?}}Juwf0^Lo0G6Vo_(R zzmi=5e2Om?|L#>CF*n8---r;-(q`mdSf4>m2_S2;|Q3o7G+rQ^oe|t#RZ` z`Zl$B#w`u{V)Ko&3<;v)z$*xmQsdx-6XAuYNrb1&*r(0Rfj6+QBd>`fP_N)^Z1V0fFgx^Xu&8J zY3$@yR8o_`=2QX+tl4wkuU?+me;d;n!nmJTcGi$AGFoq{OLn5twqt(nS0sK|9XT(Z zR{lDAt^lS+yis5hl8g~nSP5wCcNm5zPm!Tr^{uZFu}{O5_Kkw$dA#_rbgmu{|)LqBFa zdMk@8zD+3ChRRI` z%L?_}GoxA1(VZ@qs5Re!K-!-0gE%6*$Tm4=WnOK!Fn4jYznk=qKP|tc?Bf%!DgVP- zp?~&QaFBc0Q7J4N_HgsV>WGtg7JZHj#_?O?RBWB}$}MK{^b9@@;r!46^wqasO8Mc8 zD5B5h63iWDQzOZ1#98ZnStq@kfm5PV|5{wdIq>F%R zx?*-FI`m^GN2ts}5bXAGDodZamX9n^-T>(2KMLT^fwSpf_)sFLR+Y)0bsS)n`>l$A z&D)SkUCD~KnhoF;ZNrQA;D(5{$6UC(Y}E5-9js_`&fsh%D4%Afv1Xqq>Iozhc&_oV z`#*nD(U#dmG{qvsY7k(-&J07FNaEX`0a;XJNH>=3yK;r{zYn=pE27tFoFL|P8)8e% zg8g1{Z%bI)7G-dSlM)r)w5g2n?KwMrP5vKcR8LWCWQgpFgg25##v>!;MnwUBHdgcd z!G+6KqaXgTz%DtjbHUNNqNTBj5U1XcWVt> z`n4)6Qev=5=11PK?l@1?Wg*#y(JH3?3E%3nG0dwU2Y};Iod( zSbd0`_Y~kcuZ^}Z_6?Q)N&VxlKXE@%=it*s7Tq!Edb5iR-Ms-o$mHi;$R3v+@_{msGk(Rl{@I}w5MRNU$U6re6x?g|~BKKo&BU{wN43E5I+cgSZ~Aev~*4OR}6 zJpPu-$AjJFfw%gG;XB{0fq3XBlB&H)(cf&|XZSVqw2Zie*_C5twqU&um;If+)oKq4 zHYlWK=Sn@Caj!%y$5W>LfI?R6k3k;2{V zUf7l+E~9GLmH<<`B;tR19OpgW!u{(hPNIy`9>egZ&9E-}ed@D%5iwg< zzvy@`?Sxf|oMv>v1#{~|D;{D##N80{xZSNgU_=~q-Vyj3;ppe^U1d>F`Zpgkt$&^^ z|F2lBKNkg8@X~O}LNi~u$>T#uoo#}gaN}vNtWl3#(0^b0Q-H<&-{K7?J?0&{{fOZx z%1nsXtb=a?EykZ`FSl*Rbj+tUo9^~y9-VQzC;Ui2{cN@92L+c)(+Q9Lrv9mV>vxLt zgq~yt>+qgiJm-h*Hr7<8V7JVQDkbaQ;An>m-vLY*B}V%LGYz-#NJQ5-Ro0EtvZW5A zymiz?6qN2>gMt3|_-R2_X9;mEKq$}C?)BLy%&ilN5dXqYUEKMl*Na}Yp;(PmB|+Sg z6_*B}G=0rg4}e0H2u&;oSKe%I^yuU!Y9DlFJmglovF^MyRV}yBe&=o2VJ`|*wr+jq zUEntN9nvv8+DIn|^57ZV5r|&>Bkrx(Pz9u|D{Jgz&l6CAYRb2_nI31U{nL=jTY2Nh zk!$jIpj|IHAQC>vyz=YR?8V~aZYeE^fK+jc5c5~(7z1|w^Q<)Nu5524<6|w!^^iW& z*x`f^=<`;9T~Cq;O%IolZ;0Lx-!-wY@%{#IoPxBGqcO;xui&YtkLe4m`sRfNELez! 
z5dDw`KjD4W`x(KjqXu>p2lXLqb9+8;dbRIRfY56N*N#5c#aG_Pn&_Jc9V)#-~f%?d9Go0~Pi)+ZKwJC3yfAX;)PK=i5tkoy^q6BMxGTX}Y+6iRe zEsCQ(o75)vc*q6>at`uoOAy6+PQFu&xmkjB+64&uZe>tj9{B=Og^^s$@4-4}4nMA6 zQ0ko4Wv^yaee>P5-LE7L9(4*Xd?5^ySMEUV$c@ZA)|u^DoPHgN^ma{Z%Lobf^LSYt zT3k$N7niZuNy;lZ!cS=7SevVOW@RFB8yv`=AU%gzancYDz|**Z3XsA@KNIV~8-lq8|-ow8@Bq=R3#OOAoDu&>nJ z;}ISXVmbQrX)|*Qj73B=U#vAX!@PRfkLk9n*fFkh#UIhM<%2hK7`sHV3K_zzRJB&@ zKcdl_;xcw`(}5{+VcSHPT#Xtqd0jR`&5I#If^nB20Rh-_6~p5seH09w_ak`Me3p?Y zP|zXO*x7>%pXxgUyj8ya6K5@dhD2zShXUBW)1GS(bje=@`#tOUq*-WKv(2ZbI@z3L zv60_Uj_5Tl;I>j-_;#oE`SNDZq6W z;a?|J0Dnkn$wy|7`832Oa0*WoMLlb+dt2ji0blc9ydPIkf065oAtltQv)>l@>z9M6 zRor34n7N;L)5{%$Eb4Jdf{UGYyr&5RH(Imu+e$0;lJTu`SfM7WD&?(Te+O&e%2^8d zM=;umF}MnHUi7>7)x)QPHcI`n6#tN-bbhA*y1dEQ2cPVT`u1L^Dqzrv^Kz#PhKi0x z2}3zHIU<>4u!IY`gpk1F4ruEVfQ%)eU6!Omd|Bs`QXSAInfXP6nLrpTF%QJ2GFlF@ zeg;54Kt5BW)NYT+3n_VUZ7uevg>PcJh_ahruWT0E`Cs)1^k|cHy_iha>en3WrVc@E zm||Y3cz$jF?6f?R45;soV^b5pZR5+Rw(fkn#_iY)9lSjxc7DBCS$V{zInM4QSHRMH1hXmasO^uC!0`!il@~9i1k$fA7%BXmu_hRz?6U0+ zC*_{|F%t;l^!l!(zgr5d+s=7d@APK=Fvw#v|(pMMKTmmMt5&-u#Masp8DJJ-Q6dbP$<#`uy6pm(5 zXFmzWL40s8rGk7uU5gWzVn)wUao!NMCx2x>rdB6Fir9h!2wm|pdRT{=pK4h7!jl1PFQ&7vDxk(q~k;uk)>jVI0e!Z4uY-*sofnE!t@0_h6~5EzbVkpFNX#`)gC9Yx^Pg3Yj67a*cU2MTQZOImc^tev zzAUWnuN1YrQ&8!@1)RiF(xCN67aVy225P6RSk1ympp@xdm?pdbz2n{~{nsuy@D444 z!TG>o%VIHblALW8*Atsu^1F9@)%-X!`XN|Fv;LAWVoPh@^z5e{=14lG_Mn+IlGt(9 zSqKv--=K;U+X`Pmhd0=%dhL1H0R_gPiHIq9U>k>W(fA2o+NYmfmL&g>M} z2*y3;p>RuGbO6qn?Oh;Qo4yrT_!JPQ{vswj!xaB2$B4x5wIulPU>wnRxIW^XE_tY{ z(tA%O*nQ<&J>gd^Lon8w!l|>8I}s65zuFkG5Xr`|?|eF{iY{GEU#oQG?@;ai;B6;i zNT^?E{Oykhd#G;{Uvjl*&Pmy8S898k|nJ6x2N2Yl& z7%_TV26rp5XL<%KwD1}zbdFy!S7=R?+J1`Kx%H1A-RS$UQsg!+9B@2}?)*8h@2rcc zrVlhmA-r~eei^8_aP)88pvJn_95+M9mr#2X5^>m__M4X%2Zv5uPsUG%Ez1b0Q5{UC zm_p!7Ar)j%wyht|~Xo&1(raa2Z?1?$P{khaGWq1j)GF1~pm#_?ukApwGv zR3S!mLl8u6Mrq8&8a1;-IQ8bcIyJMWZTiR%6KEptvY%Y@iT-L+-=*=QqsjqmvwQS9 zf|q>|M~Y;W2ztLDmqv->%zk2EZ05(%mSv$pc_sDQAMoXaK9NfDQVVG*2g({;Kmh>R@ z=V_J6A`8PMN@K>*jK_s8;EB!ts%WZXkf<}76t@HsG_t!(RYAO;*wFuVI2su~6~vDq zb024Loy%rLj45*^QZ?t52(e6zd=~cDKQL{kBdnYEsIWF9GuO-qF16`w!@`1DSuR2H zhu-LE)DQzF@hp}j0j(wykMcZ1K5s7)Sg1)|?+c)U1mW(>QdIfH+8bbPciDNhd;{uE3!L&B(4;NlGSHb_WVF*cehAgyd_s!#@ z&(Jy*{}u*{hcJEk7GEpYlm1CMd`ec|nVAHFk9VXWuMs-)YTWj=qp-2?LJ(gi2f6io zsJyD__uKA!%k~$dXhZ<7IBl{Q>nss{{L_}Y%59Qw1_aZ%b5kpdgA%i8YOSF#XX{Bs#-mo|Y_sVcrw**D^Go`U%^4^E72C?39|e=- zcoggSdmjJhmS&#>;MSn0e^U~6j#&<*4pYHc@wcL07ZX#v&bIUO*IeOgyPHb}T9ctq z>jLS2Nptyt*_cz18JR$DT+puD8X8;N6;d^;k}k`;nC76HW>a`BMYjBsm6T2Xl8to% zkd^f7b870LM?(S^82T^OU1f*oY=OeH>lNXd)q6oR>hK~=?Ncu_32ZRccY@v=r zFa)&?xoSqMedXeiJTh7GVP~UM=!&~OZ6TX!qIpBd`f?5(|Mri;da@J;1HpC?&UTho z!N{~r#h|6e+ZyE4rCZ^7p^JL{UPl-ZgSDT8K0N;ptHOpxm}Mk~VxCkQgMorSD|Q<0CSU)J2C?&03dN;Q#a9^J|`9B<5`UJ3@g5}FH|?HlJ_PY zzkKE6Kn`=(LU)FvtC$;BAEdE!s7{tjn_C3A1ugcyK%;OkNF$;3Ad&+}b$*=?FA#5lgQ$0n`+*7G*2=?aQHBrl~18E^!OCY041@ZBR^c{`| z0lT?ec#mc@G?ZypF#7a17B7lqVfu3-|Ean*lYqdbb?WT>&Zy}agCjRaY+r^>8Nrli zo-6=W^bD>Ig%Sr!R{81&1Qc6*u~km$5`48dAGjBIg0ccs*mwHXJJ|AUO#@(H*|tL zr=GzRcv)TEs^9y za&5nO?QLYd9HtvDk)-=~uZpsN+QtLC!$c;qw{tDigGgT_jvq`8{#ZP<*-Lg6{2eJj z0~b1bL!LuODG3a+$#mr0n4wP;(;4o(6-pz#DxwNq6}`h7zLRe~Q0yk{)xT>6rrr__ zy;T|wV+A^og%n<%G)X_#fAdoA3woG46`If60w3;qL2kIq~cNRn49yl z$T*-1p2vBESoC{9L^J~E1$51DfcEMmtnvp&{y}^DGrkO!+Ij2|S1WH-gDR(#J#Zy^ zA|kk#G3EREA9}i+e@}JPa`0%qyJ+d3U^t>NY$~IETytzY+78&l;LMW0d`a-qdoBc6 zqMGxwh*whxU~-E=PIEYVi%-5IPsp;^R}t)$H|Ngj>x%= zx+{v!9kUdR8%Q}$RqM8fx8^9jC2F(Q{p&jc3!cU5%qV-p>_bVV3;mYR*lGK4n@16U zX2KQbaKquxG(tq#|KOKXHkbPeXdWX(eDg1vDj$H?wLf2w5iGR7*s0T>jX>+=KL85y zc4PCRC;7CbIL_B?oEqr|zYBEwFPzFew~!u%bhuWAatQjK61dz|jl&8-L-XEvZCt!1 
zi{cb`(8Gd1aWu8Pwl{uQah#$ziyl{oQa$>GvP9+l6atJbm`Cu&X~J9>&thTK66BibrMPS zSkB84Zd82-b{6fCAKA^#efB4^4g2mVGYKApfZIOWE?(&53)8u279Nax?55?(xU(X+ z3&A@MP~BiJVA&~Nu1;=fAxYqc+0|l&P-aK`u@Ka2NMK&A8qgU52?5|KxOf&HF+l>+ z(b8s-O|{4%Q1NeT9;joDxH;~#)Uv2TgB?Ml7*qJKn_qvQ78{z_Wb88KnuHo3m5NAftxzqN}!L=P{W z9NMW3WDmf8a1`${0`3NP?gx*3&x!jF%!BRulQf+L{xKoixjOMfUdO~=?Dugga1EgPP13iJ`@TSPrYP`BW|%!gk+5unc5KqWrGCHfN!FuD zXzNIvkH)s?2$<+6uxB$SBNAEI1+e>jTeBodSVz>}FaIU|{XIs0$keJKI6lT}O@Bl< zM75vwNW$i}t9J7KdFr`TDd(~Kby11eo5pp#>+7d6 z$Wu)5t|Xuz`tefT+sg9M_&CbLk9h2_{%EdSt~&rpYu+-a#5irtI&KHIRc~rx$@7;h zgU0wq-?}1JM;kN{fh+qioU5bn`zLH(c>+{Rq}=Ma1`y~vvO2%_z`8yxgg&lYA41Oo zg+_hE?FNkw161V!HI9=h82C#J4Ouk=Kwy9xL?qnz;X{30@^b|c5XdHdiW9?wI~O(3 zWfX(P3upc1!y!_EMcx83I3MzE&kY#(`|$w zJ1rZRU#ca)zMQc{&wsJ<6Pq15&Tl*s6|)z!#>{A(i-5wmTeVzFWI89u#?ykXyu^fM z%|<_AC{Oml&LrPNu32O`c29h%nJTz!(Hz5LMb~0u3~nFt&%|{@Rcku#NA$G5P`tS! zP*xSU=lrRY+kWkaeD~F<#+5htTdL(9;@9Ly`XDo z0~Kaht@~-4HayTU{b6w=8_xgOFxwA;#1`_&H5y$OR-IqIE2afoVD*5T4>a!Zu?VW# zhoBv7a#`vxl-1T@4oC5vBX4i{>^~lMl7oEmb}G4fIYAw!?UO%t>+p7Az#iI?zl^gv zad&CgLyV%|Y)IQSf{M})&Z>zK6CvROk!I^I%F5TdBxu5$tYqrwyC zDyyy`P&X=s|MhgBPBMeSeV~CGK-^)fmKP}8uZzwWy^;vTRXmj7>=ds%5I3rce_H$7Qm%Kx(PYhS!8GA8hjF_Jq zHEjkL1d$Sp=Z!7e&aaueeVoYCV@SM zWdnmnLXOnvbwq2Xc03j}eERy*w55i9>-ej|1lB~GJOT0v>HW#C(@DL~-9&mI^F6!nE~-@+?mb;;V% zSr^P!b4Qd0ZA>40Yq*t$C$5f7{GO^AmLHxd zaHYjfw|yHFD*TH}PDjXaEwM=uJEkG60?jG8&h zniM88di-GcGg`bzrMjWm* z`Dn55s7{1RI5^E-D~=wt3UnLvvMjyGTYPPYt$4>Sa||d`-D-_j3uzFe-a3 zG+5GKrR+rs8g~4Gn>)!6g6eopSU&Zs-MJZ@J7B%;hiFC=)E9s0x&KnarIb_caLzT+ znQg4u)RMXFwNt?nO6=((M#Amux-m@62xDhvefD8h5^y!JDP5||>VhkbQx@(Way`^6 z4yf0oJBb|7Z%>W6u8Qh&xif7(66lGAcc&&y1|0J^95@dBl5pjgWKi#4qx1RpUE-u z%J$aQPMMKY+dNH1F+<0e#(tuO_M%4X{-gB0^dj2vKUYLN3)*MwI8DBjyQF9O;KwDs zSj*Zw<_2`p;peSEe~?dRTjlE2M|`1InDiS0!z#~6$bXJSlX!IpcztAt&w_;zE>wQSw(?%wdpsI7?j)Z$ z-^J^Wz(QaptaCXgErG$omtOy4alAsRNx%@h zX7JVTL~Mq}p@kzD2UuZM@7GoI1KYLvZ|@^G^^ZW&P8&M{$`{!J61S;vA@%EbTRDdP z{rrOh*Pla>!I1(q%FxsXecySFyYVIj&8AKvZ@E&ZpIvfsBO(KWLOZkRNKipvkN-!| zLrr0`LLpAD7Flg~-V*CiF;snozverKe-&$&KFH6=g8k8=L z!jzCjq8`C3w}Z4}2T$8}H_t8>6kUDVvFZOM6%b79mWHYhHw?Z_=jq4}6M~1XI&M=Ka?~{HWW}X)UHr}DHm@H~PNsm%A?R#O#F>BtaH zzliMS2ob?WpOHyXSM6x6ZQ=T<&*|kS_b8CT+ucg9~H0z=goaaHT0>juRufd=^(*|5d( zJh~5LAV*k4eae=3BgaGcIoQp-b^>jx%~tJvzJsq*EM)R3OnYf?cGiVV&cH>UGwAkv z!x_4sO@ZUE*$4Dn_F6N z`nxPDH9<48Y*+%iJimy|!Ya%VSXy_5T#;ZZ08___L>5a0w~iNh7K{1eu1K6;KF{DVKW>W-`Lqouhz_s?v z-aP#^S>+(ND(YBORGI4IDR);L$~L{UaFMFqt74qc_ugCkb-BYGlPQ(Wz@BfF!$a}A zzg)u?YoSULsnv3%mNtHhbyJsS_-w6@(u$p3_rJ}spPwzJS?D%u@c&9yL0H=UEy{Jt zQ%^{_5Fa8$2)h^U3X{J_!Z*W>`o{7q>$IOSH>j^aR!JxD>fJwW;gZFSgF}D;`yL=KnzLO*H?qUgWegdKlhD#tV zP^HB4`vYE}4$=b@z+#P|#KJpGF#~U-+{52`9uBXZernlt;1KwwtGbs%bs%SHusnCU zyeZkIu840Uim|^rGHYrM;;RcI7$Qm@ROgV7qWyFKSkxeyqa7L}m?wY3e^4Z195R@~M1mXSa0EiEr!nxrsP_lL2`*EQznvSa z_WizT5%Fs!cCRQMx^XyWo^xhJxjTXZ#X{86HSZ1#HJRFG=en}D zo#BQME?^1Gz-J~B8hJ!rgDJ9R4~my49CVSFFl570_ys`z`l=cW``_SVHLIs@79uEG z{9^2mPX|b$S(8x=hw#19>LR8p{{+M2Tf``|b; z?&(0yj}3a%VESmK&*cnEuruw)FFP%;2Z>Hk@cx8o+t!R;={}q(+|4#X-oE}|d`b~_ zr{_7HegT{G;vkx5gzo~~X1rc;L^77=YWi89zT*k$^P|l>&V{p%68h&M=l3<+7I>Od zn$N(6MR8i_n$iRg(GDN}0YxYkQVE?4WlS8bL@$1Of2v+4sgW%Sy1*ov31O9Oq+Q3X zjrUpZt^^c)4RZ^v)!SjSth#7bH@zz?0HJ?B%;~Y3R6=D4Mr0$~Gl`mloSykTnI?wi!d~tEi3kes{C>Av)AJ!3h1uE`Ay=ttI z)zbLAQMEbw8Yn+Oa=0ZMFOg*l(%@<+AJx?%Y7k8NNc(gdFH`50Y7>WI5E{GfQ>{aZ z5v@Mna{f}7E*KLU#i3s7#UQ~+1pCm=4L~`ErLR{$6l4p?d;BM~G&@GFDu2>Aa3^bn z_LzUH?|sI}aO~Ty_nDiw|Kzu5hk?afqlj@AZ&IfU`0@W-Nt??>VpAE1_l#b_cO^*@ zJ-aBnXm?;|GLkdeSV-~a8>W2@o={d24hA(7329Y{uCKXa_Zx%S_($N6IjU54%ocTy zY4&5&R_{feoV$r#xodA|MaF})xTuvXgQ!y|&Y^m~a@PSuu4nt_l=Kf5|BhXE?J3zu 
znzMn#m@40AaX$j?>^dkk_rvgFalVr8Bx?8fhG~)SpUHv3#f`w_)oQbE{`~!iPNie- zX#PwLq-7HTb1=U~3@6FM6pR^>(I=#8K1WIhv7*gqxG+B%hCj>8%ruK`)a49NzGsBhmZb3G(^+ipL<OeFVW>|l6i>93`6X< zYfP#7s$_fM_ig6ZLq@G$ptOKk1f;QD{7d#P>RVi1QlyjczQ-?&4N-vMGbKeESwW-0 zRW!|bu8xV`{=!-5KIItf^#->=f@}@RjCqiMWke=MH5}ZMz0v5q=2%_MBjNNuTt?&i z`AGRE37kg(aBoqbqVObbdWGu7EW)A&g3_|zw;g9hY)4lzUfu6M_IOaqngyeM#Rh>V zSjYm%Dii^;N3YP$D!5^sex9^<>N-z~N&>@v*Zi2Fn|%vkiCcJtCxaDg17oBXpR?nj z5t99(`~&bYIpv)1QGE@TB~L`cY2pV42N@xsS-Ovo zf2CSFuzAmoLs1vF{q|N`w_1W`m%XvGa7g(yJzAi{b0z^PsHYEt_FD9rZd127g_vp- z@ENiY6mWUvHqz6ZxBA{2axTp~9<@J%3nC>jbweR=ECkZx#w%rJFl=3F9$!}En6d0Y zdd&$Xlq^uJm$&!8vAM<#4dk_ zU^46Jt0D5vkkFb@G(24}t={#t&4)7%k<;;kx-T}qU4^j*WZwFcehSlcNf7hrj|wW&siOV_uw_w4UGjJ!Y%?@@~wY z-9ogHZ&afRWB-)56NXq{-h9V6Q%iz9cQ1v6xlQ;lglpQyTQGO_#7Fm8d48-oY3 zljKyoUPoutoA;;snlbJ#FRp3Y{Kwq!`jSpL+s}#Gio{BF5-f)JN9qi1yXP*7GNQnC zRIP$91_l>Gt#6iY?yE?Nge->%nn9bit!td27EL;g+#lQ8G4j4|tQNzK%a0Pn;sd@# zhl+gV{VbX*(k8(1yYAq#Rf!t!6&ek1Rt+zD0AwurbPkU$FP>wLCGiBCzISfM9-p6G zKzZCASy!m5Y;^+?>{GpKNUijXDVXOEc%@AY9`a-#L+ByQli|nE_uL9{5PbR95qtqr z!+eq^Cmy8a#2JfRG(`Q1*xg7r>@6w)KPcs>3&K#c#q|mAH=soUspUx+QJnM{$4Xll zG7R}!A_}RYF@J~$f4CJA3rQ5Tw}5*V*ji-E)eN!RRmj_LxtztQYy0BVln6=n|$lyi@VWd7ROPM5PZ z_ce|`j(%&|&+8>1BI27=tX93ov32o^Ho&S885J6rkK%)Hp5j!uguqsosL`_P&Dx=| zd4sJAvsJV9e*479C|Z~5~(2CF4iku{_zr^3+2jYIOVE3u9^5Y4EA?Tyt?JcE~J(;aoqmZ%@O%V zq*?;!#LoGp654p3bx+k62j*&&*a~@=Prl9J)>lMG0z#w&g%@Cpv8!c9t|)}hwICK7Bi5#Q^ucl}LAd4v%aS+bVskz;6k2lyR9>`n00Mm9 zqbUvC1pVqJh3MEVh?v_&{RM?>h(K-UD(AX*S>n6EqF3)VQKD(7Jy21jAm(i-|ApsE zOLaoov$H4bYH!NOJ|em{W*mMsr5i*qY=Z{XXmKTxwT?@ceXZrfa|Y?lZ-*N7*((H^ zrN!53>%Gu#!zMVXbY_#<)^gX+*$XEJwDAju%q&>bj_4r+yq`azb6`2LGCwobxYXJ7 zwHe?faK3jx`}seQ&}S+XH$lcEb5qSap%v5D_#lXSHDEt!<1i{Sp3qn_&p=Ur=r@vF zo08fV7x1VgJhHRvO9i~=@oXp7(B=Q08kusVgcF+-6Zf1Fm(F|Zj9u)X^3>N_aM@=O z8hS0wv2*Q1d-&L|HRFUjo#qKo>dfOxx3OU*S8PalAC^4zt1tCX)Z!1D^Sv|P8XkL` zbdDBMuCmscr&8A+A4F9BHXL+)nVY@8Xtw{em2%sy{g3r*|MXwm*;go9H;VYjvJ?6k zEzu;x!oE(D0Q$Jo>vE3*nqy>L8%m||YRnH~Sr3tx>YP!fzn|F8e^{M);`HK+rum~` z-?dL0$9Io0`9Ae8x7%agjsCevMp1|;v~Zp_pe2+y9V7s6xmO@i&IARhx%^2nVSOfd z?IZr=C6u9zQ}q3^Li2c{4xI@zN763}eLKUh0+D}!GP{e^q(V-`2pfdiHzTkBeR|C2 z&AqW9IQLaq$4WI`>1(kUdE_oMX>5lCt|*ZEcHCr1qtz4V9qIz}4;?+py#zaR^}}7h zY!k@%;lup*-?tgZknJyQ9j8cSdKsFVUUpAXh)RHP_}w zx3PZ$E(l4-3HnjE??VZaht#)g$8X^+;++y>iM4F%N>RhD@r5?WNNrxywxd8`clx); zz=AyGt;$7*t*ZKiWtpH91bVCwji$ecyV_Oz7L;EZh;9q7-sc;A7bZRBin~kVQja4} z)B~PF7dd&{Cg68PP?80C{JBFzI~`C#x_5{y0+<l?ie|3kRX5;G%{p{hF1jSwj0~0qj0nFH@gZwL9`l> z)X-iOWX%L@ceZi%?lUI!fhmq0q-KUfc@~$wxt5Y~+(u8QRv3ZUczi_DuRe2 zlLEy{1zECSR+%Aw76RM8vYQ@|-p#x1SB6hK+@q7VkP>JE(~)hMJ7N-KrMny@uw7_g zdsgErQ{=r_i8nvQeG-MgK?4FxUoG+lM;N&T;5nH_8phftY^(=RE&F={(U8dTQ$DP5 zBG=Z(yduvz>p2@d-<9G>5EwVAQxyyF4rP`Fnv8=TUR-lb&djX-L)Z9+`W6A-vBZdp zTmCZCj{Wq^&p@paj1!=OC{Zum^SC%Df|_s?4iJb>irJ9JLZADDaG&QTAN9@O_Z&R! 
zwfot2_vfD#QX=Ni0OYsF0}T34`sE2Bj-0D?-b|Qi(gokj*Eid~PJsERgYqqiR;KS# z@s`qD$`G67Pn*%@{}#gky^K8XT-@^!EdE!8q^V1=b0@)*B)}`5)rKWIaRe5=YLi5& zM|Z|BH@S(+JZ4rT=k##7J|YOR;*Q}woW)tVL(eQ3m*`FTZb)qUeI7Bj4vhRIbdJ9D zQz3C$NaA!XyJ`Xg06m8K!-E4LfstrqClH`F)SHI$10cK4R+WL@{9o$$%1)=PcY)_w zR?;RjI%H(p@{NZdi!cr+KLY8*2gKy6{i~V`5naDgtca~g-aG4Fm`?TIlx9YP&53hWHbUrUF zMI6O*2_!bhtsZt~ez>3*->m=ZhdhA0IR!uXkSxSHzNCgiT^W^nF-3Zuziaa6|idUl)V^P_B+jF*n($@>cd%LY{RSkY86 z9$ok?CiXiX&87)dLKPt9bY{hw>XOC$JDNCYoLwS;2#zeSfX~22v)ERsqiFGMGm1-T zNQCX-#~X_!?7`y#L`PUQYY4KO@ zx|mE5?G?MDRrUq9EW_$NoHJTIIg@(p;TT-9j9#B6r2U(#z(H?a#Iww-B&Yo$POUOe zrPOKqnOS+zl95Aw;7te&VU(ykMatBgoD00>+q9Lc0g&Cp8fz8(uqF5wy$g0chJvW6 z@UCcB%&4Ba*|0hf?WBEsk9Zi4GS(Zl!(Wd-mT|cfBm^4PSm>X9` z7BMRWdPQc`ykPL{=0jplzwX@4fSYRnI1V&u0e@Y62`HJD)l^H%lPtnMzuRxh;Z`}C zIcl`c{>(0($~w*y?cVG+6@{uS3m~s*U*;V@TA1y3b?4{ZXBN2Y&$Jip6Zjy41zF)j z%asshma^P zf)sqe{D&V<;dBj}Pt9V=)?$=w=6@AlsP^*aG!C$0An}QXg<*5s_dwBu!YyG ztOGPlFb`{B&079W3^KC13rqepW2kEyI@-=2uin>j=LtkHbJ^7CcYyd-q~B?oW&x7H znCYS&Hi_g2vP!6%9tOT&+2$pcXhtvojnDWy7RtKtZ=X9F4hz^f*6bsVsHa|9mr+alh1Jv4%5YDU=6gpcCSwBjPig_VnV_@(lymA^vY0 z=iY&+5^0j_%-jW3=Xp|?J_K{z+v+1%7!xLsReecSY?T)_dzkM`9)M`~egSlr?{cNs zCXG#b7VdUigF*pPJ9OqNq~i3_jfz~>KvE3n)%nKO$2sg8=@6p|(cP6_IPc!;<8t%Y z2KLnGBeR9qs-%l9ZZ!T3EpU z_P2K_YgMIX6AwwljiMe4-467|F|6XvuQ{BCcLWgVHmF*q+eo%7r`59#DWGq~%v5O{ zvoVAPhf-Zg`p$-q)FW%}xdKB4*SF8S83T*h@n{iK2VOpfg&=kXN}39jXxL|O!b?G1 zDs=-=?m35mh{W-RlAOa-y%5wN=ElKNq-b&+r}nv!i6H(zFjPgZb=Rxy;XP&Fx@4a+ ztEXP||A($OkB9pG_y1=MGu9boOEZ>Kq-+(&*anFt*+L?u4aL}HYwV&1g;ZpOY}rbT zCA(~4vKKP;vCEQO->dWcz2E12&h2*o?Z3Kp&Fi|J&&T6_F9sRU3n1&6Rw>wSZbO116#~7|x0U*UX zj;Oo2^j^{Ckt+O@2v*Kw7*WMZgvQ>So132N&OF;SwEbzoG$q?aoQL_vIE^SPA>`at zcfGWCoYn#E2!wGZxKNkM{u8Iv+qFTiy%5L#_lHA};ezk)jQ_aT9qQyIBHsu4KJ&i= zUd;!p9MIE4@v4D~F%b3OgR-A)Y$K89u9}m=Z3;GZ5Ry2A`7dY@lO zGr6J^W}vivYgXLj)%9kEJqV9d)5R2FC0YYo_-^VECGaC~wd#w`HG`C^2i(0|_*Z$X zrn9R)ukudig>IB7k-v@^COq)7P$(S)0|NmQN}u9 z6t%{OT$*e$a6ygHveM7B9PT7ps;t~#?b`WOf>>;=0M`yE&3MoTn?8*(QqqVa)@UI& z#;`9bTN^Nfg(@i-hu#SAmGmMc0V#o#YcDSaVBnz57>@|$WeEuz@TO2JTQeEM>wsk4 zuLq1?x5yJ&|H;fqefRX-+Xv!;Tdfj~Kio_Ezgp;he!Y>^bz+_GZiMrZAml<%+rX0T zZSsE~{GOV)eO$+3TKvjgdxk_}*XQg-Ah1@hjLw zd!lqEz@Snqmyu+r!pg*ht=&;~Ny&^HNhC8E%@V_K$`){Gp8HAcUM-aS4Y3WEW`IPL zk26zZ7y}W~o{iN)rh&-kYeBW&*Z}N~n5L0OM(<5=Sf%oLzP*?rl)D{z z$o3Y@xN$1*xrt%3UF4OqDG9+q_iNW}Y6hj-riF*%l%{9dQo6pRHe=4d$4_B zo#GcW{OHd<+BQF6KHqfy-Rr>sVg>Kk@tWR3N}_Ycfp5k4{Jem*4iAKtFj3fBK_sKn zm$|6}l*8C`)z>DUCkwU>3;%6_+;IGvZ29SaD<>m0?ZIn>-s%U}=col3&-#AYz;+n%;>9foc0?m36k z(oZfd38X$ccsTHOG*Yng*9pjB$q>P+MfWbbL7(5Vcj>cPZ{CXYI@dlVil_#*J~9W2 z)Ft$Y$3BP0gAJ#_!$#YyQOOFd&QWabR<%?p=TR0V1Etwp;ppL9gHpM^oQup10}&Of z`WIKY>S8|54u5R?WpskzPfLQRwMlN-mPPO>6k3eA<$7Gxh6@`#K;dKZdKbAeDHXHf zN17Im@)Bk4A?ZQ2$_&5I*?>F=+jT8#9$`nSY#EjzAm=SEHDslniH(B@x@zyT&;W>F z`Zp^?hyJsLjUX@ykn0fiZV>u39L)h1IxJM9D^XAU1xA` zbn&4;eVYT5(owZf>Z(VANe@Hl7Y!a~w*SZK=2Y%Ks~bsKI^cNWMBo@^17GS&91(K8 zKahvo+g~~+H#BgPNk8<%c$4V+BFM@1FGU9?Lk5pT-<j}{uCJgAxMdd3+xO0G z#lGuGYUPBHwH!SF9Lk~m0>I?S3`o)~ii1a>JvB)NGUxn){)%5DAK#on@YpE=&j=pH zgdy^39|eDTj(PE9A$H}GE;6j$S^DeaMC+qTJ;O(xt90c+`1WMxVSlC*n>%(g1zjoM7oa<*dV{@p-dtD-u=jcH1RUr?;T<-F{^hz7B^3@+& zW5QHAcVvBg#0#A+Q@S-v9a8uRLZ(K+#zxV5KhduYry$7|F%3Mev!dMS6Pc zrXZcfv^|x%iyB&!F~;v7y?;R=Yf|I3(1iNk&H-?6y_o5cbm zD<~A$GpMLdbumH&vDXL%KOZdz%ncL@Z!CO!j;rYp1J#p+S2cslDeT#oT)Rd#-Do_Y z1Y+MY2i@m+UtpHWdNtpTsm%Opo}RhQwWp4zH9sD>OZ|EKr{D4uo%Ey3E%r~R?*{#6 z<stLkL7c3@Qq;0b9e7vT;sPfL|ia&{87R(AZVAJRuml6bheB7u}jnW z@Pt9tPa?b@=17?coNE=|mWSLYOE-!wd5Ypmf;`@el;$d$IxIcX-;|xoGtF#! 
zVSe)kBQ2C zvti$sn1XNb{R}On?i+JFf)-u-RF_rJ3OIp@92bqIdQ4vqJN*rqP{%-2#ML!J14&F% z5kVZw$z^n_)o6&FD19VGEmpQ7l>*x*bMw(=C-THHIF+0K>x?g^^u88)RB(vHiub8& z-0Dn4PvmlXF{j7gruslnB=P~h^$sT6AsX51Mzfvc3?>EWhV{LA^WfWK!Oaw%M_qWG z|K4@j82xi0H>}fWR-YgP#0BWMHS46pOHbwxs**qC|H-6CoanVE=qh>Y(%sAr#Sn;-#e`>ieg#9Y#}8FWjlSxw%(oL74OHd_hiKT_8oBlUCrqyNG_T zUBq7#u2Xwh{=O4Zr;lHA~&8YgT(cVfO=F9HOv)@d)w{_@>N@sJuo4rNE zgcb)gTZWhbX`)$XgDa_Urn3w4$;EyXZi|X~VqUW3F}tFZ{7i&=14j351{-|}NL-|1Qw2wR9StEN7y!mX6ZvQKKu!oeKR8IbO-l-urLY-c-`v@i%< zA%H%G!qR9IlPFTb2(iXy`y zIubPorBZ16mQD~J5f8`eJH*23y=Q=l_lppd>OW<(KLfL=CEhd09|u~KJCU=gkJ1*0 zyuRJ=3b^&(gSr0^xR8_pq<3Zw#K6o7xna&-Vv-Q0@ziihc-U}D{xL zQv1Q_p*1^eV**m_7`b)-N=cJIS=#BXlL6mvG&ckqx9tCU$s;rkxpV*Xf_T^AlU~Uc zUnf+4%Y~30u1&8Mt{?a83%8JSqk}Zp}4MaZ^NN?fG_cuMsg-(vDDU2Gi-{heyp3FqDcnyBjl|QxW8@a{lfjT}4+j zOVCX%s&tE-4W)kB$=VJ0BsY~y30HbwLvGcBT0F4fz4)^U34~F@Hh*#0w~0>y+jZ@3 zWNoZe$^F?g;j9heC+UW@WAf-lLjF)1t0-VzgFUGSAn(J#nftC4r1N#`mxQv4;V@N_RNt^vkX-+xYhQHrNXIp*$u-E>Gv9>V#SPrS6dR5Y3EVTDRHq>}G@jABF8l=b% zc$EXWVr;&OS#XevJFJ8tJdq6$Btq0_js5reW3-%}%2ANfQt zA2L>`Yb8R|Z^#7yA>bkGWIi|%I!7o92{F-4sS9f~GE@l$%zTj`bnc-mZ=XNIjh#2+ z_DV8P`gMz|0riz%0|k?0P#fTPu`x=&~UGP zgI&j4#|V1B=6tQZRr{(mqF#;~=kblAuvujTX7nK2Knzy(ZSXXNi9t0PBDf{k`e|Z* zX2;bwZQyyKgHZQ|f{uRD=!h_b%4DD{P=(}v=+DZzxfu>t*Jxkx_@;@IrueBj|rxJ|*}kP{wyRWz%7=f?PdRNd5;?mu*OH zc<=&_a~nZF1S)Tov!KeB)a)TRCxg6z+dbLt9U%shdpYw*WjPsTIHd2<&amHl^_G?? zCEh=zP(iwFFZW&G_gN~}C#9is*Px?6;A`#fL-SinU;NL7!!`&m{(yW?ViyvO^m0E> zLOO9Re39AJo~O5ea&p+n?MsF`POhMGelL1cg%4H`sijfTv8QOv7(vlVS>l-Jm%yD; zbXzj!#$NKhXk%A#ta958s(K zsjPE)GZGPOMKEbrINH7MGL$AlhRHr|TCo@EuMyB)%>X)m z&_LO{DLU@Y3jgNeaZ8|ip$Kfhdz#%|oKosMq^In?ME>mTS0xU`fop%G2)uw5s}zpp z(}{4vR)RMPtsxon>lMc8{V)+oha=-yRmRC7yUt&;iUD8Z%_t0oN5X~fiJ2qjya^-7 z=ic5Dj8k1m2M$om>3rmT{=;V8XLap5zweLJ!heft8-|qCC24S$b(XG8R4k0{E)A*) zo=sFk%fGrnaaR;LnJ?h8o*|u5NzG$bc@x$!5vMb|RJoivpTs3y0;=_-t$<@po zCpy)aRlFFIpNbsJW+|uC?RVcz>BxzdR{7%(7ny2LuxBVj=~5aYA*&LDUk)B@tN z;dUO05E!xHlf;QPFeQB`U3V_y5R(TXFW_+3NMet5xfwZvqIz>h%Ub9}xDd;V z-HI5CYW>@3esM=XE3X`^s${tzV@_qR*#+NI8OqPs&EyTgCeQmqe24Lhps-4ek!|XG zaPUfmK1wdA>N1~aygAv1hi~!*KlT%I%s&yKS~%>- zssvHA@Lj&vzG;gl*eWq#Sh2|hcUJaUvbo9?^xf2W+$hzb7AEl+9&tQrXt+EYV4CiS z?tF`du@bd|L?TWOUYT}VTU#4T0`1bI{|J7+`R~Gbk4&0(VGKec%|uYDdqer&&DWLL ztFs6bBgMcG;#T!*HNSYh7ZMr~DJCx^Q{4p;uw=%BKs)Ju`N8QIrvC=L1^$9n0U|xM z^&B;h4SR4_1kp$)hQy3#tU$CvNZiCH49|odDT~)|okMVqi$`8~rn(V~`*Tj;ei{|> zqJjf)>fzy_ON!xvTl4n|x74;TPWP012*(MD3D1k4`#m8Sx_)^yu(B?Mv@sf(2VT0p zSEuI)t3~!B=2ltueRF;2V}JJ2UDs=2s2({Tj2KhPVYkRZhq0e|@DrLAasH{VyGuK+ zG8#=P8GaOtRYv9v8O0-2_m~}jevrSseNzyzWrW8xbAT7o&iIO9OP!@Y0OHvp7?oA9 zpAslygHy#VwmOq!&bwh62&4FVJzBU6NAM)GLF_~4Bd(!I(G6wqvr)uxXtRIWpS{|? 
zVopmMX#`W&^Xz!^mOOMpcHOm5)dNUVKS@Gy)wdM8b~Z*Sev{X|3;lIoE3nW9i-DW| zZ$S2Uh1X1hb;);p^NHTIL4%=WuX0x+N!8EmEcI+&6f-K9t0J5$H@qsAe$40SP34*%bgz2UZSxa2_rCTRnH~vRmssRx zm}7q1&y}W`ZclBoAj8LA1Jy2YsfweK@%XC# z%ktNlpQI-;cRO9Q@rW)J$h&%I=w|9`tS9$0UNAGsnerxeVrRY}ecs8cy(_xpQuarv zwe_N8MXsYf{G7Sz`>##qW0rOl*Rdw69)?a1gUgp4?`5#F4VmPN~}Pj9k_c03w0fCJEDteu^Bl>Q?X z;;fefLcuUjpssBo{#< zn8!p8&vlXoyNWnc&aJ33$WIdu$Qf`YI$esFyy!XCpDx;VVi3&k?`~3xvO~{wM{=oS&zFwm53EBCO;DiFm?8=h#e44R zXK^*?fPdd*y1=_OR}aPjI3@^809L3BQ{zjV5s0i})=3Q=W)+onW?RHLh3pZze$cZ6 zN725iudcQsDv@p7HmALT9Dz4w0&;AUpUvtF>8X=?>60#jaH0Me{mRu4#XVo^8ECYM zc=zB~679m6huP8wNsxsmryrHLxv1l^;U5jK^LmRn9$b=*8ACZ*;SgJ&vY$t+t}iBX~4Ev(`ef>ZgxC zgK~nXf8!B(jX@0Ip?0oS&NIGRJ_+M|y{j@jjeOen4)uYY-KU@hk&^B|PCgM8hC8#W12;@GkS}%vE=>9L^I-RUxpObRrOb;8{x8mEW3PRWfWy?8z9i zeN?kx`MUj>ZJVmhz!o#x>j>IJ3Aw0T1Le9Ls@)L~l~+`N(2BV+8&)fH!6PN0vHmGUNE-BtK?N((*x|q(8 zO_m-aS+h$<|Jo*7_CKR>aH-#&k&&U+g|(RA0c?1$=Ws9y_#mz)?lD+~grEi3Ln4?t zzrY@U2&fP^MtE(NMGk_7OIVt5j_tZWy>ji&Q(>78o~e2Fm29+|$HFFDoA+Dq3V)0q z8{#-Zu{alk%aLfsq5Q8d@kLgOHovv4x zd$KxcmZFqEdo}c>hKKsZDNZK$>-rh~~UkSrHHetxgrHARY8)4AZ)zh&sl} zH`=sfA>kR!sn;DQPW&n710{<$oQ*4TmG%f~ozgF4OyE9UnuL`C^tMRj76%bb^$1C~e}3?Zw#r zTf({sk=BdPGfqC^#MZ0?v(3I`lgWsdsfg@3Z|x>$n%JgsTgFZxh=@yO?ADnN?VsoV z$+v6mR$LqXq2N)m6IXl$C;7#W*i6s%?Y0%Uc(3MQ`wx~!M0+`9IU^1;$bOZF#@flI zLs&)5XE2){^L(pZ=4z+^rKdFJmO%`2;3WFe%B!j)O|=Me^DL_>SYK(xxQ}ccqT4ln zg%*FE6{TwRTnD`9tCS=qW2?GZQ7{kx{0`1ve^)1_;vubfjOT8dBp8e54LQ%m+q0|A zF&wQ`6^(^%T-96|%dD+8PR+!fd)9s<%?W1Mz*DZM?{@mfnPFLNB+054n$4w$L9;8K z*S&mON(9l#)Oqm%o^%};6wr(}Yl~|>;xQ~_ddh8Vm z1DVocdeQ%RneLK-3S7N@nY{Tk$ld;2M|(~e*Ml+}s@yM)ogOH^@oN|3{(Xjj32mz| z3=s|Z4%?aedN3xr@<*oHOR3*SBkT7zI1vojaPOo-SW40akmng>V-?(PgwC^{gBTKy ztZO|L7ACSMIxBb5;6GUEcJ7e~r=_ncc7FObQ&;t^_|eX`c**^3Rs-uWhMs!^g#&^Z zxC)qO7a%WNhmlLlRlfEW`*uiLmft)M_=GRt_!Y>d_;o(?Fl5O{Jf%mtdR{u3+2>e7 z=;7Vxi_eo0+=HS-rW{~VlDJXDnBui_xHrv25lFVU5L+A`BDJU~(22(+Pd~QC7*wF^ z!3&dZ?3IPCaaM!F68e?AJ3Qc5)2$tPx?FFKjg8|#sg&y4*R$wjT9%lM+48q;w=4EO zv~Te*yhx~d^5`mwDBN8xLJZSv>kw4T1d!k!GIaS{vv7bOkl$p!JZtpYUQH^wCYDc% z&{cy^y*`3jn}G#KGDbazh9}QUlN-q4hm5Z|*bsH9 zICz6dW7WeWnT_8eJj@W9S;Y}S}-<3kX5SDHEk*SP`>A?eOmVy)`DXhwib-pX;cX~?*8eV z{a_SSs6T2_9yIjN(CcVoj6#o{8V6?dFS|^2Dk`gT&*nq21$^TxO_P-qf3g|?;`{g6 zv7YNtUy;vOnNsZB6(qy=X`eh4LHWx+V z=*dAtN}Zt?9D81rW(%;K;A6t*RulpX{N zeJnL!4y0I}+2Dr_;L25ylQ&Ohx!tDLhQLbdl7#Ov(Sn*o)Ji=AnC%ap(2oqgEpr+% zZ>rK2+8D^`679-W`m3wG+upqkW!`#sLTw7K4ngoAMN9�T@0)0BLe=VTG)#Us3cy5r%-J}OWm6dN^!X(06P4_O++3axS95SkGg z0!Awk4@UbjwTs`^0?@y&3YSkn7i#D0oW7LTJw%`Jk>x;RiN2dlmsNlIKff{G+xj`x zvWKnX)wiB2s@f>EEs?%Ln^b4lM`cdEl*EE;eiGjd_ zP^Axi-pjo_3v-`$U%1k$v>JAp=_>1bEyd<{^GlV1xcP?E`CCzWoW9tmp6e|BxSA5p z7z^K1Sk<((VG?j4h~+TKyS?As|jVUkz6B*p< zN(6uY`4Pp;!av9VEuSm`lXPmsF?!3%^94bz{v)8-`{UTTEvYs%M#y996)%*^JS@Pj zSjBVaiFpLP7HVn2E-@*Ce=_LCZNYQ?(4l?8OI~OUcD=TEZvM!h!}(U_z@5BZa&?yp z1Kzp@x(`ac7GIRy>63h=co(^`Gy-NGpOGnq`c~5qJ|{m|9@dVnZ)>eR|BBr#RK)&~ zUgzH#SUD7@7k?T&SX~7{%BU4H;cr=e|8TxV^%(}tDRZ}WvRDj{nN@c35G6e%1B+VVTlFx>xAgdkg=zL<8FKuOsx@@>r1V1eW5AK6acLf&}8VFM>agsa?AaHhhl2Rf`bth>r>;X^cq+TsE?XkwQy~ZKo7s{RXL|Jx=#s|-& zGsg9x%yK8sF*EtZ3b1Kt8n-S=9lyG>78Ye3I+ZmzT$N13{Fqrc8~zg0r4>W2Mku=) zz}J~auvt3PiT8}_vRR1k>(&ypE$qJJ*oT?z9kWonQ5f6HOP5igfl(wxg~_WLey!^7 zFuiVKB`07j9vX}aV`o=`9OKb1f}CdXsmsk`{c`k0X$0VX(69vu?fa$A#K60c>*yH}rf(Gs*eatz)G-+?Z@Sjoc zy1j;uWdO;vW$fVae?_7prv&aZdY%)wFYuLm8wP#He2WR76rpG**f3yuGr+7|B=S+mwcqtj3W#mAC)j9nVKu9pz`q`$b1`!qeSFJvl2#e&93 z+Z4MJtq3pYhQjsdyANyuv)FlgFbJwkJr2PXFtrdT075^fz%YIok8t!@aQjj0jwsan zQTR=m#Ftc81oOn{vnuIpMQ}QL2te!r>DRpBd|V!EQ8oyjmKTx&(2QRk&v$?40xL(% zMQySeR5`d>#!_g#X-c#cnc6Rz!{{S>cs=sz!&uP={wmdK!!agzx7V+&CcCMO(`Bha 
z6Xz_}()>6es#qI{?)Zv;Zouw(TAA~<4Ui^?Az@xvR%_JfSYFwu#Sv0*WJ$e zluIfxdtrXGw^_oZf;|hX8NkNa0p>cI=u6EQ4vbc_&~e;!c6!SDs~8R@kQS+BmZ^L| znpzBA-1jM2EN+gxxzhOAJEq}Rwp*<(vl^+)%rP#K+g2k@A0qA8y4gTpR{Lfpad`Z6FaU3nyciry3 ziVOAEfYMU|w|J+siiGez+`msjaNS(Fa#NBXnVJ6~9>DBcP;Qh*W)shncmFK?^~gey zCbXZ-(SFx`#Y6bvr!C;eO2VFrXz|ps5yLojlZudw$Kc#?CoU&Xei@I-FZ*SC+LHf$ z>Jzh>O=E%JiPzXSW(AfVfZSL6n*jR9mCDs=U||*r2H(`Q*oKXAeEIyNPtF+7G){y~ zM)OH!j5X*`pI=ecnrchPQvWl1PtvH0hxe3ma)z_c2QNgt2kyLKZ7u$TFzF@Z8?u0} zoHOdQaWF~s_c6!5cT$&M<5J8-ZujZ^)y*fDm_;Bk?P63>Td{lc8;7!jfPJ>dtmBnx zbeDT-bwIWr5JLbH#{HIbio2gJSfV3?{`b%tabs(||5eHK%}0l^g*jp;ziw#5^8K?CCO7 zwSGYKi*8vnOMflqk`f;P>+GKWo?D&V17)Vyy;SVeitXtQay}#J+sR}8e<%6_FU;{K&k6X{CGlM@RTn|mozucTzz3iW^1$3{Y%hI zDUgmi6cf@u;p1hU6de-O{H@uS>{?RKQBr)6TC8TJrdOun6mUIpaU62~*os!__2VmA z8c%+6`1pu3eGdhDb1Afum&@#8r-U31hMB zZjep^nqdnHmgLuwu)V+iV)XY(<#HVy=ujp94~eN_TBHU{Dp z{%{U0y)gdg8^{B2(daFNEDPW}9{k$G(ql`&Gt6xm4T${v?yMF>8~Z?8_dQ5yE_1J^K&6VKe^A;Au z?;Fs=H($;?S2z(E=&kZxoSNI`M5#T;Q;y{U218njwt*`(?ZbiZeQ|l^{($Y{0p)i% z@^6$!Iq9q~o+U124-h+XyXc*@R8nThC5+zj=!6VzZ9T(cSoFi0Fd}l6jZ?P`Pu#d| zRa)k$K#IR|+~SeiG}e)@yubc^>te}$;Krynb##}Vv=xt_C>_;_fI#3-$dUKQUoAPD z*%?!EMB%oC&AWatf>%_1Zxm!4Ym>498WN;+AJ znr_@{iYlLi<@{B1Sm>3n`E4yDv!W6tO)8O?S$FSc8H*AkNa5eKd{_7sgLzdNHSa-j zhnVYtm)*v3o9kGdf&hImnGv%K9(MUi$kWT$70MSCfmyfb#1G)NSo9duRQ6i>t0i9D zKNB$L5zYk>l&ggvo)nOj>ep$9g8c26B?Whso)!yxAc0bW;244Ov#3FfHwcAQ=JBB@ zFbX~El*}EnV_2tESBkTH$S%lIy&uA%xny zyew48jD(G|ML0M!+FVV7}Dw#L#)fNQlrl zm4dOenc)!Cc^@{UbseR2lhs;yT(8yD|5yS8*UdKm`?V>e$fp9egQ+C*sZY}73-SCb zoU(_c{I=pykc%Ng)9bP;T$(7Jq%zG_g0Gd&0h3i%NE*1MCUB4e6iO%|N0O}5J!|wC zZtb0qRdud9fO~h1Oh&&1jUVb_9d*LX%j`T}bKXC@F@=lB2^aiE8SR&8Klo-PIvG1t z?=DENqomc#g)z`*rT|D^z%d4QjIaG1Jn&cQGj1!!;^4_CLO6RT@^$a-h=6B*5jVEc}WpldYVZo>G_^j4fP%;W(os^Iqf^Ml0r$ zBp#(RR5x|Pwb1Ux=Y}Ej+EY6wB`Nob4N_NLo;m)*O#Wmk8jq^^xsj#+_GDG((*U{0 zW?8``7ys82lZF8e`8+3LL|V+(6P(7D@LW#P#zQrY zUp~0?NU>~-$HpGEaW~QL`8Pv?knVTR2L;TFU!QGP^C`4{0|b;k8WDS>gS}?^**>oa zBWp5*=HDFouFX^6p-V9ktXp`bq-^Fbz$8To9C(cQ%Ej z!|jFDUP4~7yImwk6jCW5i6hfE-b>Z@rCD9(JPyFxZCf9==uF z|K)u5%*0iyXuZ>OcPN6r^Z4*Nt#DmTOklun_mIoy%OjPZStJau`&9moH2zl#>384w zz7h&t5p^^ygpOFv-^mT!kT=aSJ}!B^^mKw+*|`)~99l@+T?B(f0sCp6>k~ZbmgK1f z2nU`OqouAOB=O*DYc0GijZ`OhJ|F}r=KH557@03(Wa^zt6s);=X$r+PHV0g7n_H_~ ztZiAsPLl?Yp`2Ohm9ezulR~~eSrJ!Uc(JN_sp8c1uS_0t%Bs?DFG(BIjh2in(DJIi zvql?Hl18FydA+k-r)~wp`l`67>F*+h!29=*bKDQ`Yd{nT#6$StGE~Xo*kaw}#I_8O z5B!2T0Rbc=;_0K|f^)+(wM(_(+iw$9-77G_aSf43c0k1h$uRPZc}LwW^d_FX5=9Z$%3!Tll(tXU5$v3_B)$flYTiuVbBDrX`Ws z3j}F_Kw2`72JV!%bW}oy777<{Xh=D*_U`UZICZbpjYpKf1xl$4PH)J!*8%od|NMR* z>aW|3xWR8}O)a442S%u9(7OFCM;)ZNijA5RfpDmFd9V&VcWzC5Ssq!Fn%TiJKS8n3 zE*OI@Nw5G73~+A1S7e#UT4aP}&M1^-M+E=4pfUV+*z<$ahm1MCN^Ni`35wx#o|Ha| zCIqx}TV9Jg1!sk;Fs#JI2R#<3HJqZz*Dvy#wVbicvNd?4j!kT-OLLoO9iJbYKQt&8 z)pfY@WT$*VD>*16+h#Oyy>rOOGEc#(s!9n|hci(%yd7SnRG1tE?+EqMVvSq$W(;AC#c^+`_5--c{&lAKDQJu+QTJ>MsizE^0>J|#n8o-w# z#Hs`oc`w=Ie=l*#*+#biJ%dHia$pVn@ky9zXw_;}1%S@iK_xFpc)C#|!UT@n`oFRR z@h1!ZjqzNe20i+*3S1ARdcn__!qH2qrhDAGWv7Wq2y%JSu(nQ`V*pWG-BI?d ztPVVJ0t6i8ASzXB!5Ug=44xbDQ@on(p_F2@qrcNr{OGL~2^$~vLQ(_4r~Mf)5s-v@ zV({B0!+TX&%&%QBdP$E#<|}y@Kyw(P?k=M;bI#tCC0jfptIZig+$89D{3s)Y8<-&7 z&Lc_3*%c@!A{a>8R>OXxQc3Em)15EdHC8K^D_-j*J~Oob%Bj|935s-)RwoNc z3P^YY`$sQb_g%pmY8naYPukN41jOOHt-motET5DJ!gYPct`jGIR6KYcfuY-@sq~3r z;qv*7YOIogBSq~;-H%nuI#kv0eKNEqmhw^T{g9|b{<(mJ=Mh4Z%S*Nq5xq;1nIF&4 zDc-4CFyNOx@vTOEW2rCNw`%)@=j`yc zZp+W>+yl>n%DH*AdUq)u=Q&T=biG`$GnbbGQX6Xdeb?^3(aw4kTw*41L)rjUFa;{& zaL#`7ObvGD9`q5GRxgD@(h)kr-H#gATO3!ZMboKYbhTpUEJ|i%VMfON3=)3xg ze7gE_4Win)Ih!v%_Dbl5;kMh0?%Uvz9K|c0rT4--e%;L6H0@NL+4EOI=GNNsJib=u 
zh|+8jqtYp0Oi%0$o$o4bZ}qP|YM#xV-pn_ekGz_8UaI$H!-bH9!8=|GXO#93QIfbL z0`Az~(N^5E%gRgYu|!sH({yA+C!dMczMq_|np6M}OV7zh7o=Ve~cOC?CY zrp#1+#-mzW4i>)D$kI)YUwRi3aM5VJF`bEvyTRaGXbyii0gek|1D)WbVW{9j+I_Zc z-fV)uMz_G}aF*%X3v7$L>h~|ExSD3MBj}Oc5C!&!Rg$*`fvnfLdQ`*Bvl3Zq|AQAb zm!QGT#d|^D(@87n+W#u=y@Q(S+P6`=q98W9fYOmBA|NeD2k8RRi}V(d7Ni6e6s0%m z9i&MoB=jKCYbYTQ0z{;j&|3(BFY5dLzVkdY=f7{xIWvdJ3_ID$-g~wCUiWof_ga>a znNC|n{VSBAWwEas&wt}6aG(^o7xHTaD2x3e{FD^t00!EtSZCk^>U9o6->^a^jBeq( z=(C=VpbZKwZCHu#^>3g<%9;S5=DRGpCRe%^^Pdd6i{<%N9M?MX?1nl!W*RiQx7p&2RKRsVVK9s97yiQ>S3a9_B?l5*P z*4Eikr0XYDR}JiW9y?NwEM0JyauQZt*DbY^tLtK!hUjcsGM=~^d3lE820>aDBpc>d zZ%^G2!MvCSg*frkD{@};nS9LzPqmz;WEa)(PZIK>b8O(Y02sc)`MF1ze&DBt^a>7P zf3un1ao_@@+~Znb;|QUv$*|3|kTA08?c-OY$S3?Cou6H@I=&R0lKdqSjFqC$_Uf|2 z--c#(QM5u0#N7+_TlT$>+M8~$6gP9gW7B5P;QDRWH#w(IW$XQQGHbp?wRR?Q;;u|9 zB1GmUNQ-4rVfP-=4(Fvv-;`l~k`FPec)he8XiU17{cKZiVD0mFlQi9A3jOBV3Io+EI0tvRO6e# z_XJG!tUp@z#-Ii>}fJwjYvgPTKrA6ZHm4~~-( z$)c05rt5<*ZMdR{BFjFU%bP`gKImgcyw(NHf5lRPduhy0nme9AkC`C@b1cZiKdYEc zlp7liUOXEy9^P?s-FI5*17%z|YsW0+5Z z^DT%yAPH%6WADQ8foY2Cw*_!Q2gyQ(>-QYfoa=A`;y#Yd#dBgs9_Wf6oleXLbNd-4 zbF2jC3Bl1r@RGz}-;A+(k%XW)7ocCrHlX}~g|L#(FIIs6K z;(9>9bp{-S5vy~&|4gLD!y$&)RK&fCxDGZWy9&lq(tAhmzN?tF!MaOSgnS%dx{T)) zIMNFG`A^PryT&3d->czS(AeF^2k0kOION@Dgm)(&r-XHepMkX=Zd_IL1pTEZX-}KK zcBlMEc@yx+bgN1By6QiHWX&6@N=s*$#3XZN-1^;mB{>CAxiYw0Dibf_dnSd ziq{^_JO7JLEevX+(9z~38Lv=^A)8s1ajze*H-E?!(Z`2<+h%<15>F8SI6HAFRc>oa z>LwFayAPto5tD3HPa<#91OC+8`IH(jyac#+HTDQlOsFH zia&>GlHghzHS|x>%fDl=j(N;` zP|=ZGs#sK1mKM?7kheV>;9)B5XSei#g+xPrc>O(eTkj!>P{AZJxAu!T`sfDoR`OA1 z3C(C>#BW3z5B{L8h3R}qEVQQb6T*^Y6iOx>SIV>rv6GvN`;PhU86hM9?$+!?i`ot< zLe5#2M4i^~Vp}eNR|_6g+ft|6*yR;q;JGt2cS*``YjkLQH^KALBg2E>|6sJFg+0gy zQgaA#7ib-K#7Q{bT_G*?92{X~g8+&D4{{uPYwl=GpA`De5mf>7aION;6?rM614 z^_`zBv2m)>cfx$m>G6scmex1c7gC}h_%1TnJWKz=*laU=oz8flPM-6?QlqQkGh_X* zJvK;i|D-@>W}|~v$RC;Kmn|Oj}N3p#F;%z6ICe@i3{4`QWH zr$V{a#T@})U1FJ$+>iAzL%ATkrC9UI zrj2IZ=vq3Ohpva5v?IDL@=tSpc|1v}CVWYog%uSQ5T;RI8wUs{RMI;DYxnq*)laY} z{G%(DGIH9PgS7qY-aiezp!0ln9Wtrv^yroU$skKV@u$;?bE>lCHDs2fVExHO|N66u zy#tlc%m?`a%++#zF^ox32fJRuixUlG%4(VKpzy=g^!889L?Pt=Fw~?3gi65F&m$S< zQ%epbwx`bzWJh&J?0OU&Au#}1g1^ZQI^guWXPNLlW=D?k{{TtUR-LXzV9_dJmbK`% zooRQmT&vFb17ck4NfW?tV6YO)Dj)$OB8kIdW+SXnWB4* zn^^u-i<7OUt} z*&jEVpE_*vVIcei~T{o5L)|H~Rhk{?^8J*ame zWir=a6Yz2k>i#fnz!CDk25mb!arFqL%H(0Ulr5WvPb6sfG`tVdp9$esj;b1b=`KW( znCERt+R|(FCz@%~GTvd!A<38iu)Vh;2c{L#`Ps*jJcv$u-4p_xLS-fS)SIpUe2bQ>`373IM5Kl1pl zCEtn`in&$e-3M7oLHv{byrJqONOQXd(c*e((UrJ6pjEv2=&a#-%58&(k1mqn_+KSC zh;#9D<_X44-_la5r|OU%AHs{FHkNDeHgo1CsJjlm;jm5&pNRN-tR^QV0kko!W`pHb zs734!#Kfm0b1N@HX^l0pyn6DBMWo-mHG`cBy0yUKlpq6oj$b%i9elKq#I zWGnW4K*Od=X5hMcM5)293HL;-E7B&oB{igQSt z3TeYQnCcAfJU1)!Ajur&O;3;Q$8Hb;fW?x1sI2A8FFlW{bxhqZ`~N?L(@f+%#*pNXz3!x@*?i z2lC3!#d(l~L<%qd89eOD&1mipb3CNd4yF72ZRT90Ib@19fRv+L%zya)g`BmTaRh`` zhEye0hjYWqpH+sBi>b3hCQ56o4EfAQev8WD?kC$OPe1*wVn`i(F(x1oL7Kkq-=q#6 z2y4^A2-8sH#b4N%W;dV^W*}Y0?blT`hq32i+lx2|&!0#$Sfd|Kk~PF_j{jGJoVb}4 zPI*;CUvNagugvJLqQ>>E5WhIpz|4{Go*daKRKcj}6o32C%<1NaOSyp+Q~u5u5%)4| zjbznz@SQlQs&-uXGuM&E;w0M6?=$sj%wwJDn7jIl7ubl z?P!YfVx(jl5|~!^KfQV%T_#OelM5;?T{n*$nz5|JyWuxDVOC}WRIWh3h2nf}W!j@q z?AF~Kt0L~ws^V{!@sEG0-gomqnE4p2CEQsjVAS@IcC_$gAamuSp_zpA8G_Z(yfpj; zBTXZ1->W@@r?eRE0Tx>k*>(Tyq_2rn`qNSv9@(n=7{NebeNdX@&dAY=z~&?*dp12$2SyP}V=4T_*6q zQq~9SeJg{K=|Tk>d{5>ogbECQCDr;dr5-b>JrBD{>?weMk}wctww57F-s zBE_J^-L~8rOt7X&^jr4=Kfi7FJ8R}^LEpwke<%1V2f0e*O`NjN;4MG-VYqFsz6z|SQ!>G5Q2Kld?AT~) zX@tf8%4@$baamGxzAPn*noEu?Kkrbk%=^n_ORY$`12 z$+zs;uVv+%JdZ6;KD>U#+g}`%uCM|8xJ052wy!scXej1^@+yCFdIwuzSM`TVd{SY9 
z7~_D^#0BEs?ao7&_cfik$E(x8ZjUBTHfBmtsP~gA;Pq;kfTLLjWW^baNIjG)hE&cD z?@tlq#+*5?6Udut#^4%9C*|&w=RNvQ2d$|WvR@3c9EwC@EAbiH_Dis{^ofwmd6PNX zMmP3+ZP-B>F$<1?gf~q_(i8)JEkSw&BGF$6BAQw#tk|f%`HK^1dgD5K?!$JEMUNY*H$h{7O$vXj)L)JmfdBB^?&%3_FcLliZhhTX-Ik1H zr7J63U3$+hWp!(GR}QzyTnK}$x3wPEKwS63?ad{q^}e;|f_9gJnEY}K&R*)&yzOxG zJc(i_u4QB6tfD-OtUSRBH`XCd-b-fp}EtLe8-QG*{7ZA4JBUP>o^ z`7N5Il%-5$%#U`VU+ezZcqf2eaa3{3+O$&`r z8gEP3(rcA-{yB;);ny5k?%nRY`hY(I!TpMq7nId$uP8UuNi$qIV^4T)6kBV1Y1Jm; zJ154yDiC9-mO{*+pD%Ci?#$TgJXCtKzNP;bzNY#JqXbz#!T^twuu^f0gyPEp|a^7;kU%%41Wikl~$ojG!G>L7$M;jJ0P+-+)6iBF9RPObmY1B9g85qT|K+o(% zSZxSA8G`hD_8IsKBA?G!fyGnD);>{spM{uiEm(ZsbT)3PGM2WD0vbjrta*B{VvFX43_FIOs+}I~3hhNZm859EJU+op z$?S#Orufkn7#7Y#I3H}sG%h}EGLdgnpsy)PJ0)@f(Ry557K;x`=iHk@tsUM5F=B%+ zi#cW`d(#H4XGE?Srk}_}H5YQzF z3W8%zG&>&fohM7&{T@Oy5+l{u5=q+O!ImzyA`p+aBJW*}xSZOv20uGK*5~cN?V%P8 zfva^u5{yv23sY7SoWu}D4++-6M4>a^0VG@?@anWu+cUc}w_m278VsQci{2OydA|xm z{fP(pAd)a~`huF2VnLYLv()%5CZL%?Y27}?HoS$E3%^L}gZERivE zQky=O2spNXI$6lh$_-#D#ucuGyfYe?<~n6$;DE}v-Oif(d`u|xCS=Fb{~RwKz1KWI zXF1|}R)2iZ-pK;*U^AB{z)hpPqGWPxSG@xC12?m6(TBu4By-G7^>X+=eN+ukMG`O{{RoQAc0)nhLH$@8nw_yZ!Z*FgXS|jdvsI%c)~8WD7jYjO8yfD`=KVy z$91pOkxVd-Wx@fU-_1x}wb9k5&pkY-Rbq&JOnLdVqYoG$Ai6B#(#?K*?1gyI%YiL) z1Z#AKd+KrYWhjtG-?v}>i%e6BQjg3cPpYAf{rvEM2NjCIaPu@?iCPP7yzyB9B?Z16${PManVtDcKAC+8ff&q?00K7&?2=DaB; zC2>T-A!y>^K7>9@S@e-5;-z7e0pklMPT;_9511^9^0P;!M}2RS zPech=1`~aVP>5~?=@`!qnbdgoFms#!DRL*;iCG3R72u}{g8aoO8*XLDKR-z$E5zEspv?;c-vhs2}Y?4VRG zE}D)II5>bH;fLr+&j4!-#%2rR{Yi_RF0HTi3n4Po@jLzN8W^x{dfObTS$h*d*-rcaAfAM<2}<-PwR5X~c}@Z>qqQ{cW7X2ifL5~qoVcNwjBdO4h5 zmok&=@W7D~DS9UbGxZc41v-;A- zoi?#l*Bl#-OLJMQw#tk9Bw&Vw{!o$K_iO>V24$wipk`0=G(Lj~TL$y6!VU28^I*M- z2^!|SI!2nVgG|&cs!sT}kT}S2ck4TL_(6aff3TXR3eZ!-vy}Oy5f@q<(E84N^hx8# zM3?gdl1pn!wl;P}3cwSs8rNQMY9g4lqC@DUzy6_=+->!&k~Gmtt5{|ff10hZu=f40 z?bZJA&&*sC5}+d9(>e!H=Ha;cJQ1g_mRtZA%{BW&6BYYn)7>ZxXyYi9$ujX|>0@s& z&dNP&%zM7N`eus-52r5rWO^F_RcKx<@QX6EA;dd%?E4}oIolVW9Zp%MRd4hx( zdnKx3=~X5?MR$m;cUnd>j^l!5x6>p5PsRnL@8Mlu@HR0vS3C@Znzl*>2dINzdO@Ze ztqP^;$_EO@kn*_Hc0UV5joR|$2UO3JOA*s=fdWq-_VH zzrKh$rl_t3oFe6U!iWJm<&6pRQ=G~@IA$2H>zuj1`H|w;8pS2ybLY;zJFkYKY_H~J z_v{%VEh-yR{$6z^n-5+1eoh#IE4M$lpX}TxTcU2qwpWKRTzl&g4Q%kp87ODSWGHQP za!KfZJSM^!?xxCCA3)j_63R*-)|`c2=^cja`uvbNtJlHqnLIEitp~m9OeFRJU+N_Y z`^vD{omGBJND!$aUM3Xn^7z?##7C01KTNrM99QhefAO1J@>sv2ohHZH?w$)iC{_MZ zi%0^bcH98?vQQG+*JXG?^EUGb&<3(=_?>44pYhw^NR17F7TeIMkC&hZ=R$=X*(4)C z!DcqH>NPf$oWPd3LlIn{zgQkv{bTQ|<)%%brg4Jam_TjW*^4U&3cq*|O4Z{SpxTpW zHA^MlT`E{dGu{@0wvl~9i?ejNj2Bm zu5$?Y>)OaPu@R=w0gil4KuvzRX2z&F`6KmFiIaVe3M2{B^c`z1{gg+X1HL)lXnd!N zZIUK|Z@PTPt~NRTpyLn*5o0e>ILy!=v!L9&v)|#X{r1h* z3FBMYWn?zUc1bBfY{?QT!K$`jabFuarq@=Q>mFA&NG8|iY(=DP#9a;rep;UinQhf= zsWbb85}wFi8dO|JabbvFZ8)VP0>Xl25wNHmkaN3Pz@!dZ$utcE6VcXhC-<;O$XX`R zXUa!-XH`Qz9Gwk_U{H~={{p@{ z<;h#NYGrwq_QVTIEi`yQk{f5=Tk7k^g+QR%hm_@}N(wzhrfmL`PX9$y`#aR&M5}qM z7+=pG;#uDanYNH+0MkhCTCMjghK%i5ep>sf~0P z?yynfhf4K4)o2mX_vYnd)s{8=b;D*Svb5H=#h!>9t_LB;6j*)*QK6pbjPqzK4-=i* z$P0=ziz$vNrP5jhc|RD|y0l!X-Aai%SZd!#LReL^D%9X_>6^vpF>Y7-FXT2WdwNHOH4F3PA8Tu3(jlbe3Nj{Et* zEW^Yd9u^K8(n`#t&pd*Zd6m;8cB*me1^9JfiTmigDM9+roELw2xVlz`=F#KcH;G>v zl|-ypq7%Zyn&^g%U-)Y&j?uPU(Mi`6mQ30BB7H3(x@#WQcYNq=hpQRnmeCr8sT^|B zJz}IZvT_f^L#rM91}h2?N5wBm_V4X;DVGItrvY`ka0S?+sno|*xwX_Clv}TWan7!W z0$`s`#&u#-W1l@bOgRdfEX4IouS+FBp2BeS$q` z(k`&7v+7jb4VBQBy&g8nmGUILHoL*KaT#(~#h?0X`=GUWYKz@q?%WPW7@C-@maD59 z;0?DFO_?mQTiH0)Mx&($8!!eqiK%eOR;f0aYFBdK^_|hrd3GxwbzzJ3tQg>E`G&&> z4Xr-1*QsXWeF3%BCM-jWZaydS{Ed4*R|Q43;dvV#wLM*P&f9SXP5mWI;Dl;!R%srl zlc$b$cYcI)$*OppA~W?lgT7{-XWmsnX@#FiNkTglk7)(wKOPcydZ-!n8lMUazsfBN zME$DU*1?Vy9>x3*gE 
zxYwoN^&%_y-@dj~^O7*xl{8$ZHfyO%r)-*F)nqF3=@UQ~rpHvo&LBZORj}=}?l2jc zWoV-pp9<{!+Jh2zb$5A}={;coo!-vcbJ7C{6|qF>I=N&zLg(}`08>bIvaSV%u;^HM zCx@^=&7z9%t0XnFPQfdFA$gsMRZlgD`nMEIZ;9KBHckva(7sz?5E&L7j_ywwXIQ~T z@gS7d`B#W+kb-^d_t6wH=Qnwg^ulP{rxDe+p?w~~i;`F+6mE|0mA;-`$w7c|`_)#T zs~a_`uc0D&hC1ZNrQC;C+FV)&?Te~X=`icT_JmepE1&_6JZ-pP0HB^UPEk}bNW(P` zDLT!gPc0)cDxDR8TR>@BhP38oYUAi?n~XzTA4n;qKt&P55m~LxxBaC{c01u9R)`|A zqP#owZuR=E1THl35ktYTXjH9(M{2`(Wv_vpy1?T)LC4KCyQju-o@JA40sagqC>%C!pl9msr`n%7&T93ziYdrQJj(i; z<~&42$a$p5$O&`T+{qQA+-QVW1%Rf*Wq$1baLRy24a!GwvlK04e*AV@B-f=JsJeQv zp8KLvoKJT7<1JCe01z6ar`_a5kmYz?%aT;=HF+?jO@miO9M3P;+pE}o=qzvur_x`t zd$!OOgvw%Ym_tdr9wG}n?uWi;%h0-uTAM1aSpRz0_JT~=1id32xbQd`pQormG|>e~ z4`>JJDZ~J|JOoa(Ok65MS##|?6w4wVPgHmA7iMO#EO{FvL*UGxPo@E4tLB`Fh&QH4 zKztib!q>|lhVPFkc=v$m)AE#vXbav2+NU z&+&Psqi01SRG^3$<1$_rntDRmnV%Vk>}aVX0W$)d({@63FwA6PPmkxkJ?6ch2zNhI z=asMfSAytC_VfEWjBl_7gZ?&I^Me8|DNaS~ zpoU$cwQJ@gZx+*ZQ~ZjRdVNhcy9_d2!_d)NX}mkr+oi&-*ViSc4Sk8t3;0>cfQV4K zhGBd4iT4I_iEe?f!qnI30N#%&;t|9OKGk zL+M+B>q{_A=47-~#toiUvl^7FEK;EJCT4LFUF_=`(*~T#QjFD znwhO}?*BBIz!%c&=+AFn8@T=Ns6fAyQvUbV+a8xzt{m$8&vWsgES)_G;7*@6Q{XcmL;34A}+I|0(m|g3yLgaPW|-@E;%Lm(nU+{vB5R{p-H^r=Wk{WRw3b z+MhSQ=l*|jB0Fo?>%fW7w43h5jSNEy6II0U7Tcm-}&3ge^ zFt1E=UtqBLh2o`!aMveS^KSN^=KpB3(h(N3Bt3!pQPKU5>>$mFg+co+k zdo7)c7+m#FM4ZpUSIP>MD6l=s}deWft_fCT)5@O4nwrf&7a+%*!hg%t=f_gys8^zBp`Z-u@Rd&;M4 z_jiD>|M@o*ePp?-4zgJbkRTd_n*w%KBr=>4OrD=E?wJQ=gIY6_je+|7zWK_DLuSVQ z0|T%X`@L=a*g@LsJY02-@PU&J9Uq1P7_o+~Sx_*3sqd6zt_RdtYZy1%WeI&US#AhC zQ--I4Ez{^ZCw?u>xoSL1-RJc4e4S@1s7t3dO5p8ViyNkb?hZNO5~XP3DCoNN`wk{@ zC?L7MkuV>`nFTYzqgEK%t@Lf_zLPZVwVJ}4lc@!3R%fQQ?US~eWS5}X6!)i1R1O8iDF30xz zT@o;xoN8VxEO{edg}{qm+SD!sQ;i!>XVhZkf199p=P&+tbI)6HMmMt3uW|P3_VpF4 zh%HA6x$wUZnZ)P8?8X6vN<@47%U;^gOh!VEm9Pr(I(Ntb`@29m}$_xpE0QI{;fus3`5@H|QcMEA{iO`4ZGT2Sg7hgj|=8 z!Sd1KHV>+7j^37-OpS2EpXF#VJM9?>OVte;~@`-#scJXUtJc%u1)(5tvHe$qc zWkDI>K9rK%IA76%-Anv>U6Mp@-THAaXs~%OL=8)&IgGo@{~qDnZsZxw7{1=yi^0_| za};I)A?6h3juLvDgw`1osj){G%8pNqWcoW}_6+PTz50xexPsX+5gR|2AMpLpWm9hL zgGE1=D2<%Fy!au=X{+U@PJzb2tzgE$MNolRZ7PoZRMuRud2-N!vSqk4wq0FP4q3Eo<8GvR?|%!W zg0`P=<#!U0?J|LgI6^b)l1xrKG9tIepVgrrxyw4M!Oee&yzNUg*I~pf{m=<&pD8Io zADdjkbhxSbC)W76P(WYj0lT~FVV&xH8q&tMXOX`iFFx`2@+I(r4EIR;gRgN0RO*;} zPMF*3p@r9^!{A+qpQZg?R%PUUhonUjnGhF9qx&TNZ#CR~IxQsHEciq;()qaDdeTmE znaK|`dN5I5S}RFS`{m4lZ9^K268MTb z2u>RkvHmU3%$%D=whfKc>aUq(P@^>jR=GSh19T2DD^w|^O)I1HMGV`Dwl~&Qh2V|*%{YN1*Y!~R9I#t24>*q_1Vg9zZ-#Qp?Vv*L-k}fRm*E@yzRKfPv^Hd0 zQe5PY)(YHsn0{h#fC=i|DE=TepC%iWdTBVPc7j?oUyx<54+0eVx^vTS826c*w`gT% zd@_jb_TVrSllnuXL76`iv4d%L+GIQ^=`A|lFdTs-_G;ZA1Jh-A+w|(2Mho=f^dY}3>h@Z{!3_VSG;kyHLvx@1_o&PP)0pSaYnb(?2<6&fm$W%BFUq;BN#>e?O$j$ zzHT(8R89Byb$C#-XAYS5kEfuUu{4gdm*RJ86^Bik7-)F}5Q1?Y`y@1W7XaA(I{kRh z$jMi-XF@!%sGHuAX1xQow%&`@WCt`_d_rkL9umzaRJwXZfBP`Ka{oRu-N7uY&5eM1 z*>N$6Iq6UE^-0$oAxiMc*wRgu*90n}L_;28keqg8QOgX?%tBQA#4z<KQl?4!6tz!vavQbcMVniuubT?pn?=yHfEt}(f5zyaL-Lw&<2!mT1FD~A2 zq#C6`d$D$6Ts=3#IE+_NW2>7cm2zoI#`#a`10-hDuWp##36}nrSm=Rxp7`j#q-3`4BbzimpSm&3tbCNrw}$d$_71z}PB5 zbhFw@gH83sAXaU(ym0`1e4}xxNpq^c3^nm7rw8hr=7~b0V6ni8 z`Ie*ezo&HB=6|Q+(l?eg9URwc>(!t??8~4PbE4ddpBrI*JoXQJTWrrB7*Aj&@}J{(PyU}e@vi#qF79xBS3n}(bxX(XV9NVo2R zI+RuSW>y-8o9zNFtWtnZPbhm(<^X2!NvKx<_!0h~Z*Bp6R_!tD!7AP&zH<68>(gi< zVuJGQ)O&|ihZYupW+Mu*?>GdzMq!T4QfDLZc})Inik*2)ibvAq4;vil*RDW(89trF zWpP?I9rc#KXsXBYF`?^E?*7ZR3U6w9vw{-iU1FC=ZziG zk8;~kQ~R82Tr3>^Z;q@uFMagiS#eupz;C0M4mphd;=EkUx7#SsKyF*h<#us=f`+(q zE8VD}o8Yb2)rTXHIoHNAuMTE@2i|>l%FVYU)TiZAcxsQeIK6|FZA84Qpf9TV0rd6g zt47>lF_@R$3g$&)Gmn+33;$Ftm%i3+aGRTsEe&s5&Y`qtm$LIU!~IWw>wQrb8Ojf|fBj_G 
zg)PDFd=9)m;&m)YYVPwGd~J(Fpd`M37qs>(p(oS7`q#@PW&73o|KND5doKD`86lN4~T zM+{@HSDnuAX?%@mPMU#rg5-f5*Y|@pC5h>LVW)ti&9}Q5htAmd!5e1qGHa)eQ=Q>6 zty?z&ybHrAO_eTKJZz;RApjh|yAcYY#;VGR;hNYt`v4$LY8{y z)$WP)Uf0Fz6&^she;RoC*qpUY+2tHu9~K@7N>#4)`y7>I@5>$2UAuyytVi5&{L)dl z!&7e4425J~>?n+q@>(_aZi*U%u(dP=9l!C2}nGvc^$y`4&yz7nutmrdT}g>Im^en+r;q-J#18;p&w z;>_7k)+EFEEJ|$F5kMKO>d26>xpuEJ@q|081ibM%>JXvrN{sb?BuyoIN`_HFWL!$J zRGvCa(Fwy`F79uIvqkg)DbEIgJo7hf1izIn*mWdjl2(cM1lBATECnGUcK`y8oH-sN zvbx7&ne-jO8^zC%#1E5m2M3vS)rYR=%<-En)0C@?r-M{1E{z+oEikFWRpoDrFBTXl zu`7{~FN;eg`)?8R@VtC5SI!w&9~CG{}d3((n|g6KPK!|Sn3-)Ck5yfaB% zMq6A?VPBggxSIN@O%c)ytZS~0LlY1q&q&$9sbU@Nu=^U=1j4+(4}EzSB`-miFYb@Y}uMCEgz%;DY4Ix8rva4TxTM z9TSyHl9|Ykah3g>7#`Cocao(j6g|)SbxLGxyZ7A}>4iY)pOk$%r5Ws%kJ38s0#xq^ z_!CweGnL4$(ATYHn!Snnj-R=zG_|zl+xvAVw=POAjVg@7e5P{BK}MZU8ZJEq;k#{g z$k)~K=uNWq&r-iog8B@tv&vukJ6n?)AQ;xGc3zg5QhLGK~kw`cpK8cC+NFKsXub;llANx8md- zx36}U$pX5R`*LAc9rY(mgg4Mw#MC49kZCt>Pw6!W>5ro_QjV(3m`A%u%MFgIcwNMJ zW;4Om7{7Kyy-L4@F03@uQ@TjR2lo@RvH2XOt3JkkZ%=m#Tmjzzlx*z8ujaM*h?|cO z?~`2Do8RvLarZTa8x$-j-W1nQ9w=SpmQL27l)oPOpjSkTBb2k>`TFLHe4PCob~am^ zP~kgBwr=|jAsdlWh-(}&n<0rP<~r}rREOP{Gr$3iP-T; z9C&ZL&?TF%N=QA&V5iO?G)g7j#XB075HGWDHK`lyYy-k`+q3X$R)I9e1xf=Oz(l~9 zl5EP!qfuW?GyiK9g-d#d(vLpL-_m^Le6;goZ_CouNNe{;VpOcM`>)-boG}_rvx{-y zd=Y&*uK;f4w_naaWL^~D&j+(UH8u8nGa7XF>>K5EE2+}>Lp^cowo-^JG4_(M0P+j# zR-bo3+2c^PJ0C~JDKT1v7gWgJeV{nMf}r`=wL6JIQ~EE+8xabZpry2WYLC{`125h? zgY&W5^BdO>hK4$B)?{q7Hdy2#+bvgY(=>2#$SC);&_E57%_!~D+39mR{~ zpA!dk&9k9w!U|?KdzPZ4pylT8pydkP?04m(P5P(yZ^~M$5k_|%WH|1#kp;r9fEq$L z??3-|h!l}N$A8(Tn5&T`eDAv&`cY1H?s*My38j6qJ|A2=T~FaJ`^pMmt~^%B7TTQ6 zFK)LJ`S#?Nbwc9;9_cFkR;HHwJsiO$DTs5(*6?d_G3ED&>n~illE3I9@a4n3F!tzO zZBMmSSug#mM*Usc$&@FU@KpFS*%N!5fT zUAiCuP`S57V7E}7(7F<(w)WxU=(q5Pvqm<^n$DLoF%OxSXHsEC-J+8M50!I`md*EN zawe7(XlKFs)t{=0h-UJu6l7+-zVVgG3d`fXKX%@J;a;%PBp`3*Yn8h$R6PDqQ*;{D zReYfZWd*lfU2#eZI^jR#nbUM*VN12atM*ZR2yByYU?wL_)L{1`KB+$7Dv*^?M`X`D zC^`Ocs*^F20wTfFl`^D&iq_iRV*V;yZzdTH8uc^@TY>$O^f8v{^M_!z*{epW)yr3% z>5sS=+U)8-1`B4aUHzU4nlC)_Vf*}nPTR=B?BadXUGs{Tc;`qJ%6`xEUQSQfvPRY) z9tr55eZ^KU7{6cVdt}DTz^lG25h^Sc z25+d*PHch53jIuU4uV+2olz0UJhS~}7jc5pa}jj!_`I=lweb!!_=+N`zGLWtUU*`O zhPr6&6t_oX|t%~KZkmqDv>fp@EV9j~JA1Q=4{+8pMyM%z1DEBMz)rlA0GD@LSt}!i%EFbql$TrHm=_D)zwUS} zF^5sfwV0@Q%ht^krN=L&1UG32K_u&*@d>KAeLKw~Y$_gNp|ZqmOZKLy7P#`b=?8Fi z!6^VrxLj*`s)MhQVwCvZynn~x(fioy=OgyndA!k2jqEzl1pHm=tnQxA5xrh!$Tnj5 zC>N=gTe?$#7~5CnTk4464`0=Fl&YTO3U{ww48u#xkL#l4dh0c*cjS&!ca(fF&$H7( z{Fm|NU*0NfD)H8m_cmywkcju~p(Y`Qy4X)lU>45A{2t#QZx-#`k=Wm&DnC>8AVr}r1Ol`F z7w7ZmjvQiR3cQ-0QfMA*5STgqet-9v`42Df`|#Yk|C7Yg{~ZU#Zgp^WzF;XCHZ3Z+ Q`unCT3L5g|vKB%A3+a1QHvj+t literal 525075 zcmeFZc~p|?*FW4j4OW&`X6AramX?)fnzK?zP0J}$(=w$p=WxUkk&|YoCT6DQNNLU~ zIS;5DvqEz~L~}qzMFj^$1w{npN9X%{p11G$K5M=IyzBYvWx?XU@3roI-PgYM{_MR! 
z*M_)TmL>=HOYPsWW5>Z8rbf4S?AW8TW5>>-y?X>#GGvk7g0J5KZ=3wFqoP-4UU0I@ z{hGzK9Xl$Mgt-rQ3(oiXncfZDu|uT&=jS)9Z}HJK{#~Nrz8d2HFZ7>E`QO!PY{%zs# z9Nms$Z%M$O;ub>G4NHPXn#tp!LWT9G8hAw%!;BJQ*kKHh+#GO%XY$7VG1J5Z8uv)o zBd_~wR=;1ncRCQW`RZGuq)9#~)0`kNoNB5Eo@VU*bxHQ^#N!K%uzFjeOnpe##ri(| ze4xmOZ?{I_jUqK;+lI6Nq8W`CxTi(^$?=0S8wxP*p&Hv`n&w5F^hi^BGv^#^d=k{j z4d#le`>lvE`yuE@86CFaw0VDZrFJwkrbXzKJkk62me1J!8I-4Ue#>3#7^R@|VDN~m z25jZCRyZvXeYRfqBYkb3vI}J>HvA1?vV$zgoqsh2QbkDZ&goURyfoT}3qQ(O08oAnnE$!>x<3 z2GD#D-S(rbNVc336_2ssQ%wbNjby0F_ z#QeD?@QR<*T8Bfp&u1cYOw0;DiIy$ofHEG^$?l_O`5u$eDEl+#-CDNP4>nur%b~|) zt3%g92bJ4BsV-#Cv1db`Oc$#@T1eu%WO`36tN5t=CGJL2xpyK?{#TFwaPBF!xHE>W zIsbCK>@Z-21J_tH9pyJD5)$4Jdv@~7wjDE3HN{6ii5J1N%;B@-ryEnsUD&HuqN2b-r8Q&;yt~x+=ak+@yt^?nQQRTJ5sjNqWMG<_;hFNH&24+bx-NT zViuJ_zq*g9<3S^nK$UJx$l)Mo?<@Bn3hXliTkBC?`y;PcqQ%6QIiC4rtG2kqK`RQu zn-?v{&cgCpxI#n)v2|xT{99HqTfeEex>-eq|7Kf#If9Bn>KlOD+YI7ND2+y z1JizZ;GpE?mwQCQUdo0ANnLclZ$dZdfp_Y^^g+W9SeS0d=eEP+^nY+;c#~5QEX*q_ z)7|5bkGiE+p!euKKr1oYDGMpovTm-zh;a*dlf$G*_~PYRSzwW{B|v6VsA^AWh|D+? z*FMjv;>W15NMhz+J0@f2H=yHz$8Y4P9IP)m8i(+u&YE2BHzX5yj(Tj@Cd#(Nd^D@E z{CX&-nB}{r%Up>Tqvrc6;>Fqd7#p@t%1xbev$wTOblrOuz2)T1X zHbm%y1Bslon8m^~S)ow&skXiEBqqXRo{wF5K9QYoU;0d9uqPmNwe|Mp{?C9Sf`K}? z7p(O}k4H3Eu16UdMzbC4Xa)`~mZBIhCF5hbt+%<=s7^3T&r(#MuiE32m00meNkyq{ za9W$WYPO+fNTZJ@wHSk{E=`lyT1|RdMXq2M+g%hN4~^8V0+ZbOo|(uMxhp1s?Rl-f zR#>fHaM+aiFPl*LSfo;T|AuY|^{OsNJEqV);}6h)%D^LXpJf+5jiAX! z&Rmk({NQOQqCzrYT1#yW#4_!82Qu|G_6c0v1;UEX_ZZLx5k79IP-7m768@UzsYwTZS~ zrAKIIwPtYlkB1Chl3M%gET`FxP^7ZP2#zV#k4KzG1BX zH7hGy6rnz_`007s-0=A8nAP2|{AJC>t`N^n{b1c}Nnbo~p%xY}eZlK~^PdX}7)x^0 z{@A({2k6B@cv9rEJqBwCfM)v(gg+?jW|D3$d%(F*#0blcQ;%3_S6|qPzJFN~e^{od zx)5%fbkL&Uuj%L7%+6u#AU`a|z?ue(#TI7d=jJ!z07)%HN#whrR+FTOvfQ$8(KOoL z(pok97EogX>b(`l1k@dWE^5!#O37ge@sqvBTD@>_UG zqtXiwpCAtW13|tB$-esr6|JzY?HS0;!2WxAE|bMKHh!=CK7E6E7W68@WH{=odBo;G z`-DdKS8S1I)RKOO>#BPARNF)K8Foo<%uRF1fk?!J1#(x5?oE9Z7WjdAcT=Ku{m(gP ziE9xYdFGqYd*JhPY|#UzDl^0cth{2sX2g4T+fcu%`O;S>)S^<$@9v{M zBjW~}lHvL#5guKtv{~hsxUDst@YxZ?c|@DM$AFN}ihpYoI9-z77afwACjptYKB04p zW)F-J`d%NQH69!@tEi;YOn&3BkZ?hxpYx1Q>7vNxKV`L;+kD~Q<<6g1f4t(tyq^V# z$t^qejj)qd>SB`Qc{z6?IXZc9r_4tGW^D8k~jF8aYMVSuc1w?-Y(-|=VMSy0a<3;KTzq~DO5c4IRR`a zo9_-3KbUr2Cn`uMaqHA}gQ^R4n{&M%Fvd$lUuq7OvN*2#M~D!!uR07~PP+4Gv@0m+ z%NT!n7e5N!n&ch*l{qqurPi5Dyx%IO9xoXkP%evDZIYac`0~jlN|@p~o!1VuZ0Brt zxit&(wdUT!q_^*wqiPC@T|Czu)z_{q?@ydj2|uli5YTneL^?_vzfhBt50^%uyUrsGB2?_}NejDzLUx{uhDGRU5adg)6|q|{JY+y#-ETOHS$;2b zeSx6vM-61YE_-of^KEM`j+N9BygF2U4{yB;BRT&MIywp%@{QmvCI+&fG{^$ z2ca1gVU!>#9+${$b02Aoq?Galu}(<@8$JS3ZYzv$lc<#oMSO#=Xy|Ls1%<=b>hrS7 z>fUa~OsKnM$DMQm7M;7FacQZC`8)J3{ymzudfq!;d1kTl_;UDDgPJbz`P9*bcGfEK zDxDCG!VBlm@M~S59m{_`xpxP02*!LFeqXgU5g}`mM+>h0G5i*$IL`}6Jt~fljsIH# zUU=(SPAgdMe+0g#mQMB2B>lk}$C9r291s5cF^hper2IV;)s8o|ss_N%Yo`&Zw&CIN zk{`w{<#ZRY?!6FUz#}=hVb}h-qVjkV7@mP=1(Ye)%uH7JOk_O#X8)<-w-?^0bT;|B zT;ytKZz;%^+Q|?0_#rvVfe=og1l4dmVe>1p-^TFkiRtj{s0Vt8`}oxZEnetF&uq6n zt-%r#5>}DX*D)#1$8$N~)V5Eof|XW<$&s4|*QBWlv-ictxFF55n=ghu19HGmzujd{ z5abh$@YVYOGf5Vm&Wt{%c#)`xo{7hK zui8p+-eGve<*+x)@bDu&9gTaN9=qgXB3hqYt1pYS?xiQZd@kzz?q31_H>3o?1ZPl+ zo@#WV`VnTW%^?k|`?Gc4N^O4iYRj>?IZd@^7r3}p{b7B{Rn-(WbsC6gvJs=;a z3oT9u$hduG?K2hvEk(vHzu=O-RvQ%J+Xo^OHZZhp)lwcAd`k@f{xU0NA9GS)kmQ^! 
zr3nE)g3{sjBW+a@gr%fz|JSnl?VcTCVm#JhHg3Mrq~YSAY8Yv6lLa7*?VlSAk{jj^Jqtr|%4yhu3DY*NCcjB$vg&D$&m_y>nQ*CIxtehBmYM0<*h+xU|f;R&F1-WOw6XH+b9sn4`VL_f~p3 z+ePM=TKq%CQ>sUx#GKfwq~lhLEg!$Nzb)D=TX{}wy%oX|--gMWy| z-Q$J83(xyD#`st>7h7d7l02mskN{ruLQT)clXs#>scZG)oCEr-Z?Vi?bo@TD2ldrq z^`${i@zR4{>7Ha{-*~^CYMViYqC;KSZ)#7AAQQgxn#5ao0A z_r@9S*&Hd74+=*rSd|Zyi$$HF>HcF<*9DWhg2zYwFYqn@OSI0{3nAupe&Ft^Frj8%;9g)D{(rZb5Hv}$=%y%L5>!1LgyD`{ClVm!2%1P z{ZHQ4l=iz|YMx%vDo^ekt*cS`YJ3-7ga2)krc;YW5r9-w(T~e~HV#e&2FZz8$XYPl z^530#t99SPq#s@ge@8@THVk0|in4Z%)@@S_{2`O67NXpfr78NWu&X;jz}dxO%g}*S zTX~!}{zh^JmQ1{w5wetx`*z-WZE9W8`4)d@t#kNK%58(lJE{s%;mt|cr5&owBXuUv z<-viHD@NZxSnpqNJ|kz4w$^59c2@TKaRq}@$Cnw~?MC#2VtO--<(-F@t->egvBf8b*5Pf{gy%)&V=wk0Bz87F+#Q=DgVgi=dfmux=BCI=t(uv=yJPcA{X9Ak??=4- zd6FvAb6Cc@)B1$gk>`cYs5;&G%KoaI0)w!zh_3dZ(acYg7Cv_2U5v1Vmz1>hZ@-T1 zFji~Ridgs{1OkD*Tup9wTA$Sp*d>&|+vMp?hnNhaSqTvzm2WO?f|y)6ZE`|O;dzw& zDed@Of@d<0B9}vthDbXE7jLM@>;AM4SLtldEApCBq?%4!o9_?fgk$*WuL=J);I5T* z8%6!iCM6JPcJbpNVx&!C2$QvXcwXs+S5t|S%~$II3%le=hs>su`28X$wPi|qm@IZ9 z%x>+cPn;u8K9Oux(N@&Xp4?nU)781xcm6t^2Yy4AIV*lu(WvS>j)QGPLcmk(%*rm4 zYnpa9#AA=jxSozZB-8as(@wQ*^~a%|^}vMu{XY%$4cM>lC1Va}b=o9%>>u3o-83<@ z&-WploTbUdj}a}knJfIEQ-97H`-q#I)!KhrsHvpN_K3_M0nK6Qo{kgP|B}}#O1!RI z9gr%+-DCU@?^q)n3H`m;5jnM)36oqto9}hRyra+7q|@vKWD=S5-sf-jXj6M-KL~rZi8KYX9Sb zOB2KZ5wKc~y!_XbhhFEK`q|oj6&PPZbC%LMEtx|Au$o}y>Ex#JY0YE*#1zxU2Jg}e zPN|o2l)G`PxJ*G)$^PB3uLS0kZyIpn{J8Cnw)c%1~w^gqTY+!n**Klqe> zoV==O_giYI*0V`JKMo_(&U9pPx%PmON!%U^=Rc52{bKE5z~Fv?jxt9rQqyEocVTY} z4ivS0GzBW#{(rq$9B{|awshKW+}0RfT6#ij8(`~SlkQV`Qy6jCPUeoE05)j?s}nr0 z3H1A^OnshX=D3Fh5uBFB`!5ZAs3qJ2CN4-V{T@|wZ$;StIHV$H2D1Z|a zesd$bm}to}h2@k+m`7W&&pRo)6w zbtxb5gpDhkc6sl{xs!r>pQi0Y91Sy{A8Ni!%DC8<*25K1fq%RRzT1Q`Om_S5jpA#z z98s8iJr|}0t~XH`rM=Z9c0lz)NnzmeZ2Hq<<_SE@Bc6%a4l^^Za|u(o&R-d}wGhZ$ zktfgRZ0!zq4YZls^t>ZoU6XO0K)F!WW$}`wTLF&U?FoTi*Vp^Z#(b>fq@uDwckXXYJ2Ka`g_~q6q&>l8gYH&6jL+gVF7=D^SJ1w-Qvg zGnY*Y#4wX#@BldguZT0i_wtEKFs;wM&T+m}2TnkgZnS)czB^;t`a52AvS9+!OE?6` z7+5-95tPYlylVhn9S(iE6^xD_IOq0y0Vu}BUFJhSolR|(_g)<-3S>H<3MA>p3I8BY(5Yg+8NzW|L}vn2iK}@j}!uE!IHbv3IgcN2_j+E`|8%Wsu8% z>fRg@0h3!FoxjjjZ;=x!MAt=?XEF3&6*gi6<^++I<>c(eJ3iPf5O*14WLlGrqU zNIS~w=W0ME@+D{!w7o$WiZu)-pEPZxhC{t$YQ9tw^Hi~Cg|AMK##yv$IcOOZ3ef%$ zoH)Yr$`~+$9JTY;YI}c15CiT69Ni}Zu}O_je0HsM=0O83jCB6PQV{E&AcZm#K+ruy z$V!|aQ@G$!8h3Y;S3Ah7M?X2Sa2z%@`=O9yJ-gbb`0z^D(y@kjg#HpQ()lHjnM?wW zx~s9#^GD3nOh9|BrEY}Pz=dXdkA860mXrNFe{j0dF6$mZSbU>g0ckjkT&#k~rhG^v zNteGmrpqsV_}u_oim2D%wDf88Gg&^TX&=kgu#Sm1Qf80=A-g6v@cRFwR11=@nbQB& zXPXA8Y0YCHqW^{M6Lrfxe^-?!5``LAs|(d~S_#3J&2XLCE^X$6KywplsMY8S<&k%@ zC>MLzh#*TdK`(`$8oxECS8@h1e#~OycsaC2h1O7}qo9-;d?AK%NnqWF+pHbFzCU|Z zX8*aLSx1@HhcUkk7Jo-30s0U5T=|KF_P@gD8wo&y$M^j7zNb(~5g09F919AQ1k|Q* ztsSk%zFJvL3q)vt+lI>VO;lX)@@pyZc30jvPdfe;V^ak%01KgAbHatbY`r9XQYuiK zmg_LSVo1=7jYi8p*(RN1;W?eL?gbRen&iPPf+VD!ikJfVwh#hPIIh2oBQV9FSt}BY|oX!h3~sw-2?X5|B^G zMOOV8Cd?1C;M+v4Nfcz>V|{Do;FBhr0o{#wt8gmceZI;jd@*>g4#*v?mxrfXHVa;$(a!May?R&mi{$Bdyt(Q@!21(uwn_~h2&y$(U@U?Z3Cj{MEpDDDnt;j% zwLqFP)v(ykki-JUIytBafM|?yHVbyv^EIgW;zEza3vbEJ2io!LIV0*Gk3PJq{apDK zesZ=BTjG{ALVn>g8v56HFAURLeAzy8dZ2An%%l%V&+$p8^sQ7Uy=(WiunGokx%=w)bfk-qV!5f@ z6fydTc3ZS>i1XIyq8}7=DeBu9PRIcOF!Xi+cR_e(k^WVybzozLAcM z$NKrfE!&Nnj9WBx1?VrPKSxZ zBagy-df^KJ>!W%`$SmE370_EbzjeppbGhF6 z;Rkn{i+LCUOy6YPUXztB8|_`jtqbb1?hAjxrnm1LpY*OvhK%g3S&2*wQ5?+AHLX7&tvKPojMf#Cz zK(vKYL~J5O7}I@%IwO&603!HRyOGvto_s^%B))kRzH`e0Kt>DekPiu-c* zXst^D%g33Ey@cgUFT7PnnS_dDpAPPJHK6C7+urt_rl%HeBVer{(b3f!#lZ7g@x4O{ z2H3Gxn*~;d`Lg=Y`jDgah6h|xD4vv z37t{5?;2X9qL0+KJO~{89u*$TIez~tNY7uAmKqUS!s_@_%oFXrvu9uOR@-Kur~bNx 
z0?R6tRWj~D>H-bIHs-I9Cn8@cOB;+Fm>yi&w%h1zIKOfR1a0V+HG^&30c$|w zw|^t7^j_8R7|JiF{Y)q>A1un2XrYeYdb~jh>957X%lRO3&BFml9;RzTqrRZz z6Ui7A$1?s(oWF)vUT(Iif6_hP{f)t?7Dd-^U0K6B&oz)?DE20BTlS z<%l*#$#QP$i>@2a1=Nmte?q^ym>be=e$jNyWwG@#El_{mIxuf!rMG9GoMUm+IkG%- zsSD##=9Yygzwn*q-3CqQuWgupSP80*qv^I-MWZ6J4fs}5{87bZ|W*2cFYB>Pq4i~MLWG@pazo^aW~Xi#LsNC~S9)Y9TBe#>Z$OH03dp`LxFGz?*tJD$_zA{wQ#eM6_{+sidw8A${QE_Z+ry~Cfa(pPMVi1O+B=4sVmvcN#Gb}pZ? zUv)?5YKEHDmxNc~Fz<2c2@{%In-geJC(bEkNXgJO{@_P#(YU$-^8(iD!~!RV6@!s? z$Bya%D~HnBarJ@eHpM_7h^I}E=PBRed?Tq4OG*2d)mn@786aX5>zNK*`!YpWO~sh= z8fsuO$cd)AAQHy{ai@igEhFmvL7cCc+BL*rM~o>uW3@8}(w7VKY?v?&zc7b|lf+@7 zkNI0p6WbZ#t@_j%lb*mQq3(K#_M$TSY3(kyEmy-Z`^fa&{6KUAAc2Ht?~^r@<6~!K z)I9UB_h%RtD;!|x!J9nib2_LBrfzWm_&|PwSAIUK@T6}B>FEunY?K5H(MZl8pgW~< z14$>wc8}$z}_>Ga*-yn_oFOPCAt z&7fDD!k;9nZX3+J0h5Nw>IS2l+6oD+Ll6!n@{R*YZN*A@h+xqW{iY~e5x3?2NDxe!W zRUf{aK1SFclNG}L>IX#M}w+L4F)#9 zoNv_EI(Gx8KD*l|>%%~KKO?0x@(|(6(}iavFzr~r@8N^M9+>|(*;rPK?z%NnZ3LV} zv;>?nc-E-WoiiXxok1y^Z|;&G_e~j621e;1^?zIlrM(t2_$|vTayXPzWaEi(I)#cG zQgG9qSUhn%i+3xjez8QI>U{;L_)zg+{>h?7@}jFwh!xcFF>(S^QNL*xH(7f?))6=~ zgFPKvFCnXznsgBs;FYvq=vz(I+Rr+lHoT~CyKk9GHbFi(l|w%+YzsoLBc-v3<4dB1lL@Y0UdCrU#!>`ATGNSnwOf!R_^D?4rasdZ09R z?S||0a(ZkXrr$jbzuNz!*X6MiA4tEV@`N$D+RS?;$h1zRzyMi+%gIo^FS3fPpi9Yo zj3+icvl(a3zkiM+v#IiD2)pajd250?FO=MYjRVN_=#y><+}|PnX!81&+NYtMg=DQM z|5HG@Q z@7kI*OXJ#ajL*O8!b+>kAAz-oQEZY)&qi2LPZ;}Y4x1AfU!D?d;t0>HqzKHf~i1TS&&4qx7? zJw~2*#>vpeC~pGnhktPIM^muy$&NqBhT4Ay3By849+QNWs{o9N*jb|HmevjQF*r)R z9-g`6UZ$URdFa$*M{(X<$x3s8i>wQZtX!zpYnL5yiq<#lr*p&py=^^etK~KH;+X($ zP20eANU=+)X+Y>t8u^2Ok~G{aa#8zdU70^_+o&XROh&{9?vwIwTxUR@JjZn!F1ZrH zsxdnldlA?b(WhySFUUaCpho~9%ZZQ${B$tA-O`{U!)lLq0QCs@CeBJB0K}W^E`T+^ z;=$(`N&1MpA$>bq%@0k;LPZM8yVD_=OZyN`lt`K9pwLs?bmjZ9P12%~!FcfjwBx3X zw)h5`dS!J({>L8A%xXE$Zi(s6B;|fkFjsH$u{luAAS_;1PGOrYM3d92YxPa9N!TBK zfj9eQV~A7{(Ou|E_?RGPvh5OjE@Is1GOhJb1yRg1k+FISK*ZWt$b1I;aJ>n_ahtz9 zT{?CQhYqm0=Fm%1tb)NVG^MCF@+<46->$FXqVrbvdHFVZxIY>*oqsUek${u z({%{l*a^xiXyx_hjpw7{Eaq_*sr9~jElWwVkz-579>7Z8<$bv(h2^u1Np>^6U}^gq z$!0sCZFLfWR#;X9a(>XJ<${y0hT1 z%3~JYD%#GmY`6VJ472r4&eM!m;7HZd#|d#VZo5F*12sY0wfR$Bv0VQ`~dVIaw9DIqjTj40&n z4)^{#+NU50oUB?L|IcWPp&Ln&CzU^cJF=8gUp`sa9a0TPvz6@A5#nr} zV|76@{HDMqeUTqdM&pmlcDBQU7c4K5H{SFw1B)pc*5T=+75`16L! 
z)|13q!@Ba%lr&}gQ0GQfKE2Nj`7Tz@T95y3Az`{KM_b!~C*?>}&0C zSER3fyQt0f`GZ3_`?2V8&ZiW)?m6KR{-Rv^PnYF9^xYmU>Q1`Ekl|v_Z6yf%q`&)U zFU?+JObfFg&MWg>|5!6lWq%G-h5PqG&w_T5jW(=`LNUT1F_gtxoo_#rx028Q{(+RR z5axFmb+1kFmKWN$bba2qe<`pNxN5WSXTZ$Hcv#u6hhHb%^~oBPHJOSmI2So?^f{P< z$(@fpm>9yxqxCtmrdHjyGq5n-Ft|lpR^@<6&JHMDm>!E5<2yL|#~X;ow}XFpuXHV_ zc7v3!l=4OjNL}Husum?8)@=C4ov{otMu+#gjh0cCRRKq{HBgtg`blj5fx!*E&%Dq} zoqD3Yp@a%E>C~~IFNI8s0_A+u?Agd3c2HTwZ>n`5g*HYKm%G0AL%w~wSd5rAKv ztWCKwr*^VQ<+3rS_Qgcg>d}SB;Tv0z`48C9z$&?k#d}kYb`j$X-}*}6dI%8%BLFb= zO=Dks$tMe6(!zYDT2c`YK`q}XuMfFaG~F75;!#2fa%|KeY^`HwxSnZ+jN=&#t@-gp zz0l@&iCsv=j>jPEy&UK!M?5ThY^d!Ehi{?%qm0-6vZC5w;_H&TF(FA45s^KbBgm0) zUphb!u|GKQRHVLTKP)a=ZSg3asf%ScZgzuOK~C{+ff9%-)2ph)}JC`;72ucN>l zY)Xr7;B8OP4D{Us%VeW5l#+XSqE3nX*4orb6f5V zZ^t0ZD}y`g>^8^xXZPdIYp&uhr7UdNVeA7?1xEouu;P`&0LixZXOGBG3vV9Yi>Mm6 zbv^!IG>cgL$9LxtlkR{@gMt&Tw_#a(W5FAEkxF41MuV$y2$Usz5h6)Ox6YQATI#7K{AMYO^;##cJ`K z<;N=A8U7cmGkUB0k!9g9H=IAiQwiWW;gUAyw(4)|iMcNb7^m7=7gJt{Js*h-0n0UZ zE`Fv3@8a={InC0F_}UhudfBxrPi(Qsa99s0D@(oGzW}?gJ_oW7lNhoH?L#j=`paN_ zbGHkGTDK6HalASBvc(;SF;XaTA)%3;y7s%XlvCqsdSG!qr>$*vG?rPAB5Gk4b-vze zsru`4Vi7S_T{%oSdf*1MclC3!VsWYHkW+Gk}wlW%q)EvxK?0C!b zjSCdzJ7^HsB26WlMP%6g1VqrbNHZl*+H81c}V?5OjF+HASX5k;2fV-qqLGQIz{U`+-tVAW3Y> zAPfu`E8P9Z7O{M}@w^A!X`BYaZ&G@+M6#RS7ZK_fA~KM^a}K@Dxn3aQNHI5(@Y*_x znN>?|DU&Um(Y`jNnsS=|Hlal*5W`QHFjJbRiznZtOl4?spn??nAc>GV0|%8I!zYXrRpzshVg@L5$US)n$x3 z&xhs04rF|W9V8K^pvE+p@QY`l+?uZg_aR63i+nvjxRN__D9X@GlYfsF+5X%h*r2sIH`3Zd{fn`uUo!TEsFcAqy;Z-_-n6UB zW9wQ?n+0KWg%TBo%4k`0-H=6NEJ@!9X1!jd?DXUY`EYt)EkzC)IUFvxG?xHD@AUbQ z&J55`oQKPR-MdDD7sHPNLg+_fW?w1kUm^~L=&tcS>S2Q#8 zsW)_&-dlQoi5bI%T{Uy62s<#ZbbUmbv+)if((vbUV#IvFLM}RUL*U>aMtzfHA;YQD z*z0q`IOCiHB3frWQ(#t-Mm($ONHto|jd&J%<;v>lhs}hSgc{!7cE!c<*C|jDu{F)ruz-k+`Q`AjbE z|G%*vX0JF5+t3|Oo5(eCpYY3|!c&Me7072vEY-!;UDkn)lJ8HET7GT{AY)11+usZQ zr(qEDs`A{Lz$7?6VXGV$eYR}q(x$KifVVkr(aCJZ_Ni*#H#C{u-jN0#G^-u-hsa=;$UA$2X+E64vB6ThndmS(Oo8>a=_J;l)NIxh-FTyz5 zN^4lAYaB&*R7X`iIWCbgiB{f}l_9Y9VqWT&^p-I1TF*Dyg^nwf$hDxmj__)JKf%|( z^Dt|o6=q&9d*GNRX4ucsnPyj{rU3@fYw zWMH7)E;zWItijM~3-E^?YpF|}5R>7rdu+6JFfvL>a@_lfchQJWZPuH8Du^z!!kXiG zx-fMA1E7OWE*wv#IjT*v57#zlu`xA6ccZh^s zy?|wWdL&RLfpTZX;Nw`U!UVY(Hpy~11%d4E6-pSztW;m|5XyPt7)C-XZ3k>KzQd93 za_v@at#>xTiy*UoNyxg4pM=}EZONYWD3{x3wFa!;*VTsJm&*+7!FkNF%dB}MjFp-S zDP|5KX+-+EOUUN$x&gXa_9#W+Z zeY*cBX};)7q-K%+WzW&bnuyJ?7%U>J;w#o7!^Ny{Od|o2-+zwR6vL7A76yi4?dmQb z!!OMfs#tcw2OWq_-E zX{d802W}jV&`++otvDCH102K|NRqv!*lw)&A`I7f44HL5RotL%)JJ8AXLrGvcFUlV zWM)@D4L!@aXFr@i90G{tSCtH14aeZQ@JIl#Y4O6UeOhY&0QnW)i*{UxjSnYAa)-(D zQOnD*<_wZcqRRbiPzGq%0!)*$vGmnM2-vrl!j_r7`PUKJ?fGgwR_MMYUKKqiGY5MV z15%<%6m4mUA~<<4#Key#ifk$_=1~dcV;QX!w=P@9xdt4>U@qfS=?;5|1)p-_4UkWm zWLv)WTMS*Mnmo4dPh$RlhcJQNjZL()lwQDnZsuL)D@G`PrN^mHxChKY|MbNXty)xETVqe1dxELr zV@g@OijGmnuLUGEc0lzh93ZFVINYLo3hVK`F$Lxn2=pcsFRp9faScd%pIOk)K|gT} zG)Ef&$@>jR@wYc%_uk}be7wu4+jRF96N5STL?G=Mxym{UqoIOwRnViDCUe{G?5U_| zgV0yU4s7ikTxatSz(KKY&Ywy%fIN)H+~gvx4e!=eA|+#rF!Na64e*$21bF=5h1tpU z2(t722z#H)ep!Z)nwFofT2{4r`8*RY)w!_hO;Qu4mM>aZOG%X(lcl_kL??zpnE(9QbN`eWpKkv>D{}W3GT8+p8as2+A8lk#1mRegNlv z9^JhO9cC?oP<3u;h4jiZEh;p!N>KVNE}DCxjj>!X!-023A}+$(I5lUgp20!&`2%+# z&!P--Zb{q}@N&mux;ctRNZ^iX{M=UYk5%GbIMHfHd#09zq>*ON69kpCqKQG zKP>395j3ZvK&F9P!S6CCpQ!)pTYWPx(5%P{L|smmpxyB9h7s0XooZ!ri=-OtpV3*> zMCv%wwWQ)vWuHe3XHVzXfClg2-j;c=fa4~f*H&JCKNSvN$rx}=%7v_SN)XPqB)uPR zLRt*P`&|59B#D$GAwG~Bxtm%egW~QY7Hm_P(8!t7igiYju@#DVf67KdJDQbOQFLJc zXq9W88J0!8_LCJ;vowecCbEol2qOLWQ&rKjcbmCQlkpv<5i zT*)>BiX_myOayQLuy_`_j1$fWvUf()S_l3 zx15XB#i;ejK?8#X{XnnDsnREPM!kOGxci9M&%Xfax@{g72))2FHrkIK@)$W9gtAUi 
zuXOzVgXFa=6T&oB=U+eWtr{rQ$TPf&xFy(~Nru>nul!h5FT?Ro)?wQTZb>gpMJlER z{~&=2D#~!@Y7XXXW7NGnfF5n~=;B$rHE-UVkWF!b+<*9CU&0GDU3_@^TqsXnL%Xz zo}Si4^?OHP@YJ}K~yP$_1^)(O_X`6Q%P#p|x>;*UVj zHmX_4YQb+cev_<0@t(WF-(iMds zx_k(5Mk<^BA1(X+H3Q}l+mF*7$ue>;PlRvp$Q*GuE~PVInT+OS=|rm>qd;F`oEzy~ z1N%Xy=Jfu1%n6=4|2sdgiE(r&=LTuSr&OwfFIP&h*BcaVw!H|dFgDTeihY{?)~9qr z?_{c|=tNl>-@MwA?^_Dw!96zQ>jZt=1kVkUPJg%B&^v7_(UakBzU$cyThkbiALE)c z5St*Oi_JlRQeCKV=^|R<;=s5KAl={xtPWbbuVs+T12RI}pX|n#6%}J0@9Ix1s8d$G zb_tr&Rmvz|UJKgLX@7V2{>N_wGSf0&APV=3JTKF%p*rL)a3FcwR?_&&$>m=4ML9v* zaF=f!dNC}~X(h@)jMT%4jO?r`z!lp@6sz{R(q}R~9+(FOzzO(na_uu=E@*~V-X5RI# z@~r1sGhpXiN%sD^Goj@=)1EUgQv2tZ58L-gRR$EKFjv_b%0m-Qn2l!CeaCZS!?P0e z`1euLc)9oUdJo2)^Ca*aTn-<#TG!<0|NiWoHlfqVpyaBlQ1-q%(R>3?{T@aK zFOuD}#E?roh_0+*U;f>^dJE5l<06QLz4+@{aYyl9vACs%zsl!t+@OW*Qibh_4=mW{ z=%ncn=BzwQy+7AwoB45P!V! z5(FP^7z`cP8T%wSaNaLC9gk; zquN8-@nlv9a}+Y`ydA`Ws0!t3alSaxY8d=P!U-mrcSVbWuB=OQ<&k-Eh9V+7E6P4LPbN079_H7LI_C(7bYe^gwCj26>FChHwJqNn})`^{1KxXa=Z8rUBEa{h+Y)+aiFwUC%ECoZG&nTAnjbel_X=>tgGtI;8-W6Ar7?ZmtLt~gHSnK;Jv79Qyhw*asgRi9n`yRNA$6=)YW-=P9n?iqft8I~r)O3fctaI1!)rJVKc zG#!V4F)hk0*&l@&f#!u7pW@co_^)4d)ns=I%?z@@Kfr2kFH(NpTb8gQz1Qs}UP6L! zD_L9(Sao(iRU<^bLorTe#-9F zKB<@B@lZci0(9s&S``Xb3H?nk!O4TTO-ZyD+fI+fMlzg2m!K?v`*<0<7U!>Fi)uzS zTWzoXHvVt&}CHBjZz)e(QJb0_`Bx2tFV zF>-4~u$&%3SCeD5aSkUPN*R2OX_r&aMxN^uAOU)u!*c0V6=8MyKkxu%K21=3BHdvM zO*d#@VFM&cqbkFI#z8;@^R>T?-?Q)TQWs1cTK*h>*DbXYR05b!L(K%y#1KR z9gr@yB=h&h8TWIU?L6mi9rWBy_=m@n_f%PtK?mCB#sk8jJ0j3Bv-EtR6@**O$_wpu#XWo430cG@%Pm=SPuGE?=YnELo%!d3 zPimT`=k|BB;M^Aw759_ z_L&;#@SSCKcgZz;c=Q^B(%jNqUouu?_PU2%#Yg=LxP;sq<3f)0;-5XY50SwPJ6D6ZiWCWnauC0F;|b2iz$rpbz9;rOZG^TJ_M%= z2VAv!mn!F)Iuf??@km=-CzN5GJ-c=3pK!PLo&p#L58WFO77A2mv(%lkU^-gl{ zqP+6ed(Yh^dnNpXu|&})YeIj!=gwOrkE7%uuQExyNy?hHo&W>z#l&6X%QEOW|H8Me zbyF+K8Jz=8MGgJi1zASje1rx&*HhABtqmNmRocC&jqqJdU+~l2`-vOTcTbMkcE(+fjh{Iy0^CH7?p4*XLVlxYLDfXbANkkz9nOT_ z?FY3lIrmW*j+u|bFumq)v$$hJ~{J#q2&@7g@}0Gc8<<(NM!WJY=(X`+K&-lHF`C97sI zqDX1jri=8u-Gi zJIVmQ-{`-s^z-Bolis)HwV_wYvr{`*BRgRQ(5R8d?Adw_AuX<8kaz7uY>q8M9fR7j zr;8fb1F1`C=NsO153$pJEF4Te5PkZ*8&%|xSWW|3Fh`Fx7K&NDxTz=p6A8G8rfQoe zEdP^H$%#rOwG+Wb>;=j`KR;PqG*pOEb3ScTH3L%Pu&d=dx~|H)+rO0L#X0hi9asbG zK!9;(b}Eh&NC$4qfxBf)i#_T`?lpT?An}SjO}uhcF&R!o$0^Ch3V_=s--$bA9{brR zCcqKK^(okiatU#3dLV`!&qjx+3wp-@M4;-9q`v(}>gG zn)XQn(=999M^>|RRpg3Z&1ed*4o;bIWWjvayOpfwCf|8=HOP9mb3#bi5mis{Sn)s_ z{S5!;JI0S$92W;t1G#Tyl)aI5gNX7lqa7_fOFP*e_wY*8IUqx704I@yS zn5*dbC$MZ|TJBLlQZLy1{(I%DYvv#3Fh2O`x}q>w_g7z9%BL-f@QU|Fu$6DycMz-N zoU=Z_l!&qLBZ(0^lk#M7w=?9#sN8c02P_CD0O00hW~fhd6i!m{#PS(gt$gl>>h|9_ zJYg`YN~l@(xVQ4XfF9yonG^BObJb>~=t`01?nL@U2zG`ME4fku19;{uK#yhJ7c&gs z-l-VcOp-P~fRbF*U7F8hLXRb!Qe7u1Iab^^nzkbA*r_0!sI-gSVyfcCydT4ocSVv8 zUX>C?MS?FRyNTGfR<4ZtV?`#<)(BIp{XNrcD~)XMUn45rt$zKaJ7o+IA>0worDo!5 zS66W0Hb2#Ak|wdZc`HvqgzJ^sM@+Y6c+&SZXQFRePxe6${>sdUf(2(gOLW2~_2YLq zNIGlcc`acV29*NWn9HoaD{l@5s~4jy7y##)Gu1zx8)p3H!m->aD1@+zP||wj@Y$~a znXyV4BS4NJPmouf=D%*>(`hu@6&&(RF$ z9z@KxD1=u#&)Fo1%i%aga)F%j+9@2^5e%+3HZJWmvPdf zVpGB0Tp1*F{{^^Nd^+*lU7BiOS*n!dBNWK-)8<_l_95U%JjST8xZ#> zn8E2Ye&{4~4iG%wXY-Sm|9{t$?P@90la^V-=9t$sr}o1pk}#w)u;gFrXKuZC3jRG= z&iuD1^}hfR8nwvjcf&UzY18i%PnVbdw5~q?>qxHGOx;FiBM<;m$h7rW17?|C=YABA z2etikTtW2xDE~P3SD?DCjzPoD+td(R9PP*5$F#LMG*Rg}0O8_v9hIJ)IyI51V+kf} zlQ&1Fa|}9!+>Sqs3Ow6+#Dl0iTowDTIS4U|QB6Fxe26^++~heAW8L-669Vdn!q&L7 zl>M~AZzbqdRJz}!+p5{0l_%4^J2CBm!WW)xdphtSg9nx<{m*koEV5nXcbvL-aC*vu z+g>U=wF}HtDopBulN9X0-K%GXNK=F>YBN=zYE3JC8TpNYpA+6Te&*`3bU6P}rSf_o2-msONdk8MzGCKCu@F^Ez~6Qzg&+6I{?TT)^??IRYRrh3Z>Paq|)fQ)}m`h&}-;}5nm-X}IqqmvSnCdH5c=*?&iTfW0Cat+u@ 
z@zYfUaZEQ)Et|ceHwD<%?5)i>m%}Cd=3igj|N6pQ2k355g7{M?kFD+J80dfM{fol%aGIbCu*%^d z(%bQ0+tAvC6uNrVHLiu8R6TO$>venGVTiuq z|8>Gf*+kWq^|w0$g8x6}c>LFeK8P4O|NmnUiHaTh`>O#n+roNhu+$}V2}<-m)-|5Shy)mo_iKYwMHv*QJLmQzdT zZ8Qn^J+z{Sm0XQ=_TO~*eTYGjP1hg`Fz8}HV2|dovp8s2U?p)_fEQ~Ry2(I!bf_Y5 z#Q*+iW%S4V8}O3f;nx@JHH2SBzAflU>Huju{XR`k>1%NlAd;Sromjw=ay|MDFQM z?aK@G2I$SN8wFmzwYIBb$;MT#Y+*ur+Vr@I=A)2k=bVg{Zh5g~A zb&d0PdSG0hXO6GLwg^3#s2CUd-stuJ>(-=M2qOAEr^udh#Yc^MC^z!XDrf3&q{bQL?U1`>Tj z+VdU`6JOP$?>Y9h%bW?hClhQnd2210m7(X>#!Z!52{kLv{UJl@ zM13Lh1mA*`xSzh73z407GwP(r-}26t&Q}wtW}gu1yG>L)f_}R1$YNHi^gTRj z?Q8K}j0b-V2$N4dgXDZ~Ab-!(>B)L0RATqqw4qV%w?)M2o6WUZz5|dD9aW{^@aj$a zo>w*cTiaVV+sy--f10#?Sa{xF%vhAAdAOCXdE!VP7Oc%G@Hwjp%6y9p=C_)Kp?_F< z$mw274H*n~uFPp=@?<>TCq`~5SQy^ybtF+F*q%%@wl^&#p9>c7wa@a_ zM7wOiw@yKr6zOc)f%ua-;bXwfr%-*M^8Q|Nf%Wo{cQA4279YuHcww5aORA`Wfc9a zvl$1{Y_~mqq`88H#B49`Gy6HY=wi(G=O-Vn*FM(vjT_N<=DDZXxQ%nWTJBsP?kvtW zExPLIv0RNy;BreS#lW=b=ogrbertev6*97zGVhW`Vp78 z*G5jmZ?MDd#qZ-8u!_Tcmf4T68Ohjt%kicco0zYPS?u|n9=?`vvsLpQQYB@DtVsLR zCnwmWM{}%WC;GAV{72(IT(=IpIO{j(mFo`+(wu`PH0JCm>$iu5EEDalN3V|dRMaaB zfBGR@_N7SECwc85?mgbo9f5|EvzRj8FVQaP*Xc`@0azkimzvUAnqtM2%TYA`4 z3AJJQP3NX=FQ3BwH>ORmQ|+ouV=09)CjX_7i@kM%&+8dn5VN&U{KlW{U;znCe)PtlUl6DXSFhA> zj`@&MA#an?SaTjiIpkSkj)P`uc{8R!aIh!8Iw+yABrD~KzPRP~Ye8bZ+_U^F2Ti_u zkGpwK^y~(1r(EFr5W~z;#tx+Yps?KRP}us+eceSU z0o`L<>tn7Hg26-k82Mibj|~qZ%hyoNWqaTl5AT+#`SIb(!lz3%KMOo_)Fx^dW^V2X zsUZZ^FrPI?5u#5GXJpZTdLx$+i1anVue(I}f*)h5!UZN?xn=V^2;(|UWKl%^JUJ!L zay(yczT_EimgdDe)y-y)U$)y*N*2AJ3U9R!Rkt%6jrjar@9|o2b0g7weX6Bw7j@W! z@?29n#0bhal&GIlD~fo<+7 zg;7EI%9g1bo1Sv0u!@~spk}qi5qJ@1%@(-kOW`7;H+v(MxDZlO!;$1sElhxXFH2+; ztW$`A6XE668+nI4N+ z0Y4V&RkP?1uAAS!jkqRZmN8#^6rsQleAhLJ$6meb(ya((WsKf%+DuS_ZodoLGya_E zy*hKAPZZ(h=K1Z#-}OSjahXx_4dh$1qyJj8uL*h*7BRq-*S~1v?jl%^=!Ux`8?WgN zu+Hkf&*;qC&yOn2ZTVbe6S?W9n%BqmDq&Fb3zt433)6?`%Qwre&QHoT4+_67%50kX z*to69=i|Kh^ciHksDRi_tk6)6qCJbNJWwYa^eBZ3~?p`~|+ha~S zNj<4}ic&(GnZej@^L0>(rGj#Pg##YhizhJ#D{N>;7{o#Rcxkvh>W}g{6{zamc6Fqj zs-cV~^CA85z6tM0=(fw&Z33b9YOiCv`9y-9pqH{V@8#A*tB`rOn4bf4LJXREqUs+A zS0oq}yid3)ddasbMGnnsut|*a-jN3nWLy8(HbAhN9ra`_DW2CWv!Y&k-;FAJuG=zx$fdDRQY6c} zRxZ3!Aa9B1l?a)y;V^GOD+eu`wng6xkKcO7Eqm?dp8U`kvp9V`+a~os2_H;&UA5On z6$u4#Ah6pRI7?uA$|`$_Ol`o}Z=+}`CRn&o*rB^E(a`!z{C?HZ&|NN+K(cQtOu~nr zjyb_j3_XMMw?xmqI+Ab46HK1d0>3QqrHujp^+~2Jtv+S5n zI~sa5gM(4slJa1)ZQOvdO8PWx!0t*44aZ-aZk6MfV50tG!x?=`!e^FZx-}?D($Dr5 z^;wH!N^5C?q)$Ev^~R5E8WzcK(Hp~4rP%C~Of7-$`&O25 zpG^i&#Ui%RCQpTnVUBv}!Pb8@kIikPNhQ6QVtr875^p|ZGyiT~hT^>jDrHZPFCN@( z$ov3lVO>hv7Q@l53rH82msPdxN!9`U+vf zy0kC-{hLn#IP=j78{lV2w}8?n>S)O4Z}r~-h(Cl4?I3i}w5`4J_z48CBL=P;_UI(S zmtR}ru0ah)>? 
zJY*$};}!F%dubsZ#v#rmg>^eu+7t(s!Up6YYKC2hN+{%^z8$vL=)A}nfI=mH2FqOr zn|Ni=F@y9MB1spOxx@sPZYa-O)b%JPQ)x55T;6LKxo{m~zsO?IiqLS&w~k!8rt07| zp%{MW8BozviL{$G<8pR-j2>0Hx7%Fc5dK!?2O|vVmXAHkv3A!H%rE6Ch*jbeq%m7U zxb$h7Jc;K5%T3&keUG`O=}&uO;Js5)%vA`ygf%qeMGWgwLUYI^I3N7Aw2r4koc61{ zy}j|>Se{;L+8dT{#Ol+C%fHj#0-1{^mJGWROtl5opWpB$*B#WLa^&T$;#K62OujhK(fCzwuW!qG-H$pj7Dz%Rb#_Olo?twlSyjL}(W@w-uJoIW zCtc*4k2Y@)o@e!!>1!ku&)<+#`z$cJ@%%m6N1*oOFKd!?o^+nQuMHWvT|s#oDNeM0!gm>JIyS4gq^wd!L%JTG@@dz#`^0Ov@ab(Fqq0o3y~KUg(Tw4%hM%DV;J` z3$16OXZ{Rj;E;@(k@U*gi-J8;&6U6|O|e+wo(#Tz=%KZ^YEx;1<&&|*)ibSabjXJT z!R6iZy-fow;G4MV!`9P=e}b_o*+K7)hI~9#BKOF(bqC%ZYghU9spD>OOJ3FAUmpzs ze9OiU;V3(rs%P-)P#PH0r(oRzhkk@Do^}ARlxb9S`jOYe+#_Q#EO3-S{gBN2oJK8o87}d&$ORU1Kx?;#25J5Mot&*Z5s=8$0zJ%h zdpM!M|L)SG6~`I=ETR-YVf@3UQ!l$t!KFN%Uuzxf z_~d6|O4ZVnn)R&O`CtXUl6!=#j~fv+$4@-SG0$0j9s2H*MV;R$&I3hFkbmpkp3@@6 zQ;qF9Ct_#9Re}fMw%CmJy9PlHY^q}8w+(E^!D9R_V^v?1Y3!z~yo`^V`HP_t zFdSJ%v3`z1Q_c^c3+6z%EmBhuB(cqzxEP2!$y5eixLoX3`opzei7boWv_NMyN}GS$ z!4uZj6VKI;r0e;(P5E24>Tu?46&LS4{=>}iESp#%E|L+LQQ<++OJb2hH$D+1`VVV? zpFrcF4>HlL5*?i&HO#r`uoV)SOPoe%>jIxEo4IGel(f1)#l`5W7Eo&D@fWj8a?NNN z%so0w+>Pc(9U%Ub^lKbqOd42x4u@EjA~=>QXG^}OtZxd&DyC;&nSpB5fU1VLd`7HlU_)O&IP#XeM-)bS?X*OUtM`0Z{3_OUlEKzM>Pfu3Vv=+(b zxwed!l@>843P_0rLhzSD7)s4_9A48xUeQ8`hg=PpCw^?hhxk)>Qsajrnx|z2l@GI2 zPf8vxe;-ljJOvm4ix`))^&XvbX7HJ@6o|NZ5{7gi^!ij86nU1leWGJfuU zQ0zCzPnZ&9*xOCC+Iw0u`rAx+>fDyq$`&>{HCs(=)kaTpi zs|0?)iVQg8*a*|d(lmzLMudMo!TrAE%!o1 z!3>o2))*vrODI=45@hsu{2K$QURHE|zgL6a;M2SjPQ76Bye`N(7UohR^&{ z=6MAxAE=HVCx9=FzInSDa*iyqAEeU3*A%d zpY4H)VQe3Q^64m9;%J#QoT@Cv%cYuL{V+X`24L#XT-=CdE&MA7)Cci2O@kShIcL({ zBELltrvz6h)u?w&;p)RqWUBu)NFlMw#e`i});9SAkIncScx^B5RhH2bO$4L^S z1x_v6WWGYD?V_dlKeJ5%tbztGDe+(Sai*ZNRp5wxJU`T0r~J*E@3F9fV>M*H#(D?U zeH>g@|Gj&mEKYeh!`b-YO2MEh1VIw542#YDTR{{U&bX3V()^I`$#Kr84-E?Fnk-z0oyd@8p@$4U@jC+2=~qT#91JUTo?B>2I^lNiRg=!Iz5qmn^q|k+B*t zmXkuj*MY4{0ldQ}mN{jMA}(|5USpF2m{TH3aWERfgTd8@ni&q(WuZ{zeF-b0ok|s6F$pKl8Y^#Bk8yTsVJn0Yv z(Xr21YA$VYc}_>ibh*x`lk(%T5#8|Q4<`O+8Wt%ak zS0D+`9d&!V5u@`60MBnmmbk4?U5#0VJ^!)k^3$9J2w$&H(NI424`D;UtNJssBK?JF zD~Nn__D7r8R0LLKv3N{y%f%^`FyF|7=<)D2Ryt9;qA~lu8YsjX0OFAx3Khh_aDCqW zXC(cViKo`UnBlRVHMxr-NK(Txdd#e^sh~3w!i|suexd;Ric&+I#zVluMd$I+&|vbt z;;-@Ur79g)IWt=KJ`3`SmtUbmhfNMgB=K+ejhw?KUv zta3+8<7g4YAwEU)Z|?y0XKJ&GORba6;VeRVX!j%K!8i`YtK~^+<&X=7IF%}MKY~BBAGPYM3m!nKqej{_s0zQz8A!#Ff63Cw2n9~kS;}(=Wk8s+=3(H z0YKwMntEEJf1unsaXnErNM3<`HrAMm4g%s3&YvX5^4uI|nPuPbVYtqkyu6Zp*#do^ z(tb%#nG10azWKT(DA6GQdjt{IFrFe$h-u^s>nMd)=BCrYbL|Yx`usC5xS9YHl1}@7 z@Ah{XbaQ%>L>@HGd8JKLkTzXTbD94(1~rJ%GwhJx-6MS5T<@1!m$#`$mLFVGO~rLG zK%Qf(1ecF#kkdutnK-#d=hcn%fKF{iec(0V1!hi{PDUi{EUfLd|^SKWr}NG z(_cg&D&?PXq-p;A{yPP`{J6(;riD`4TRBG*aEUT1YE78L(V>+dcDaEltx`b2r_J)j zXz2X)=&st!RPYBGR~>sVm&gFxqgNVsKSP+ z>26UkPy=??PzoFQqAlZL`+mDfaDM}KV^I2ijLC}Wu+65Vo1UJNmgYi=;AAGPlweV}&_r_5oZ`08uS1`ZajTYGPypOb=> zz(IM)B(nUuHV+3jBMYJrcfyakSyMZ<$_Qwn>pFfmWk<0#(pVklUy z1EjlM63#*m^|%UV5&Mc*{l=yL?T1J4JQYoI*i2e&84Zm^futAt#f&QU&SwN1k$;A? 
z2m(sMydvv}MspRz;fARumGU#|?08z}!u?S2b;!#-J?+nMR`A%Jnw9a9)djTx|X|%HS3t; z;kf=uFnnd=DuesJHs)jwTN1pF{YtsDYdRwog%AG|^!CqM&Yh=T?T(Ly1uboNb?h0W zb-e&j(gF65VfNsXq0kMy_wt+qUmxrmv$Y^T39-Q9c%Lmw=lxR zB_WWu908m~QO<2bm7?SDcdZgI9YQIeM9c0y%G>|{Nmcbr_tMA?R@Ky3pA^o>S#X0t7469f;@G&vkUlmJjpjxy2yUbg?oH|Rq&pnNByk@= zP_fRLmgtOUc@Y^Su>KZpJ)lCsEqx17__QIPrGu3rbD_-gIV~=#6`TnUA7%Mg@?CyT zAA7HEo&tcO5Tntpo!fn$j*|1xqslll!7&Mw06AiIX2njoj zc+%bmxId)dwvA zH5*6q1;3j>j}K&FY%Ut9_&;&O~{7jmjWRCkVKBl9J;U>~QUq$P_ zT8DGZhE)fu*17u?F2>*6yI-^7@;+r=rDkPqxA&Z&uql}xW0UHGS*(uhed9kNPR~~4!Q9pj+JeZ#vWJ8`j!~Y>(H4m|#|yqoXpWN=nr+|D*w87N)|VHVB*(xa5jC!2 zJrsm9ZaV?rA4x(_HU6mfd*kc%-P%Xh!aB*a>sRb@YP6i#abhcAj zlN6~4PI=nU+Y$|_BS!!@o+22>i@cgfzQ@|E0OoIVho+J7p&T z`6*4SNm-czIJ5p&TNE`FApo%TXoZ>Pr-TU(m zJsu24umR>5|FW}x5flvAzTrYz9@6I;Jw#c(Qs-4PR3T1B15@Q*xJb1m)!Q)7MJOM< zdmS1p*b`i;1UPTV62E~0cwVv?-5HHKoxAwC8j|#HBD;> zAWB~bmn6v(9`#aFLm}RJdd=Uov&=7F2|GKSCX1^P&mrr%TN>!&!JP1EguCH({_Bb4 z7%B+dr|38YbRAsY)fjk%-$}ahw3EETZOMq-F;jG7EZP2@m~(Rim7CtdzEiSQ_h|pi z_BRhaUET^3yhjXO2kSa}oen|k-^#E3kDHRkg+?>o)o{Bu=Uj8b9GUxy6E^?d_f>}4 zz;dW?s@fnyI4}5i&$jPJf8(#4q?4J^LSn6i!MnKYbsqG|HkA*4ti6?(NV2(F{Y4FD zr0pV$-u)At;SZX(x4<<9hp%!=BG*ZeNu;cta|**IgzZRP35`>TLmK3^+NMZ#TkVCsvLFDc zGi5C{Y5Ocg6(imqQMA+E%F4^jKay|VyOV!J4!|wUc4&JoUuy%0#XyFED*LF7f~s?^ zMUS~&sdXFQNhnLq%Rdu203CJ|_qt@(j4sQ|lXPp2*nqu;a^@`KuYj3NJmT#O+#V5@ zhoH*JK6maL2#ImE&k3z<}PcdHR=IJ$k(*jzcYU zpqF>ys2Q~^EfgCQWp<-(HD7kw!6eMoM0~ZYeZqv{>CbF-o zS;dzXPPcOCPZ zu$r2x7?~l?8x!WO7!*tlTRZ?Y71s4_qlMU`gsQ4(Ay02}ZgI>g2}B;2k7dyZ%h7p4P_h@bAUjnrI7 zYu&@QhYv~4`aSfp{nTuUeuTNlE$qaFk|LR|TcGbnR43YOeA?W5!{Unsqv;yWy?v>i z34rD%Y8{zN3k8X1;a)>b1JOM|0)}9!F@%d#TB1iXW8OS%H?_n;o0>?WV3EigRjXc+afQxDLHkA4H zHq2R-d`{sX>}uh+JE#O+{9a+S@&0kcS$udV&p#bcnUI_lH(KJPlXh#ZJzBFa$#QkG zLBi^a`hsaq<;csz#Mm(%rHB8j*#xvPWI~7Ms;^czj#IZ+y?QBE2#yF?7McGG3DjFZ zN}JRJm+{mc;TEXZK;eN{-ay%mvj(Cs>w3iJL^J6BL2Q!;F>@g9m-@qzANDVoHeCi8 ztw-X+6-!}KtRiLsM4-+SWu|4KF~RNu^_M8k**NEl+u z$l%yUbG3_Wz7H6nlcK{eCKT>!*K%LFs}(4HGzVA!&{#I^b@m^2< zn}RI|V!Aw*LfKX44;?e$a@!137y3eG0J!t?TeK?H4MlX? 
z8w~)L?`l$hEfmz1sXJ~vcT_xGD{Jj4~9ew{_m0x~(A5F#tWG&m%tl4F*n-|%A01OMtApW?87?M^52huU^pJls^^a^Rly{xUdzp3ivu&Zl7 z-ioo)R`9V(i^NlLAlOtrGB$6LA|KaOJ89p#xnB~uFwQmpC59GC+A#L`Yg5Lnm|WZk z6RUEs@Pq?hecMcQ9C5Ya=rZA`1l6}%Ff~+yGMh4gs=iJtE`>c;CL9$Ol$OHSv-_lJ zqaj)FybMD)r&v6Gxoz#+qATZ1vV&g}t_3}mOlO`a*2pJ*w%2rW5#-N{{44@x5KL_Y zh4CT<4DB^z97n)Ht|5{nX-6+;7R<+^yf2L1pEoutr}k%W6+Z1e>2|OlxP3i?`0<>+ z|BY|`E7il%Ow8~*H5Wl#*BqgL|LG%{?7Y#gj|(g{IVJ|>>kF{Ko*epdDc!3c;dKMV zA5M#&!vmOgiK4Xr1CabhSa$b9+E4m~mz}&H{4Y#+Sh6Cvc22N#Mvh;^x2CVw-sJ#_ zey^Sf7D;q-HM$>C#wmxw_417iYMn|siEQC1Ky=e zQHx-n2l-n=VllwvRvd>!**s8NI828S0^;|Im)~5DDC+=V!3gG8G0euhI<@alKgJgC zMQFw{%K3?y(KKTn%MBIpAp6|;!-BykZX!n^VA0ez!b6Ycs_0Mj08EXl@`T@xfClds zn|lHcMq$bp9P4f5)~(qmC>YA^mUAGl__bv77--frIEPpt&^i&nrr?6UA0`jJ&4psB z+k4zVYQ}MhSBxO;qHb^^D&*%Z1JQnTPy>%6uGi2DoG3k~<{2C{bMVp7Yd-(D6Px#s zxd|DDc}y8w@>EgM80*YAIv~Bq6T+H5G)6;T9^GLV|2841Cj9+HHs8W2Wnr5JP~3N` z4pIV&_ZbXC3CthU_=BUmv59Q1qT{wlgn{*EBJqR|cMEQCij|S?L_n|ezo_PwaC@;K z;{ni6;H+Zb9ydgWgcmn9nOdN^go*i2vQo&C4V`0Ozg!d-?#XAOW6t@kr||i3iC?N&5ojFgJbF1;@D9#4 zLObETYb4v-;8>deYI+iQw`AZyOS3b3uSH!80b1g{fspXncBzEp6{hXyY**%bgx{xE z_B0Ojw|$?k$u*GKTr?lG9@(+f1A^DQGzc-&GxnApjgMh^V?#K{f#M3=q>hHNR2=5L z+KRaG2Te580B^+^rQ?km9prI#)sw=?uLCglb%=pa2Adx&1ju({nD%MdRyI1~Zg~bf zqCO)E_aQIm9f#(1`;pdi-BH_g_eX6R>H__I$q5IxS3sUl=7|)k|%%bF=<^4hM-v|vEdl| zVbstp!#vxVi$H?IuTuNIV`+qosSKI3QA})7uI$1*i(v(Nsxnx;sKxLbs@S7G74uSQ z0GiDz64W%@F1e)ZDbmgYT;FA|csTAxr*sHt?4x$d)+85L z4CuvB##0L3Tu9^d^PfxagS`O*jhT+ca4Jq9D>juy%Rj;n0mpsG+W~4oIl@d@w#3)| zj7ETmW$Mz3c!ukPMKCqonyp&~x;|XU{4EL#z^tS8@yk6%hWaJyY~t7CumB>KzX_bm zU?#FZCMNL$&}P|0(rqfZ;BL?N<)*Xiw5Z>bD5Zl8Tn6v^@~FD$Wh_5dXK9g@Gkfiq zDSro{6Vee-p{(EAU^=WW=j4zvKWR;oddho#|Jsf`6v|1mYyRfX4z6dR=P}5axUu(V zV~e*Q*-rMk!LPdOq2a$V$O;xjLZZW~@R6YI(0yF~b;w0#De-uJkU}Qf510%WrWUVs;$yF{f_`KQ1|18N$2R%<#6C6GxFz+SL74v8pZ%-6!oQ`7jL z$K~za(`IHNUY41>6d~gl|3{bienTTH-eamMQpbCKwJ-_b4@E;?t;{M;b^=k8*}zAO z9d$K$o}zcke@<3cH+XqSFcMO?HzcbRj$ChOY$`e+x8Nqm0F012hWR=tDr5N`O*4=L ziQP9c3hZkl1ps|BGb(2e@%u5*tDzu1UO191XA6JTcr^o;LQe-(^`bzomslFp>l)es z!Ri`B4rsi`pc{F8EzeO9=779a?#t5vbXJGCrl4SybeEXWKwp!N>p3GM%M2XAWV)0! 
z@`GN(4QPm4(CbRLYI2C3)w8L}p=P4qXUP-)#$P;yR|Kdhn0pUgfMPvrp~g59UAQIx ziHyHOAeJC$NNLpCP}9KH-Q+!aAZH5=W5(M6?Hzk#-q6*8&rY|*nS#j=HTB1+S#!n9 zK2PQZ4AMn}0PVl64-rhnsiwRQJ;4HUgiLK|2p%N*JXh>%nWi3zH2+#v>_W2zo>)3f zRr$VI{+q%w0xl{=1H25g14s{e4bxckdkal^Bk-W|u1f(sQu@6lF42uo%M?vS|4PPa zrmO#LH#l0RqqK*dUd$A~XwWadrTwcFrjL+m72X=Z!rX_c)mjqB#U@NV{+V0U%7W`* z2CqI47vL5LRmATj*V8g_*O_DgRB5@`t+b#n!-|MHb@)-+vlXX3F|x>u(y1o1!Ile% zUB0u;!32zv&I`8~X5itvUuVo>od!er3~`D(k49ND&q-j!H#@Q%}Bfu z0KiFWj8R7kM^rD5DDrS@GENf~N?3l#-~!>utq)Yp>X$!l<1Z2126e*mTg8CugTYWf zkL3Ye4`SdZlYv|9>Hz8q88?8J5;Ry41MnFw;|ivMyy*hhAb=G-IOU1R0YX3kGt{V# zln3!`p9gw8SF2U;8a9w-%jYTD@F3}$6rg8|_*zPL4R|D>_uCH9KP9e!gv)^`RnVI` zrnB#f(%-v?0I}2Z0VZHfJ0=oBNjXb+Bn3zw{|P7;TTBYG#2EujA2GtGaEMx*@c}R5 z=N^h7WzNMb@yZEv+b`M4i~s4G0;YoQd@_D?J~#RLqB999?g5yHI@4!O!!c8Zxa1>| zcu;#A_>+!zF`0r5AI%`=ml406H3CakGX>hn05mTZa9+NYi6Y{-Hq#vNK|NB<9fW|g z1Akfh`>E8oVu4~D;9n?J9GEkY(&+%We%4gemqUO>2NqtFLEF!?rsSMa$dk}$Pn*H% z2?0Bk6L}U%Vj1Qs*txeV6GAI0n zf^ftIzRqlbsmh6lB7<-RI}|M!mBPaUm*s0K6lKw@;sTBf>->i6q+lSgzP5zUMOGQY zm9(%{UZgbHR<(GSu%qHeX(cca1n=#S8R(PJ6f$vz0C{emqz&XQZIO@)LwGF2g8?6Q z4*KDdN_C^On&7kS;_1m;5L^P|f@9Cwd3?5n#k4&wJ9wUI^8vlCs%`xKz?b`1D22lS zP?8TYvnHzSmnW2t9Z?cr32w3FKid_ORs?L1NsKh11h3et!gY5Bod!WoLb^ z*;#`dS1ki;Y*Nu;=>2YBI-GfSnXBbKZ8XjHdunPNi2pg0lsM25`(>o@&!(!s7u?<& z`&Xus)w{r+WxqxZL>i)t?3tMivlr}mxif*N3^Hpu%Bg_GlMPQHCa-ibv6e$h+N8r!oWGiI}*LQ zcjWEBWw_dr(Z)FFcGd3YjE(wQy@HBcK=?CQMZ^zEPem`v-fmaK7L-r%vy8L=LsTnXM-Kpx(r;kJEg&w6W#lX{Fyp@=2vhB_>pb zL8oixOny2hdZwZYu)z>NuQjH^NasuurYj1$b*uimBCbf{8l-laaOa6<&7Hdwxgy1c z4gGR!p22=lVG<{Nw|;91nvp)^D%dZ+0eW~#%BVT>9;ip=NKA7q)U102=du&`BSB(*-psIS#JCBf84MMcC|{XgnhR-ZmR*j#iB?fvtXefk2Hqb?$S~6 z>g~vhi$kssjx{HBT&{tKds@d!Mc3HbOV}^=!K(hhE+)&ce~MOVI>InKv)v;QSPwPb@fSA=OLY0d z+yA6|4xP+43wB7JNpYCth%}MtQnDQlG|xv5hsSRHVB=H_=jpA;X9B{fYV~X8+1XbX ziV4F_O*Gu>=QhsTl*^WsIA}?v9=+U$IDads`GZ-c2>htw+|xz?o~K}Hhhc!{HAXU5 zAs9PQ;lqk`q4bsAQA?cZ>ic|Wc}!@IYdGaNCe?U*`Vr-k<;ptP<{@vVI*{^&4$tv zK-i5>548w!z;h}SH`n2D1ZgSkb!d>>dvLw^uRHI|43*t6?iW`(kq5VYXU@>i`lKBm z`|(s}-uB`{R6pgq4@nGqbfJfM?Nnw*C80O(HO)VRQ~329nIS_ka= z8yFIq9(C01mb^C<|K9VvD_paGS3c(iXuHej zuJi3ce30_n_08dLdva^)-xk+87z&Q_*J$S}KB*wSj%Zy*CBsC0X0oURzs@>Dp@)45 z&DaZ6r`)3lJ+DDJYFV_TC|hS+nV1BNIB&AAy`if4S`uSbDbBud(c&4&E?5Ji#@jU; zI3C6Nsp{pFPvhUKUt{3ERS`vy_OCe`FUiQQssaKg9n~*B^+qL00&TZ^Vka@iTN$O3 zu7M^b#eO+e{puYlV2Zh%iq1J*g70UEW1Km1Ud`2eQqfO10v$LQ_4-J*fy=w=%#k}Y zbjUxzpK+-mm<$fLg3`GQY8KRX}yP0$S*m)IHhZ6-w6(C>q`udpwZE9~1z4C`@&6JMf=?G8f! 
zlw4vW(tc1}-ATML4I^C1^OtY3i&PjMk;R$d1%`J_>|=@@qWUibxQ>*TV&uiyPv&Gs zj(WK@N4(wM-RC>0zGaZMzN~8*3^&GV4mv%{BbD)6$WSUOG>#z6DBhq& z(aA_TgL^Z4!nqS;1&hP%jTrok76i$G!)gOWrm40%a3mBy`E7Qg>7N)g4w&O8rbE8) zT84{XS{h~`KkeChFDE_BV@Y@LylG zd6O-g2U7p#TL43L3Ey+{kN3iPi(;(r+D*rYu#s|0_I?M93Ds`(39HztaakL`FyFte zARwkgrN3IELX>fNG>|j5Sk1yo0`z5|ftZ}5f6d9Ll6km%^VAwon&J5r!^zO7OCXR5 z3C2ySD~NQ;Sa-|K<^(TA5S5JBG%5RbT-jp1h0Ar!^!Zc4h6*4^$;oC&El#U%n^TJq=DT z^lBV_Akc#9`Fw#c!mOFJT-u3la^8@JzF-o}y*1{04CV%`hW0&EE5`BV@y!5Wa$mjB zui0NRpn}}`CO-|N-1ICTF`KYS^*bn;t^}FgL<&Hxa)3EtY6dLsHsaDS^;~kEFwT=y z36d9S=htNkKv8)633n)bp_QdVC|(MHn*RJoFsH27g}n(s@1V#~!rz+|Db{rlF13Kw zZ0Bj?KIT53M-xZaMS6Y7DEGPbvH{f)dac1Gn5&yHUG_vb;5Dl5g{5yxkk|iRII6nU zv)?6uy|rSk{eDZ2k#2!TM6}mCN=Qn-9%{5jyA)xcRbs#YT_^2&y@j1!Y3RzJLHU0R z3ne|i<|6H=PED@D7h0z9pT4BuWFJG5@9jO)8aO>F`cxs|FjxkVUt2CPiI)Uk+PJk`LL#3qW&k>Rr;M!3I0K@t;q%0CA{JmP`Z5j@ukH3LBoE7kt1+faZ(;Iro6 z$D%;f~#F5v#- zt&$iiB|_aPrUO(WSI{6bjy>=y#*4KI<2U}Kmgs}~N|gXj>H{_`;DtKq3BFdy?Z<_t+&(e$~@%`_OZf0p-3##TVC0~6g8v46#@khq} zZH=*ikbhjhH}9w$%n5QaIG6ChfT-99i?Uk0o6u@f2%GpH)cb5FtfEp(vZk&9elT(Yiq+W3`9wpOI&{1wc{oDfx1g*a68$g zVs1#Rb1&p`0f%y ziWiGEVvlsob*HiqY~*AYu*&W@L`fTSz-7Eoncxs?5>LB?zT#j@!f5t4_}kk}!_;!D z#fG9#75hv$39+mEQE5+1Y$0p$^(_9-_QX-H82p=YU|fqRybIm~b#*uE?Eg1LVgXYRothmN+{RLaG!Ty4{Y1D%O8&JNeq@hMT~4S3v>!F`83z z6=WI)N&)L(q>lW2?rCMhnRwcbtq!ent6DP4eh`BL#`Ym`1|X(azk|`B%bc4E z0qnhpC=MZM!?BMZnCH{pzDNUxKg%vOuZLZ+AC!~Y2W=wDnxDuEMWyrT?F{(s;v9}#K1q)u z%$x!tSrOpJ#U5f3)5SBNhG}sIK_nAqDb7A;mieYg-vXJEqJ~7IekSecw|J7{YmO)F z$V*Y=#pyTZ4egmc|HIn|YHC`k(H(E^R}s&hRx|~lOgi{u7V5bgJ-9+ARnXu+nUFO6 zRPe*Y#CE1-VBaUiR!O*uh_Im*`a%lJ|{+@8Dv`hmeZht3g-r=svB zyI^U^y|`PYeb8x-{~lA@zP7}+-FZiEio#j~cTQ&8`@ug~r!SDJ^ItkG)>ThtT;&9v z_+P;J4|xsg&{>B+XH>~AgFEudRzcq@}3Ugu$t%Ff=L z346mmRrdl>QUg5|b$38+MbIIQpV;7hw*kl)OQbLH)B;?Rlm?q)r z03D*LlK@-*!_%GpF*^u>wGXWMaCjXGiHnoOnuUzog*D~Tu6LRv4|Bw4hs{Nq9}9bM zPGb!{?4fR)&vgZS{{ATBlT2Ya(lF-5XdejMI&x9r_Z$XLscD`*s#K zdL+@geFJBGB*A`e`kd!4i4j%O+Z`gdkQY{R;4p`y0MK=h3KBB`b@2^p4##y4<^c`U z8|LIpNXIRZN_z5VBU*;pEakPx zu-VzF_(<=>R)i8h<^uEVy&+3(xUosFL->>%7%#189#|6(9}EXcFODRye{ZPbaF`Q|uTe$pI9=L7Z@DPHGlxB_qE}O# zUER^!QDS0}nb+}slzthrR*=&$kxM<6ku}cz!oiK1@#^fK{`cl|>ui_du_xEfdkYS| zEq{yp3iDuTuc1Hf9G^L@(D#+i|49@$ko(Udhy_?EV`nOEQQ+jC3+hJn=pHWejq>r7 z`Rbhxi{dn_5ozaRY2a$9il~>UkaWeH%r>S;lukGFZHo<_qr$jVWFQsecr7hriO`>~ zaUUZ&{%Xj9g9eqk!YxB+U%-6qSVD&O@=*Q7Zgqln#>|lBUYH`UYObH6V63Teae`1E zCj_I{B6mUI)Qa2&53iq$G_nbAYW@bLS0WNKfCo^VzC<=5C0J5`l2{985nykK;}hV9 zv#_9=)ZXs_=N?R+Rq$#DDU=GH#!=_cC3Qb%=B==jkJsn$6d<5d9q!X4WUT`!6A=h}#Ts`Red;=dci678qv$MFnv<@#PP$;xZ3z6FHUeGpT;vQm*#1KdMjP1&oT5 zPBHYH58UVJIA5}%3VHWu$J3Z)-&$Ch4clRiG9DyEzapzEXtEz614>pcWSg(81PV{k@lFhucVPh@(?(k80gp-NwH^@CeJB>8y~L zi=%nEygOtUA+kBuj?(1ch-;$52UfZGTS^tq%#K3oVQ-WT9igV1GogOA39l);BcU(T zmdL!h`U^Z1 zl{_Zb-?q*(bKP;@#5K&g?NyT4Y+73cA{;O&Fi~@r>NtREV`jMg z2IL;cC;%0<Euw2@zWg81(`S@;f-9TRV3Z+OWdgXecNo)h@xF+0Z?xBBDT_y-N| zhkfR&J(z3$wr1DO{$;NA1;-iA?_9ptUOP|0+rH)r%3=unI%ui|$IQ!h2zFrLhv}D> zkB+h>k!4pI)@n_ojzFMvC@6!nIrD6m(q5GAGKJb24o-RnyRTqGY7MjqmhNY9o=Uhx z-7bc7%l=pq@02c8%;!lvFQ>f zb0#G?dk}q)I#cUpY(|A72~iN>SVdfzv+3^>;kzc$qX;KhZksTsca11@g_4)OROMR#h+16WXI#RuxVT z-HB^E4lv-Fb7v(*nFj(_Yfe-WHpKB=fzw8J?2v|mAM6*}QK1Rz&tIASa4L5F*FG>3 za1xitKagb`tg*R2+u=*0;T{tRLg={w`h^q*8vc!Q@JbX^@t*fd)t?~C=c8XOI9Bzm zs^HiIQUJ(fSUD@xA9?LR>`Bv(q{*u53);0>?+b}*Pbi=xwgTL}tOVUX{-UR&H0qJd z?6m`T74724i{g?n>+Ji)7;j^6|B(3uaEIefh%fz@w7Z8`Di~2OtmhB!r+PJ({D@`e zTX@k*Mu)h6#(cQdlpCgg-hbDECE;KC-y-;O8VaP_R>8y{nktWEy~|P0azNSGfYZ(3 z>eOY-_%AykbwQ+qh8$>)l5bp38m{mY7?D4G`E1?}v$r@HGIgW8OdSIrkf7-o~X#KDe zH`{Ze@Z^0Gz`2f`?%n-v0WgmP_zs2x?f`I3^|TRT5LYwPb!Kz&a%u3!X$zqEEeK3A 